anolis: kfence: unlimit alloc interval when num_objects_pernode > 65535

OpenAnolis Bug Tracker: 0000278

We may want every slab object to be allocated by KFENCE, so when the
pool size is large, the limit on the allocation gate interval is
lifted. When the number of objects is larger than 65535 (the upstream
default maximum), KFENCE will allocate pages from its pool at all
times when possible.
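For illustration (the interval and object count below are assumed
example values, not taken from this commit), a boot command line that
triggers the new always-on behavior could look like:

	kfence.sample_interval=100 kfence.num_objects_pernode=131072

Since 131072 > 65535, the sample interval is ignored and every
eligible allocation is attempted from the KFENCE pool.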

Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Authored by Tianchen Ding on 2021-10-28 11:23:18 +08:00, committed by Qiao Ma
parent 00eaef1e26
commit f2583e14ac
5 changed files with 34 additions and 13 deletions

View File

@@ -58,7 +58,9 @@ dedicating 2 MiB to the KFENCE memory pool.

 You can change the KFENCE memory pool size by setting ``kfence.num_objects_pernode``
 in boot command line, and the pool size of each node will be computed and updated
 in the same way as above. You can set this value as large as possible, so
-please be careful DO NOT use up all memorys.
+please be careful DO NOT use up all memory. If this value is larger than 65535,
+sample_interval will be ignored, and KFENCE will allocate pages from its pool
+at all times when possible.

 Note: On architectures that support huge pages, KFENCE will ensure that the
 pool is using pages of size ``PAGE_SIZE``. This will result in additional page
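As a rough sketch of the sizing arithmetic behind the "2 MiB" figure
quoted above (the 2 * (num_objects + 1) pages layout follows the
upstream KFENCE pool of one data page per object interleaved with
guard pages; the concrete numbers below are illustrative assumptions):

	/* pool_size.c - illustrative arithmetic only, not kernel code */
	#include <stdio.h>

	int main(void)
	{
		/* One data page per object, plus interleaved guard pages. */
		unsigned long num_objects = 65536; /* first value past the 65535 limit */
		unsigned long page_size = 4096;    /* assuming 4 KiB pages */
		unsigned long pool_bytes = 2 * (num_objects + 1) * page_size;

		printf("per-node pool: %lu MiB\n", pool_bytes >> 20); /* prints 512 */
		return 0;
	}

With the default 255 objects this formula gives the 2 MiB pool
mentioned above; past the 65535 threshold each node's pool already
exceeds 512 MiB, at which point sampling buys little and the commit
simply serves every allocation it can.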

View File

@@ -11,6 +11,7 @@

 #include <linux/mm.h>
 #include <linux/types.h>
+#include <linux/static_key.h>

 #ifdef CONFIG_KFENCE
@@ -24,12 +25,12 @@ extern unsigned long kfence_pool_size;
 extern char **__kfence_pool_node;

 #ifdef CONFIG_KFENCE_STATIC_KEYS
-#include <linux/static_key.h>
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
 #else
 #include <linux/atomic.h>
 extern atomic_t kfence_allocation_gate;
 #endif
+DECLARE_STATIC_KEY_FALSE(kfence_skip_interval);

 /**
  * is_kfence_address_node() - check if an address belongs to KFENCE pool on given node

View File

@@ -57,6 +57,9 @@ config KFENCE_NUM_OBJECTS
	  and two adjacent ones used as guard pages. May be overridden via boot
	  parameter "kfence.num_objects_pernode".
+
+	  If larger than 65535, sample_interval will be ignored, and KFENCE will
+	  allocate pages from its pool at all times when possible.

 config KFENCE_STRESS_TEST_FAULTS
	int "Stress testing of fault handling and error reporting" if EXPERT
	default 0
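As a concrete illustration (131072 is an arbitrary value chosen for
this example, not something the commit recommends), a build
configuration such as:

	CONFIG_KFENCE=y
	CONFIG_KFENCE_NUM_OBJECTS=131072

crosses the 65535 threshold, so the resulting kernel runs KFENCE in
the always-allocate mode unless kfence.num_objects_pernode lowers the
per-node count at boot.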

View File

@@ -54,6 +54,11 @@ EXPORT_SYMBOL(kfence_pool_size);
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "kfence."

+#ifdef CONFIG_KFENCE_STATIC_KEYS
+/* The static key to set up a KFENCE allocation. */
+DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
+#endif
+DEFINE_STATIC_KEY_FALSE(kfence_skip_interval);

 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
@@ -63,10 +68,14 @@ static int param_set_sample_interval(const char *val, const struct kernel_param
	if (ret < 0)
		return ret;

-	if (!num) /* Using 0 to indicate KFENCE is disabled. */
+	if (!num) { /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
-	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+		static_branch_disable(&kfence_allocation_key);
+#endif
+	} else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) {
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */
+	}

	*((unsigned long *)kp->arg) = num;
	return 0;
@@ -143,11 +152,6 @@ struct kfence_freelist {
 };
 static struct kfence_freelist freelist;

-#ifdef CONFIG_KFENCE_STATIC_KEYS
-/* The static key to set up a KFENCE allocation. */
-DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
-#endif
-
 /* Gates the allocation, ensuring only one succeeds in a given period. */
 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
@@ -901,6 +905,7 @@ void __init kfence_alloc_pool(void)
	}
 }

+#define KFENCE_MAX_SIZE_WITH_INTERVAL 65535
 void __init kfence_init(void)
 {
	int node, i;
@@ -930,7 +935,13 @@ void __init kfence_init(void)
	}

	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	if (kfence_num_objects > KFENCE_MAX_SIZE_WITH_INTERVAL) {
+		static_branch_enable(&kfence_skip_interval);
+		static_branch_enable(&kfence_allocation_key);
+	} else {
+		queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	}
+
	for_each_node(node) {
		if (!__kfence_pool_node[node])
			continue;
@@ -1052,6 +1063,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node)
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
		return NULL;

+	if (static_branch_likely(&kfence_skip_interval))
+		goto alloc;
+
	/*
	 * allocation_gate only needs to become non-zero, so it doesn't make
	 * sense to continue writing to it and pay the associated contention
@@ -1073,6 +1087,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node)
	}
 #endif

+alloc:
	if (!READ_ONCE(kfence_enabled))
		return NULL;
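Taken together, kfence_init() and __kfence_alloc() form the new fast
path: a pool of more than 65535 objects enables kfence_skip_interval,
and allocations then branch straight past the sampling gate. Below is
a simplified userspace analogue of that gate (a sketch only:
gate_allows_alloc is a hypothetical name, and plain C11 atomics plus a
bool stand in for the kernel's static keys and delayed work):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int allocation_gate = ATOMIC_VAR_INIT(1); /* timer resets it to 0 */
	static bool skip_interval; /* stands in for kfence_skip_interval */

	/* Decide whether this allocation should be served from the KFENCE pool. */
	static bool gate_allows_alloc(void)
	{
		if (skip_interval)
			return true; /* huge pool: no rate limiting at all */

		/* Only the first caller after each gate reset may proceed. */
		if (atomic_load(&allocation_gate) ||
		    atomic_fetch_add(&allocation_gate, 1) > 0)
			return false;

		return true;
	}

With the static key enabled once at init, the real kernel's
skip-interval check is patched into an unconditional branch, so the
bypass costs nothing on the allocation fast path.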

View File

@@ -520,10 +520,10 @@ static void test_free_bulk(struct kunit *test)
			     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
-			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
+			test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
-			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
-			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
+			test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
+			test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);