From f2583e14ac5fb483ae876f29c55c4724b19ccd0a Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Thu, 28 Oct 2021 11:23:18 +0800
Subject: [PATCH] anolis: kfence: skip alloc interval when
 num_objects_pernode > 65535

OpenAnolis Bug Tracker: 0000278

We may want every slab allocation to be served by KFENCE, so when the
pool size is large enough, the allocation gate interval is no longer
enforced. When the number of objects per node is larger than 65535 (the
maximum value allowed upstream), KFENCE will allocate pages from its
pool all the time if possible.

Signed-off-by: Tianchen Ding
Reviewed-by: Xunlei Pang
---
 Documentation/dev-tools/kfence.rst |  4 +++-
 include/linux/kfence.h             |  3 ++-
 lib/Kconfig.kfence                 |  3 +++
 mm/kfence/core.c                   | 31 +++++++++++++++++++++++--------
 mm/kfence/kfence_test.c            |  6 +++---
 5 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
index 9aa3d97cbb67..80063066e245 100644
--- a/Documentation/dev-tools/kfence.rst
+++ b/Documentation/dev-tools/kfence.rst
@@ -58,7 +58,9 @@ dedicating 2 MiB to the KFENCE memory pool.
 You can change the KFENCE memory pool size by setting ``kfence.num_objects_pernode``
 in boot command line, and the pool size of each node will be computed and updated
 in the same way as above. You can set this value as large as possible, so
-please be careful DO NOT use up all memorys.
+please be careful DO NOT use up all memory. If this value is larger than
+65535, ``sample_interval`` is ignored, and KFENCE will allocate pages from
+its pool all the time if possible.
 
 Note: On architectures that support huge pages, KFENCE will ensure that the
 pool is using pages of size ``PAGE_SIZE``. This will result in additional page
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index c7a8ba201107..bd3d90b9312c 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -11,6 +11,7 @@
 
 #include
 #include
+#include
 
 #ifdef CONFIG_KFENCE
 
@@ -24,12 +25,12 @@ extern unsigned long kfence_pool_size;
 extern char **__kfence_pool_node;
 
 #ifdef CONFIG_KFENCE_STATIC_KEYS
-#include
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
 #else
 #include
 extern atomic_t kfence_allocation_gate;
 #endif
+DECLARE_STATIC_KEY_FALSE(kfence_skip_interval);
 
 /**
  * is_kfence_address_node() - check if an address belongs to KFENCE pool on given node
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index 1ee749a779bd..d9700e1f6595 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -57,6 +57,9 @@ config KFENCE_NUM_OBJECTS
	  and two adjacent ones used as guard pages. May be overridden via boot
	  parameter "kfence.num_objects_pernode".
 
+	  If larger than 65535, sample_interval is ignored, and KFENCE will
+	  allocate pages from its pool all the time if possible.
+
 config KFENCE_STRESS_TEST_FAULTS
 	int "Stress testing of fault handling and error reporting" if EXPERT
 	default 0
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index cf4907816f1d..f08e38ef2a21 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -54,6 +54,11 @@ EXPORT_SYMBOL(kfence_pool_size);
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "kfence."
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+/* The static key to set up a KFENCE allocation. */
+DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
+#endif
+DEFINE_STATIC_KEY_FALSE(kfence_skip_interval);
 
 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
@@ -63,10 +68,14 @@ static int param_set_sample_interval(const char *val, const struct kernel_param
 	if (ret < 0)
 		return ret;
 
-	if (!num) /* Using 0 to indicate KFENCE is disabled. */
+	if (!num) { /* Using 0 to indicate KFENCE is disabled. */
 		WRITE_ONCE(kfence_enabled, false);
-	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+		static_branch_disable(&kfence_allocation_key);
+#endif
+	} else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) {
 		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */
+	}
 
 	*((unsigned long *)kp->arg) = num;
 	return 0;
@@ -143,11 +152,6 @@ struct kfence_freelist {
 };
 static struct kfence_freelist freelist;
 
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-/* The static key to set up a KFENCE allocation. */
-DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
-#endif
-
 /* Gates the allocation, ensuring only one succeeds in a given period. */
 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
 
@@ -901,6 +905,7 @@ void __init kfence_alloc_pool(void)
 	}
 }
 
+#define KFENCE_MAX_SIZE_WITH_INTERVAL 65535
 void __init kfence_init(void)
 {
 	int node, i;
@@ -930,7 +935,13 @@ void __init kfence_init(void)
 	}
 
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	if (kfence_num_objects > KFENCE_MAX_SIZE_WITH_INTERVAL) {
+		static_branch_enable(&kfence_skip_interval);
+		static_branch_enable(&kfence_allocation_key);
+	} else {
+		queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	}
+
 	for_each_node(node) {
 		if (!__kfence_pool_node[node])
 			continue;
@@ -1052,6 +1063,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node)
 	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
 		return NULL;
 
+	if (static_branch_likely(&kfence_skip_interval))
+		goto alloc;
+
 	/*
 	 * allocation_gate only needs to become non-zero, so it doesn't make
 	 * sense to continue writing to it and pay the associated contention
@@ -1073,6 +1087,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node)
 	}
 #endif
 
+alloc:
 	if (!READ_ONCE(kfence_enabled))
 		return NULL;
 
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index f413b4f4c27b..3c0b390e21ba 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -520,10 +520,10 @@ static void test_free_bulk(struct kunit *test)
 					     (iter & 1) ? ctor_set_x : NULL);
 		void *objects[] = {
 			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
-			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
+			test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
 			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
-			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
-			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
+			test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
+			test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
 		};
 
 		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
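
Note: as a quick illustration of the behaviour this patch introduces, the sketch
below models the decision made in kfence_init() and the fast path of
__kfence_alloc() as ordinary userspace C. The struct kfence_policy type and the
policy_init()/may_allocate_from_pool() helpers are invented for illustration
only; the kernel implementation uses the kfence_skip_interval and
kfence_allocation_key static branches, the kfence_allocation_gate atomic and a
delayed work item instead.

/*
 * Minimal userspace sketch (not kernel code) of the allocation policy added
 * by this patch: above the threshold, the sample interval is never consulted.
 */
#include <stdbool.h>
#include <stdio.h>

#define KFENCE_MAX_SIZE_WITH_INTERVAL 65535	/* same threshold as the patch */

struct kfence_policy {
	unsigned long num_objects;	/* kfence.num_objects_pernode */
	bool skip_interval;		/* stands in for kfence_skip_interval */
	bool gate_open;			/* stands in for kfence_allocation_gate == 0 */
};

/* Decided once, mirroring the check added to kfence_init(). */
static void policy_init(struct kfence_policy *p)
{
	p->skip_interval = p->num_objects > KFENCE_MAX_SIZE_WITH_INTERVAL;
	/* In the kernel, the sample-interval timer is only queued otherwise. */
}

/* Simplified stand-in for the fast path of __kfence_alloc(). */
static bool may_allocate_from_pool(const struct kfence_policy *p)
{
	if (p->skip_interval)
		return true;		/* every allocation may use the pool */
	return p->gate_open;		/* otherwise one allocation per interval */
}

int main(void)
{
	struct kfence_policy p = { .num_objects = 100000, .gate_open = false };

	policy_init(&p);
	printf("allocate from KFENCE pool: %s\n",
	       may_allocate_from_pool(&p) ? "yes" : "no");	/* prints "yes" */
	return 0;
}

In practice this means that booting with kfence.num_objects_pernode greater
than 65535 makes kfence.sample_interval irrelevant: kfence_init() never queues
the sample timer, enables kfence_skip_interval instead, and __kfence_alloc()
may then serve every eligible allocation from the KFENCE pool.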