From 2bc912bc134011cf3d039feb6d9893b0e0c0014d Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Thu, 23 Dec 2021 14:50:08 +0800
Subject: [PATCH] anolis: kfence: add PG_kfence to recognize kfence address in
 fast path

ANBZ: #28

To improve performance, we add the PG_kfence page flag to first
recognize whether an address is in a kfence pool. Only if it is, the
remaining work is done by calling out-of-line (noinline) functions.

Signed-off-by: Tianchen Ding
Reviewed-by: Xunlei Pang
---
 include/linux/kfence.h         |  9 ++-------
 include/linux/page-flags.h     | 13 +++++++++++++
 include/trace/events/mmflags.h |  9 ++++++++-
 mm/kfence/core.c               | 27 +++++++++++++++++++++------
 mm/page_alloc.c                |  2 +-
 5 files changed, 45 insertions(+), 15 deletions(-)

diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index a091b3e5a8d3..386f27fba6cb 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -70,10 +70,7 @@ static __always_inline bool is_kfence_address_node(const void *addr, const int n
  */
 static __always_inline bool is_kfence_address(const void *addr)
 {
-	if (!static_branch_unlikely(&kfence_once_inited) || unlikely(!virt_addr_valid(addr)))
-		return false;
-
-	return unlikely(is_kfence_address_node(addr, page_to_nid(virt_to_page(addr))));
+	return static_branch_unlikely(&kfence_once_inited) && PageKfence(virt_to_page(addr));
 }
 
 /**
@@ -273,12 +270,10 @@ static __always_inline __must_check bool kfence_free_page(struct page *page)
 {
 	void *addr;
 
-	if (!static_branch_unlikely(&kfence_once_inited))
+	if (!static_branch_unlikely(&kfence_once_inited) || !PageKfence(page))
 		return false;
 	addr = page_to_virt(page);
-	if (!is_kfence_address_node(addr, page_to_nid(page)))
-		return false;
 	__kfence_free_page(page, addr);
 	return true;
 }
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 83557f78dec1..64e4ac9a2a2f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -141,6 +141,9 @@ enum pageflags {
 #endif
 #ifdef CONFIG_DUPTEXT
 	PG_dup,		/* Page has NUMA replicas */
+#endif
+#ifdef CONFIG_KFENCE
+	PG_kfence,	/* Page in kfence pool */
 #endif
 	__NR_PAGEFLAGS,
 
@@ -460,6 +463,10 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
 PAGEFLAG(Dup, dup, PF_HEAD)
 #endif
 
+#ifdef CONFIG_KFENCE
+__PAGEFLAG(Kfence, kfence, PF_ANY)
+#endif
+
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
@@ -858,6 +865,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
 #define __PG_DUP 0
 #endif
 
+#ifdef CONFIG_KFENCE
+#define __PG_KFENCE (1UL << PG_kfence)
+#else
+#define __PG_KFENCE 0
+#endif
+
 /*
  * Flags checked when a page is freed. Pages being freed should not have
  * these flags set. It they are, there is a problem.
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 4261665e2619..c7dff5682053 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -91,6 +91,12 @@
 #define IF_HAVE_PG_DUP(flag,string)
 #endif
 
+#ifdef CONFIG_KFENCE
+#define IF_HAVE_PG_KFENCE(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_KFENCE(flag,string)
+#endif
+
 #define __def_pageflag_names						\
 	{1UL << PG_locked,		"locked"	},		\
 	{1UL << PG_waiters,		"waiters"	},		\
@@ -119,7 +125,8 @@ IF_HAVE_PG_HWPOISON(PG_hwpoison,	"hwpoison"	)		\
 IF_HAVE_PG_IDLE(PG_young,		"young"		)		\
 IF_HAVE_PG_IDLE(PG_idle,		"idle"		)		\
 IF_HAVE_PG_ARCH_2(PG_arch_2,		"arch_2"	)		\
-IF_HAVE_PG_DUP(PG_dup,			"dup"		)
+IF_HAVE_PG_DUP(PG_dup,			"dup"		)		\
+IF_HAVE_PG_KFENCE(PG_kfence,		"kfence"	)
 
 #define show_page_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index aefbc9979db7..37753c18b02e 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -748,6 +748,21 @@ static void rcu_guarded_free(struct rcu_head *h)
 	kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
+static inline void kfence_clear_page_info(unsigned long addr, unsigned long size)
+{
+	unsigned long i;
+
+	for (i = addr; i < addr + size; i += PAGE_SIZE) {
+		struct page *page = virt_to_page(i);
+
+		__ClearPageKfence(page);
+		__ClearPageSlab(page);
+		page->mapping = NULL;
+		atomic_set(&page->_refcount, 1);
+		kfence_unprotect(i);
+	}
+}
+
 static bool __init kfence_init_pool_node(int node)
 {
 	char *__kfence_pool = __kfence_pool_node[node];
@@ -775,6 +790,8 @@
 	 * enters __slab_free() slow-path.
 	 */
 	for (i = 0; i < kfence_pool_size / PAGE_SIZE; i++) {
+		__SetPageKfence(&pages[i]);
+
 		if (!i || (i % 2))
 			continue;
 
@@ -826,13 +843,11 @@
 err:
 	/*
-	 * Only release unprotected pages, and do not try to go back and change
-	 * page attributes due to risk of failing to do so as well. If changing
-	 * page attributes for some pages fails, it is very likely that it also
-	 * fails for the first page, and therefore expect addr==__kfence_pool in
-	 * most failure cases.
+	 * We will support freeing unused kfence pools in the following patches,
+	 * so here we can also free all pages in the pool.
 	 */
-	memblock_free_late(__pa(addr), kfence_pool_size - (addr - (unsigned long)__kfence_pool));
+	kfence_clear_page_info((unsigned long)__kfence_pool, kfence_pool_size);
+	memblock_free_late(__pa(__kfence_pool), kfence_pool_size);
 	memblock_free_late(__pa(kfence_metadata), metadata_size);
 	__kfence_pool_node[node] = NULL;
 	kfence_metadata_node[node] = NULL;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3821f35a32c5..5007ce0723cc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1281,7 +1281,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 
 	page_cpupid_reset_last(page);
 	kidled_set_page_age(page_pgdat(page), page_to_pfn(page), 0);
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_KFENCE;
 	reset_page_owner(page, order);
 
 	if (!PageHighMem(page)) {
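For illustration only, not part of the patch: the stand-alone C sketch
below models the fast-path pattern the commit message describes, where
a cheap inline check (a static-branch-like flag plus one per-page flag
bit) guards an out-of-line slow path. All "model_" names are invented
for this sketch; they are not the kernel's static keys, page flags, or
kfence APIs.

/* cc -o model model.c && ./model */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_NPAGES	16
#define MODEL_PG_KFENCE	(1UL << 0)	/* stand-in for PG_kfence */

/* Stand-in for the kfence_once_inited static branch. */
static bool model_kfence_inited;

/* Stand-in for struct page::flags, one word per page frame. */
static unsigned long model_page_flags[MODEL_NPAGES];

/* Out-of-line slow path: reached only for pages with the flag set. */
static void model_kfence_free_slowpath(int pfn)
{
	printf("pfn %d: freed via kfence slow path\n", pfn);
}

/*
 * Fast path, analogous to the patched kfence_free_page(): one branch
 * plus one bit test decide whether the slow path runs at all.
 */
static inline bool model_kfence_free_page(int pfn)
{
	if (!model_kfence_inited ||
	    !(model_page_flags[pfn] & MODEL_PG_KFENCE))
		return false;
	model_kfence_free_slowpath(pfn);
	return true;
}

int main(void)
{
	model_kfence_inited = true;
	model_page_flags[3] |= MODEL_PG_KFENCE;	/* pretend pfn 3 is in the pool */

	for (int pfn = 0; pfn < MODEL_NPAGES; pfn++)
		if (!model_kfence_free_page(pfn))
			printf("pfn %d: ordinary buddy free\n", pfn);
	return 0;
}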
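Also for illustration, not part of the patch: the mm/page_alloc.c hunk
relies on C operator precedence. Since "~" binds tighter than "|", the
new line masks page->flags with (~PAGE_FLAGS_CHECK_AT_PREP) |
__PG_KFENCE, i.e. it clears every flag checked at prep except
PG_kfence, which must survive a free so the pool page stays
recognizable. The sketch below checks that identity with made-up
constants (8 flag bits, PG_kfence at bit 5), not the kernel's real
values.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative stand-ins, not kernel values. */
	unsigned long check_at_prep = (1UL << 8) - 1;	/* all 8 fake flags */
	unsigned long pg_kfence = 1UL << 5;		/* fake PG_kfence */
	unsigned long flags = 0xA7A;			/* arbitrary; bit 5 set */

	/* Exactly the patched expression. */
	unsigned long patched = flags & (~check_at_prep | pg_kfence);

	/* Equivalent form: clear prep-checked flags except PG_kfence. */
	unsigned long rewritten = flags & ~(check_at_prep & ~pg_kfence);

	assert(patched == rewritten);
	assert(patched & pg_kfence);	/* PG_kfence survives */
	assert(!(patched & 0x5A));	/* other low flag bits are cleared */
	printf("before 0x%lx, after 0x%lx\n", flags, patched);
	return 0;
}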