anolis: kfence: add PG_kfence to recognize kfence address in fast path

ANBZ: #28

To improve performance, add a PG_kfence page flag so that the fast path
only needs to check whether an address lies in the kfence pool. Only
when it does is the remaining work performed, by calling out-of-line
(noinline) functions.

Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
parent 451d77f039
commit 2bc912bc13
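For readers skimming the diff below, the pattern being introduced is: a cheap inline test (a static branch plus one page-flag bit) gates all further kfence work, which stays out of line. Here is a minimal userspace sketch of that split; all names in it (pool_inited, pool_flags, kfence_free_slow) are stand-ins, not the kernel's:

/* Minimal model of the fast-path check this patch adds. The kernel
 * uses a static key plus a per-page flag; a bool and a flags array
 * stand in for them here.
 */
#include <stdbool.h>
#include <stdio.h>

#define NPAGES    16
#define PG_KFENCE (1UL << 0)

static bool pool_inited;                 /* models kfence_once_inited */
static unsigned long pool_flags[NPAGES]; /* models page->flags, one word per page */

/* Slow path kept out of line, like __kfence_free_page(). */
__attribute__((noinline))
static void kfence_free_slow(int page)
{
	printf("page %d freed via kfence slow path\n", page);
}

/* Fast path: two cheap tests decide whether the slow path runs at all. */
static inline bool kfence_free(int page)
{
	if (!pool_inited || !(pool_flags[page] & PG_KFENCE))
		return false;            /* common case: not a kfence page */
	kfence_free_slow(page);
	return true;
}

int main(void)
{
	pool_inited = true;
	pool_flags[3] |= PG_KFENCE;      /* mark one page as in the pool */

	printf("page 2: %s\n", kfence_free(2) ? "kfence" : "buddy");
	printf("page 3: %s\n", kfence_free(3) ? "kfence" : "buddy");
	return 0;
}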
include/linux/kfence.h
@@ -70,10 +70,7 @@ static __always_inline bool is_kfence_address_node(const void *addr, const int n
  */
 static __always_inline bool is_kfence_address(const void *addr)
 {
-	if (!static_branch_unlikely(&kfence_once_inited) || unlikely(!virt_addr_valid(addr)))
-		return false;
-
-	return unlikely(is_kfence_address_node(addr, page_to_nid(virt_to_page(addr))));
+	return static_branch_unlikely(&kfence_once_inited) && PageKfence(virt_to_page(addr));
 }
 
 /**
@@ -273,12 +270,10 @@ static __always_inline __must_check bool kfence_free_page(struct page *page)
 {
 	void *addr;
 
-	if (!static_branch_unlikely(&kfence_once_inited))
+	if (!static_branch_unlikely(&kfence_once_inited) || !PageKfence(page))
 		return false;
 
 	addr = page_to_virt(page);
-	if (!is_kfence_address_node(addr, page_to_nid(page)))
-		return false;
 	__kfence_free_page(page, addr);
 	return true;
 }
include/linux/page-flags.h
@@ -141,6 +141,9 @@ enum pageflags {
 #endif
 #ifdef CONFIG_DUPTEXT
 	PG_dup,			/* Page has NUMA replicas */
 #endif
+#ifdef CONFIG_KFENCE
+	PG_kfence,		/* Page in kfence pool */
+#endif
 	__NR_PAGEFLAGS,
 
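Worth noting about this hunk: every conditionally compiled entry such as PG_kfence consumes one bit of page->flags, which must also hold node/zone/section information, so the total number of flags is bounded. A simplified compile-time check of that constraint (the field width below is made up for illustration):

#include <limits.h>

enum pageflags_demo {
	PG_locked_demo,
	PG_dup_demo,      /* only under CONFIG_DUPTEXT in the real tree */
	PG_kfence_demo,   /* only under CONFIG_KFENCE in the real tree */
	__NR_PAGEFLAGS_DEMO,
};

#define NODE_ZONE_BITS 20 /* hypothetical: bits reserved at the top of flags */

_Static_assert(__NR_PAGEFLAGS_DEMO + NODE_ZONE_BITS <=
	       sizeof(unsigned long) * CHAR_BIT,
	       "page flags must fit in one word");

int main(void) { return 0; }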
@@ -460,6 +463,10 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
 PAGEFLAG(Dup, dup, PF_HEAD)
 #endif
 
+#ifdef CONFIG_KFENCE
+__PAGEFLAG(Kfence, kfence, PF_ANY)
+#endif
+
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
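__PAGEFLAG(Kfence, kfence, PF_ANY) is what generates the PageKfence(), __SetPageKfence() and __ClearPageKfence() helpers used by the rest of the patch; the double-underscore variants are the non-atomic ones, suitable for init and teardown paths where no one else can observe the page. A rough userspace model of such macro-generated accessors (simplified; the real macro also applies the PF_ANY head/tail-page policy):

#include <stdbool.h>
#include <stdio.h>

struct page_demo { unsigned long flags; };

enum { PG_kfence_demo };

/* Stamp out test/set/clear helpers for one flag bit. */
#define DEMO_PAGEFLAG(uname, bit)					\
static inline bool Page##uname(const struct page_demo *p)		\
{ return p->flags & (1UL << (bit)); }					\
static inline void __SetPage##uname(struct page_demo *p)		\
{ p->flags |= 1UL << (bit); } /* non-atomic, init/teardown only */	\
static inline void __ClearPage##uname(struct page_demo *p)		\
{ p->flags &= ~(1UL << (bit)); }

DEMO_PAGEFLAG(Kfence, PG_kfence_demo)

int main(void)
{
	struct page_demo p = { 0 };

	__SetPageKfence(&p);
	printf("kfence? %d\n", PageKfence(&p));
	__ClearPageKfence(&p);
	printf("kfence? %d\n", PageKfence(&p));
	return 0;
}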
@@ -858,6 +865,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
 #define __PG_DUP 0
 #endif
 
+#ifdef CONFIG_KFENCE
+#define __PG_KFENCE (1UL << PG_kfence)
+#else
+#define __PG_KFENCE 0
+#endif
+
 /*
  * Flags checked when a page is freed. Pages being freed should not have
  * these flags set. If they are, there is a problem.
include/trace/events/mmflags.h
@@ -91,6 +91,12 @@
 #define IF_HAVE_PG_DUP(flag,string)
 #endif
 
+#ifdef CONFIG_KFENCE
+#define IF_HAVE_PG_KFENCE(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_KFENCE(flag,string)
+#endif
+
 #define __def_pageflag_names \
 	{1UL << PG_locked,	"locked"	}, \
 	{1UL << PG_waiters,	"waiters"	}, \
@@ -119,7 +125,8 @@ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
 IF_HAVE_PG_IDLE(PG_young, "young" ) \
 IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
 IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \
-IF_HAVE_PG_DUP(PG_dup, "dup" )
+IF_HAVE_PG_DUP(PG_dup, "dup" ) \
+IF_HAVE_PG_KFENCE(PG_kfence, "kfence" )
 
 #define show_page_flags(flags) \
 	(flags) ? __print_flags(flags, "|", \
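IF_HAVE_PG_KFENCE appends a {mask, name} pair to the table that __print_flags() uses to render page flags in trace output; when CONFIG_KFENCE is off it expands to nothing, which is why the pair carries its own leading comma. A self-contained sketch of the same trick, with the decoder below standing in for __print_flags():

#include <stdio.h>

#define PG_LOCKED 0
#define PG_KFENCE 1

#define HAVE_KFENCE 1
#if HAVE_KFENCE
#define IF_HAVE_PG_KFENCE(flag, string) , { 1UL << (flag), string }
#else
#define IF_HAVE_PG_KFENCE(flag, string)
#endif

static const struct { unsigned long mask; const char *name; } names[] = {
	{ 1UL << PG_LOCKED, "locked" }
	IF_HAVE_PG_KFENCE(PG_KFENCE, "kfence")   /* note: no comma of its own */
};

/* Plays the role of __print_flags(): prints set flags joined by '|'. */
static void print_flags(unsigned long flags)
{
	const char *sep = "";

	for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		if (flags & names[i].mask) {
			printf("%s%s", sep, names[i].name);
			sep = "|";
		}
	putchar('\n');
}

int main(void)
{
	print_flags((1UL << PG_LOCKED) | (1UL << PG_KFENCE)); /* locked|kfence */
	return 0;
}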
mm/kfence/core.c
@@ -748,6 +748,21 @@ static void rcu_guarded_free(struct rcu_head *h)
 	kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
+static inline void kfence_clear_page_info(unsigned long addr, unsigned long size)
+{
+	unsigned long i;
+
+	for (i = addr; i < addr + size; i += PAGE_SIZE) {
+		struct page *page = virt_to_page(i);
+
+		__ClearPageKfence(page);
+		__ClearPageSlab(page);
+		page->mapping = NULL;
+		atomic_set(&page->_refcount, 1);
+		kfence_unprotect(i);
+	}
+}
+
 static bool __init kfence_init_pool_node(int node)
 {
 	char *__kfence_pool = __kfence_pool_node[node];
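kfence_clear_page_info() walks the pool in PAGE_SIZE strides and returns each struct page to a pristine state (flags cleared, no mapping, refcount 1) before the memory goes back via memblock_free_late(). A userspace model of that stride-and-reset pattern; the struct fields are stand-ins for the real struct page state:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGES 4

struct page_demo { unsigned long flags; int refcount; void *mapping; };

static struct page_demo pages[DEMO_PAGES];

/* Stand-in for virt_to_page(): map an address back to its page struct. */
static struct page_demo *virt_to_page_demo(unsigned long addr, unsigned long base)
{
	return &pages[(addr - base) / DEMO_PAGE_SIZE];
}

static void clear_page_info(unsigned long base, unsigned long size)
{
	for (unsigned long a = base; a < base + size; a += DEMO_PAGE_SIZE) {
		struct page_demo *p = virt_to_page_demo(a, base);

		p->flags = 0;      /* drop PG_kfence, PG_slab, ... */
		p->mapping = NULL; /* no owner */
		p->refcount = 1;   /* like a freshly initialized page */
	}
}

int main(void)
{
	clear_page_info(0x10000UL, DEMO_PAGES * DEMO_PAGE_SIZE);
	printf("page 0 refcount: %d\n", pages[0].refcount);
	return 0;
}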
@@ -775,6 +790,8 @@ static bool __init kfence_init_pool_node(int node)
 	 * enters __slab_free() slow-path.
 	 */
 	for (i = 0; i < kfence_pool_size / PAGE_SIZE; i++) {
+		__SetPageKfence(&pages[i]);
+
 		if (!i || (i % 2))
 			continue;
 
@@ -826,13 +843,11 @@ static bool __init kfence_init_pool_node(int node)
 
 err:
 	/*
-	 * Only release unprotected pages, and do not try to go back and change
-	 * page attributes due to risk of failing to do so as well. If changing
-	 * page attributes for some pages fails, it is very likely that it also
-	 * fails for the first page, and therefore expect addr==__kfence_pool in
-	 * most failure cases.
+	 * We will support freeing unused kfence pools in the following patches,
+	 * so here we can also free all pages in the pool.
	 */
-	memblock_free_late(__pa(addr), kfence_pool_size - (addr - (unsigned long)__kfence_pool));
+	kfence_clear_page_info((unsigned long)__kfence_pool, kfence_pool_size);
+	memblock_free_late(__pa(__kfence_pool), kfence_pool_size);
 	memblock_free_late(__pa(kfence_metadata), metadata_size);
 	__kfence_pool_node[node] = NULL;
 	kfence_metadata_node[node] = NULL;
mm/page_alloc.c
@@ -1281,7 +1281,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 
 	page_cpupid_reset_last(page);
 	kidled_set_page_age(page_pgdat(page), page_to_pfn(page), 0);
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_KFENCE;
 	reset_page_owner(page, order);
 
 	if (!PageHighMem(page)) {
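The new mask expression leans on C operator precedence: ~ binds tighter than |, so page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_KFENCE clears every check-at-prep bit except PG_kfence, letting the flag survive a free while the page remains in the pool. A quick runnable check of the idiom, with arbitrary values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long CHECK_AT_PREP = 0xffUL;    /* bits normally cleared at prep */
	unsigned long PG_KFENCE_BIT = 1UL << 3;  /* the bit to preserve */
	unsigned long flags = 0xabUL;            /* sample flags, includes bit 3 */

	/* Parses as: flags &= (~CHECK_AT_PREP | PG_KFENCE_BIT) */
	flags &= ~CHECK_AT_PREP | PG_KFENCE_BIT;

	assert(flags & PG_KFENCE_BIT);                       /* kfence bit survived */
	assert(!(flags & (CHECK_AT_PREP & ~PG_KFENCE_BIT))); /* the rest cleared */
	printf("flags after prep mask: %#lx\n", flags);      /* prints 0x8 */
	return 0;
}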