anolis: kfence: fix bug and improve performance while freeing pages

ANBZ: #28

Some functions may call free_compound_page() to free an order-0 page
allocated by kfence, e.g., io_mem_free() in io_uring. However,
__free_pages_ok() normally frees only order>0 pages, so both the
checking and the freeing of such a page went wrong there.

This patch also tries to improve performance by adding static branches
to some hot paths.

Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Author:    Tianchen Ding <dtcccc@linux.alibaba.com>
Date:      2021-12-10 22:47:18 +08:00
Committer: Qiao Ma
Parent:    af832b46d8
Commit:    d6b753f731

3 files changed, 21 insertions(+), 16 deletions(-)
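All of the new guards below use the kernel's static key (jump label) mechanism, so when kfence was never initialized each check costs a patched-in NOP instead of a memory load and compare. A minimal sketch of the pattern, assuming only the standard <linux/jump_label.h> API; the names my_feature_inited and do_slow_check() are illustrative, not from this patch:

#include <linux/init.h>
#include <linux/jump_label.h>

/* The key starts disabled; the guarded slow path is skipped via a NOP. */
static DEFINE_STATIC_KEY_FALSE(my_feature_inited);

/* Stand-in for an expensive check such as is_kfence_address_node(). */
static bool do_slow_check(const void *addr)
{
	return addr != NULL;
}

/* Hot path: a single NOP while the key is disabled, a jump once enabled. */
static inline bool my_feature_check(const void *addr)
{
	if (!static_branch_unlikely(&my_feature_inited))
		return false;
	return do_slow_check(addr);
}

/* Init path: patch all call sites once setup has succeeded. */
static int __init my_feature_init(void)
{
	static_branch_enable(&my_feature_inited);
	return 0;
}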


@@ -271,8 +271,12 @@ static __always_inline __must_check bool kfence_free(void *addr)
  */
 static __always_inline __must_check bool kfence_free_page(struct page *page)
 {
-	void *addr = page_to_virt(page);
+	void *addr;
+
+	if (!static_branch_unlikely(&kfence_once_inited))
+		return false;
 
+	addr = page_to_virt(page);
 	if (!is_kfence_address_node(addr, page_to_nid(page)))
 		return false;
 	__kfence_free_page(page, addr);


@@ -1079,6 +1079,7 @@ void __init kfence_init(void)
 	}
 
 	WRITE_ONCE(kfence_enabled, true);
+	static_branch_enable(&kfence_once_inited);
 	if (kfence_num_objects > KFENCE_MAX_SIZE_WITH_INTERVAL) {
 		static_branch_enable(&kfence_skip_interval);
 		static_branch_enable(&kfence_allocation_key);
@@ -1098,8 +1099,6 @@ void __init kfence_init(void)
 		pr_cont("\n");
 	}
 
-	static_branch_enable(&kfence_once_inited);
-
 	return;
 
 fail:
@@ -1186,7 +1185,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 {
 	int node;
 
-	if (!kfence_metadata_node)
+	if (!static_branch_unlikely(&kfence_once_inited))
 		return;
 
 	for_each_node(node)
@@ -1296,7 +1295,12 @@ size_t kfence_ksize(const void *addr)
 void *kfence_object_start(const void *addr)
 {
-	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
+	struct kfence_metadata *meta;
+
+	if (!static_branch_unlikely(&kfence_once_inited))
+		return NULL;
+
+	meta = addr_to_metadata((unsigned long)addr);
 
 	/*
 	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
@@ -1335,7 +1339,7 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 	enum kfence_error_type error_type;
 	unsigned long flags;
 
-	if (!virt_addr_valid(addr))
+	if (!static_branch_unlikely(&kfence_once_inited) || !virt_addr_valid(addr))
 		return false;
 
 	node = virt_to_nid(addr);
 	if (!is_kfence_address_node((void *)addr, node))


@@ -1155,6 +1155,10 @@ static inline int check_free_page(struct page *page)
 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
 		return 0;
 
+	/* It's not performance sensitive when reaching here */
+	if (is_kfence_address(page_to_virt(page)))
+		return 0;
+
 	/* Something has gone sideways, find it */
 	check_free_page_bad(page);
 	return 1;
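The early return just added matters because, as the comment removed from free_pages_prepare() below explains, order-0 pages handed out by kfence in this tree keep PG_reserved set, and PG_reserved is part of PAGE_FLAGS_CHECK_AT_FREE in kernels of this vintage, so page_expected_state() rejects an otherwise healthy kfence page. A rough sketch of the failing condition; is_bad_at_free() is an illustrative name, and the real page_expected_state() also checks refcount, mapping, and memcg state:

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Simplified flags-only view of page_expected_state(): any flag left
 * set from PAGE_FLAGS_CHECK_AT_FREE makes the page look "bad", and
 * PG_reserved (set on kfence pages in this tree) is one of them. */
static bool is_bad_at_free(struct page *page)
{
	return page->flags & PAGE_FLAGS_CHECK_AT_FREE;
}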
@@ -1278,17 +1282,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	page->mapping = NULL;
 	if (memcg_kmem_enabled() && PageKmemcg(page))
 		__memcg_kmem_uncharge_page(page, order);
-	/*
-	 * If debug_pagealloc and kfence enabled at the same time,
-	 * there may be problems at here. check_free_page() will check
-	 * order-0 page, however the order-0 page allocated by kfence is
-	 * marked with PG_reserved.
-	 */
-#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_KFENCE)
-	if (check_free && !is_kfence_address(page_to_virt(page)))
-#else
 	if (check_free)
-#endif
 		bad += check_free_page(page);
 	if (bad)
 		return false;
@@ -1562,6 +1556,9 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	if (!free_pages_prepare(page, order, true))
 		return;
 
+	if (unlikely(!order && kfence_free_page(page)))
+		return;
+
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
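For reference, the path from the commit message that makes the last hunk necessary: io_uring frees its rings through free_compound_page() even when the backing page is order-0, and free_compound_page() lands in __free_pages_ok(). Sketched below as it looks in mainline 5.10-era kernels; the exact signatures in this tree may differ:

/* fs/io_uring.c (mainline 5.10-era, simplified) */
static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);	/* called even for order-0 */
}

/* mm/page_alloc.c (mainline 5.10-era) */
void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page);
	/* compound_order() is 0 for a plain order-0 page, so a kfence
	 * page arrives here with order == 0 and, before this patch,
	 * was never offered back to kfence. */
	__free_pages_ok(page, compound_order(page), FPI_NONE);
}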