anolis: kfence: improve performance of kfence_ksize()

ANBZ: #28

Some functions, such as __alloc_skb(), may call kfence_ksize() frequently.
Although kfence_ksize() returns 0 by default, the out-of-line call itself
still adds overhead. Fix this by calling is_kfence_address() first: it is
always_inline and gated by a static branch, so there should be no
performance regression.

Signed-off-by: Tianchen Ding <dtcccc@linux.alibaba.com>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Author:    Tianchen Ding <dtcccc@linux.alibaba.com>
Date:      2022-03-30 17:24:05 +08:00
Committer: Qiao Ma
Parent:    08a2ef1898
Commit:    805d226242

4 changed files with 4 additions and 9 deletions
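
The pattern itself is simple: hot callers gate the out-of-line
kfence_ksize() lookup behind a cheap inline address check. Below is a
minimal, self-contained userspace sketch of that pattern. All names here
(pool, inited, is_pool_address(), pool_object_size(), slab_object_size(),
object_size()) are illustrative stand-ins, not the kernel symbols, and the
plain `inited` flag stands in for the kernel's static_branch_unlikely(),
which patches the test out of the hot path at runtime:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static char pool[4096];  /* stand-in for the KFENCE object pool */
static bool inited;      /* kernel: a static branch, not a plain flag */

/* Analog of is_kfence_address(): forced inline, so callers pay only a
 * compare-and-branch before deciding whether to take the slow path.
 * The unsigned pointer-difference range check mirrors the kernel's
 * single-comparison trick for "addr within [pool, pool + size)". */
static inline __attribute__((always_inline))
bool is_pool_address(const void *addr)
{
	return inited && (size_t)((const char *)addr - pool) < sizeof(pool);
}

/* Stand-ins for the out-of-line kfence_ksize() metadata lookup and the
 * regular allocator's __ksize(); fixed values just for the demo. */
static size_t pool_object_size(const void *addr) { (void)addr; return 32; }
static size_t slab_object_size(const void *addr) { (void)addr; return 64; }

/* Caller pattern after this patch: the out-of-line lookup runs only on
 * the rare pool hit. */
static size_t object_size(const void *addr)
{
	return is_pool_address(addr) ? pool_object_size(addr)
				     : slab_object_size(addr);
}

int main(void)
{
	char other;

	inited = true;
	/* prints "32 64": pool address vs. ordinary address */
	printf("%zu %zu\n", object_size(pool + 8), object_size(&other));
	return 0;
}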

include/linux/kfence.h

@@ -76,7 +76,7 @@ static __always_inline bool is_kfence_address_area(const void *addr,
  */
 static __always_inline bool is_kfence_address(const void *addr)
 {
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) || defined(CONFIG_DEBUG_KMEMLEAK)
 	/*
	 * KASAN functions such as kasan_record_aux_stack(),
	 * kasan_poison_shadow(), or kasan_unpoison_shadow()

mm/kfence/core.c

@@ -2035,12 +2035,7 @@ alloc:
 size_t kfence_ksize(const void *addr)
 {
-	struct kfence_metadata *meta;
-
-	if (!static_branch_unlikely(&kfence_once_inited))
-		return 0;
-
-	meta = addr_to_metadata((unsigned long)addr);
+	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
 	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is

mm/kmemleak.c

@@ -590,7 +590,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	atomic_set(&object->use_count, 1);
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
-	object->size = kfence_ksize((void *)ptr) ?: size;
+	object->size = is_kfence_address((void *)ptr) ? kfence_ksize((void *)ptr) : size;
 	object->excess_ref = 0;
 	object->min_count = min_count;
 	object->count = 0;	/* white color initially */

mm/slab_common.c

@@ -1179,7 +1179,7 @@ size_t ksize(const void *objp)
 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
 		return 0;
 
-	size = kfence_ksize(objp) ?: __ksize(objp);
+	size = is_kfence_address(objp) ? kfence_ksize(objp) : __ksize(objp);
 	/*
	 * We assume that ksize callers could use whole allocated area,
	 * so we need to unpoison this area.
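
A side note on why the old expressions were not free even though
kfence_ksize() usually returns 0: GNU C's binary "?:" operator evaluates
its left operand exactly once and only falls back to the right operand
when the left is zero, so kfence_ksize() was called unconditionally on
every ksize(). A small stand-alone illustration; the fake_* helpers below
are hypothetical stand-ins, not kernel code, and it must be built with a
compiler supporting the "a ?: b" extension (e.g. GCC):

#include <stddef.h>
#include <stdio.h>

static int kfence_calls;

/* Stand-in for kfence_ksize(): out of line, returns 0 for the common
 * non-KFENCE case -- but the call itself still happens. */
static size_t fake_kfence_ksize(const void *p) { (void)p; kfence_calls++; return 0; }
static size_t fake_ksize(const void *p)        { (void)p; return 64; }
static int    fake_is_kfence(const void *p)    { (void)p; return 0; }

int main(void)
{
	int obj;

	/* Old pattern: GNU "a ?: b" still evaluates a -- one call made. */
	size_t old = fake_kfence_ksize(&obj) ?: fake_ksize(&obj);

	/* New pattern: the cheap predicate short-circuits -- no call made. */
	size_t new = fake_is_kfence(&obj) ? fake_kfence_ksize(&obj) : fake_ksize(&obj);

	/* prints "old=64 new=64 kfence_ksize() calls=1" */
	printf("old=%zu new=%zu kfence_ksize() calls=%d\n", old, new, kfence_calls);
	return 0;
}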