anolis: mm: use oot_page_list to cache pages returned to buddy

ANBZ: #9065

Memory that has been used by out-of-tree (OOT) modules may have been
corrupted, so such pages cannot be returned directly to the buddy
subsystem; instead they are cached on a per-cache list. To keep an
excessive number of cached pages from exhausting memory, the size of
this cache is capped by oot_page_limit.
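
To make the caching policy concrete, the sketch below is a minimal,
self-contained userspace model of the same idea (locking omitted);
oot_cache_page(), oot_get_page(), the fixed-size array and the page
size are inventions for the illustration, not the kernel symbols:

#include <stdio.h>
#include <stdlib.h>

static unsigned int oot_page_limit = 4;  /* max pages kept cached */
static void *oot_cache[64];              /* stand-in for oot_page_list */
static unsigned int oot_page_num;        /* pages currently cached */

/* Free path: keep the page while under the limit, else really free it. */
static void oot_cache_page(void *page)
{
        if (oot_page_num < oot_page_limit) {
                oot_cache[oot_page_num++] = page;
                return;
        }
        free(page);
}

/* Allocation path: prefer a cached page, fall back to a fresh one. */
static void *oot_get_page(void)
{
        if (oot_page_num)
                return oot_cache[--oot_page_num];
        return malloc(4096);
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                oot_cache_page(malloc(4096));
        printf("cached %u of 8 freed pages (limit %u)\n",
               oot_page_num, oot_page_limit);
        free(oot_get_page());
        return 0;
}

In the patch below the list is per kmem_cache, protected by oot_lock,
and the limit is the global oot_page_limit module parameter.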

Signed-off-by: Yi Tao <escape@linux.alibaba.com>
Reviewed-by: Xu Yu <xuyu@linux.alibaba.com>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/3208


@@ -136,6 +136,9 @@ struct kmem_cache {
         unsigned int useroffset;        /* Usercopy region offset */
         unsigned int usersize;          /* Usercopy region size */
 
+        struct list_head oot_page_list;
+        spinlock_t oot_lock;
+        unsigned int oot_page_num;
         struct kmem_cache_node *node[MAX_NUMNODES];
 };


@@ -299,6 +299,9 @@ EXPORT_SYMBOL(is_module_sig_enforced);
 int modules_disabled = 0;
 core_param(nomodule, modules_disabled, bint, 0);
 
+unsigned int oot_page_limit = -1;
+module_param(oot_page_limit, uint, 0600);
+
 /* Waiting for a module to finish initializing? */
 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
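
Note the default above: because oot_page_limit is an unsigned int,
initialising it to -1 wraps to UINT_MAX, so no practical cap is
enforced until the administrator lowers the value (the 0600 mode makes
the parameter writable by root only). A tiny standalone check of that
assumption:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* Mirrors "unsigned int oot_page_limit = -1;" from the hunk
         * above: -1 converted to unsigned int yields UINT_MAX. */
        unsigned int oot_page_limit = -1;

        printf("default oot_page_limit = %u (UINT_MAX = %u)\n",
               oot_page_limit, UINT_MAX);
        return 0;
}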


@@ -587,6 +587,8 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
         create_boot_cache(s, name, size, flags, useroffset, usersize);
         list_add(&s->list, &slab_caches);
+        INIT_LIST_HEAD(&s->oot_page_list);
+        spin_lock_init(&s->oot_lock);
         s->refcount = 1;
         return s;
 }


@@ -1619,15 +1619,32 @@ static void *setup_object(struct kmem_cache *s, struct page *page,
 static inline struct page *alloc_slab_page(struct kmem_cache *s,
                 gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
-        struct page *page;
+        struct page *page, *t;
         unsigned int order = oo_order(oo);
 
         flags |= __GFP_NOKFENCE;
 
+        if (unlikely(s->flags & SLAB_OOT)) {
+                spin_lock(&s->oot_lock);
+                list_for_each_entry_safe(page, t, &s->oot_page_list, slab_list) {
+                        if (page_to_nid(page) == node ||
+                            (node == NUMA_NO_NODE &&
+                             page_to_nid(page) == numa_node_id())) {
+                                list_del(&page->slab_list);
+                                s->oot_page_num--;
+                                spin_unlock(&s->oot_lock);
+                                goto find;
+                        }
+                }
+                spin_unlock(&s->oot_lock);
+        }
+
         if (node == NUMA_NO_NODE)
                 page = alloc_pages(flags, order);
         else
                 page = __alloc_pages_node(node, flags, order);
 
+find:
         if (page)
                 account_slab_page(page, order, s);
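
The lookup above reuses a cached page only when it matches the
requested NUMA node, or the local node when the caller passes
NUMA_NO_NODE; pages on other nodes stay on the list for later
requests. The hypothetical helper below restates that predicate in
isolation; page_nid and local_nid stand in for page_to_nid() and
numa_node_id():

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* True if a cached page on node page_nid may satisfy a request for
 * node 'node' issued from a CPU whose local node is local_nid. */
static bool oot_page_fits_node(int page_nid, int node, int local_nid)
{
        if (node == NUMA_NO_NODE)
                return page_nid == local_nid;
        return page_nid == node;
}

int main(void)
{
        printf("%d %d %d\n",
               oot_page_fits_node(0, 0, 0),            /* exact match      */
               oot_page_fits_node(1, NUMA_NO_NODE, 1), /* local-node match */
               oot_page_fits_node(1, 0, 0));           /* wrong node       */
        return 0;
}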
@@ -1839,6 +1856,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
                 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
 
+extern unsigned int oot_page_limit;
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
         int order = compound_order(page);
@@ -1860,6 +1879,20 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += pages;
         unaccount_slab_page(page, order, s);
+
+#ifdef CONFIG_MODULES
+        if (unlikely(s->flags & SLAB_OOT)) {
+                if (s->oot_page_num < oot_page_limit) {
+                        spin_lock(&s->oot_lock);
+                        list_add(&page->slab_list, &s->oot_page_list);
+                        s->oot_page_num++;
+                        spin_unlock(&s->oot_lock);
+                        return;
+                }
+                pr_info_once("Page in buddy may be tainted by a module in the oot list\n");
+        }
+#endif
         __free_pages(page, order);
 }
@@ -1880,7 +1913,6 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-        WARN_ON_ONCE(s->flags & SLAB_OOT);
         dec_slabs_node(s, page_to_nid(page), page->objects);
         free_slab(s, page);
 }
@@ -2406,8 +2438,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                 if (oldpage) {
                         pobjects = oldpage->pobjects;
                         pages = oldpage->pages;
-                        if (drain && pobjects > slub_cpu_partial(s)
-                            && !(s->flags & SLAB_OOT)) {
+                        if (drain && pobjects > slub_cpu_partial(s)) {
                                 unsigned long flags;
                                 /*
                                  * partial array is full. Move the existing
@@ -3070,8 +3101,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 return;
         }
 
-        if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)
-            && !(s->flags & SLAB_OOT))
+        if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
                 goto slab_empty;
 
         /*