anolis: mm: filter shrinker in mglru background proactive reclaim thread
ANBZ: #8387

Since MGLRU cannot divide slab pages into hot and cold pages, the
reclaim of slab pages is currently bypassed in the MGLRU background
proactive reclaim thread. However, this also bypasses some shrinkers
that should still run, reducing the amount of memory reclaimed. This
patch adds a flag so that a shrinker can indicate that it should also
be invoked by the background proactive reclaim thread.

Signed-off-by: hr567 <hr567@linux.alibaba.com>
Signed-off-by: zhongjiang-ali <zhongjiang-ali@linux.alibaba.com>
Reviewed-by: Simon Guo <wei.guo.simon@linux.alibaba.com>
Acked-by: Ning Zhang <ningzhang@linux.alibaba.com>
Acked-by: Xunlei Pang <xlpang@linux.alibaba.com>
Signed-off-by: zhongjiang-ali <zhongjiang-ali@linux.alibaba.com>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/2798
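For illustration, here is a minimal sketch (not part of the patch; the demo_* names are hypothetical) of how a shrinker would opt in to the background proactive reclaim pass by setting the new SHRINKER_MGLRU_BACKGROUND bit alongside its existing flags:

/*
 * Hypothetical example, not from this patch: a stub shrinker that
 * opts in to the MGLRU background proactive reclaim pass.
 */
static unsigned long demo_count_objects(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return 0;	/* stub: report nothing reclaimable */
}

static unsigned long demo_scan_objects(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return SHRINK_STOP;	/* stub: nothing to scan */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count_objects,
	.scan_objects	= demo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
	/*
	 * Without SHRINKER_MGLRU_BACKGROUND this shrinker would be
	 * skipped by shrink_slab() during MGLRU background reclaim.
	 */
	.flags		= SHRINKER_MEMCG_AWARE | SHRINKER_MGLRU_BACKGROUND,
};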
@@ -103,6 +103,7 @@ struct shrinker {
  * non-MEMCG_AWARE shrinker should not have this flag set.
  */
 #define SHRINKER_NONSLAB	(1 << 2)
+#define SHRINKER_MGLRU_BACKGROUND	(1 << 3)
 
 extern int prealloc_shrinker(struct shrinker *shrinker);
 extern void register_shrinker_prepared(struct shrinker *shrinker);
@@ -3251,7 +3251,7 @@ static struct shrinker deferred_split_shrinker = {
 	.scan_objects = deferred_split_scan,
 	.seeks = DEFAULT_SEEKS,
 	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
-		 SHRINKER_NONSLAB,
+		 SHRINKER_NONSLAB | SHRINKER_MGLRU_BACKGROUND,
 };
 
 #ifdef CONFIG_DEBUG_FS
@@ -838,8 +838,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	 * reclaim will result in the slab cache is reclaimed, which is
 	 * not expected and maybe lead to the performance regression.
 	 */
-	if (lru_gen_enabled() && background_proactive_reclaim())
-		return 0;
+	bool mglru_background = lru_gen_enabled() && background_proactive_reclaim();
 
 	/*
 	 * The root memcg might be allocated even though memcg is disabled
@@ -848,7 +847,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	 * shrink, but skip global shrink. This may result in premature
 	 * oom.
 	 */
-	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
+	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && !mglru_background)
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
 	if (!down_read_trylock(&shrinker_rwsem))
@@ -861,6 +860,9 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 			.memcg = memcg,
 		};
 
+		if (mglru_background && !(shrinker->flags & SHRINKER_MGLRU_BACKGROUND))
+			continue;
+
 		ret = do_shrink_slab(&sc, shrinker, priority);
 		if (ret == SHRINK_EMPTY)
 			ret = 0;
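Taken together, the hunks change shrink_slab() so that MGLRU background proactive reclaim no longer returns early: the condition is recorded in mglru_background, the memcg-aware fast path is skipped for that case, and the global shrinker list is walked with every shrinker lacking SHRINKER_MGLRU_BACKGROUND passed over. Slab shrinkers therefore remain bypassed, while opted-in non-slab shrinkers such as deferred_split_shrinker still run.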