anolis: mm/damon: fix behaviour of init_regions
ANBZ: #1661

A fixed address range of a process can be monitored by setting
init_regions in DAMON. Currently, however, the init_regions are always
discarded when the next primitive update period starts. This patch
fixes that: the init_regions setting is saved when it is written, then
checked and restored at the start of each subsequent primitive update
period.

Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Signed-off-by: Yan Yan <calling@linux.alibaba.com>
Reviewed-by: Xin Hao <xhao@linux.alibaba.com>
Acked-by: Gang Deng <gavin.dg@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/512
commit 281d7cd2a9 (parent 46af6482b2)
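The hunks below touch struct damon_target, damon_new_target() and
damon_free_target(), the debugfs set_init_regions() handler, and the
virtual-address primitive's update path (in upstream terms:
include/linux/damon.h, mm/damon/core.c, mm/damon/dbgfs.c, and
mm/damon/vaddr.c). For orientation, set_init_regions() is reached from
the debugfs init_regions write handler, which rejects writes while a
kdamond is running. A sketch modeled on upstream mm/damon/dbgfs.c
follows; the exact names in this tree are an assumption, not part of
the patch:

static ssize_t dbgfs_init_regions_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t ret = count;
	int err;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		/* regions cannot be replaced while monitoring runs */
		ret = -EBUSY;
		goto unlock_out;
	}

	err = set_init_regions(ctx, kbuf, ret);
	if (err)
		ret = err;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(kbuf);
	return ret;
}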
@@ -75,6 +75,8 @@ struct damon_region {
 struct damon_target {
 	unsigned long id;
 	unsigned int nr_regions;
+	unsigned int nr_init_regions;
+	struct damon_addr_range *init_regions;
 	struct list_head regions_list;
 	struct list_head list;
 	spinlock_t target_lock;
@@ -148,6 +148,8 @@ struct damon_target *damon_new_target(unsigned long id)
 	t->id = id;
 	t->nr_regions = 0;
+	t->nr_init_regions = 0;
+	t->init_regions = NULL;
 	INIT_LIST_HEAD(&t->regions_list);
 	spin_lock_init(&t->target_lock);
@@ -171,6 +173,9 @@ void damon_free_target(struct damon_target *t)
 	spin_lock(&t->target_lock);
 	damon_for_each_region_safe(r, next, t)
 		damon_free_region(r);
+
+	kfree(t->init_regions);
+
 	spin_unlock(&t->target_lock);
 	kfree(t);
 }
@@ -503,6 +503,9 @@ static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
 	damon_for_each_target(t, c) {
 		damon_for_each_region_safe(r, next, t)
 			damon_destroy_region(r, t);
+		kfree(t->init_regions);
+		t->init_regions = NULL;
+		t->nr_init_regions = 0;
 	}
 
 	while (pos < len) {
@@ -516,12 +519,37 @@ static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
 		pos += parsed;
 	}
 
+	/* Set damon_target->init_regions */
+	damon_for_each_target(t, c) {
+		unsigned int nr_regions = t->nr_regions;
+		int idx = 0;
+
+		t->nr_init_regions = nr_regions;
+		t->init_regions = kmalloc_array(nr_regions, sizeof(struct damon_addr_range),
+						GFP_KERNEL);
+		if (t->init_regions == NULL)
+			goto fail;
+		damon_for_each_region_safe(r, next, t) {
+			/* TODO: Never happen? */
+			if (idx == nr_regions) {
+				pr_alert("nr_regions overflow, init_regions already full.");
+				break;
+			}
+			t->init_regions[idx].start = r->ar.start;
+			t->init_regions[idx].end = r->ar.end;
+			idx++;
+		}
+	}
+
 	return 0;
 
 fail:
 	damon_for_each_target(t, c) {
 		damon_for_each_region_safe(r, next, t)
 			damon_destroy_region(r, t);
+		kfree(t->init_regions);
+		t->init_regions = NULL;
+		t->nr_init_regions = 0;
 	}
 	return err;
 }
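The middle of set_init_regions() is elided between the two hunks
above. In upstream it parses one region per input line, in the form
"<target id> <start address> <end address>", and validates each range
via add_init_region() before the new save logic runs. A sketch of that
loop, assumed to match this tree, with variable names taken from the
upstream function:

	/* parse "<target id> <start address> <end address>" per line */
	while (pos < len) {
		ret = sscanf(&str[pos], "%lu %lu %lu%n",
				&target_id, &ar.start, &ar.end, &parsed);
		if (ret != 3)
			break;
		err = add_init_region(c, target_id, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}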
@@ -349,6 +349,45 @@ static void damon_va_apply_three_regions(struct damon_target *t,
 	}
 }
 
+static void damon_va_apply_init_regions(struct damon_target *t)
+{
+	struct damon_region *r, *next, *prev;
+	unsigned int i = 0;
+
+	/* Remove all regions */
+	damon_for_each_region_safe(r, next, t) {
+		damon_destroy_region(r, t);
+	}
+
+	for (i = 0; i < t->nr_init_regions; i++) {
+		struct damon_addr_range ar = t->init_regions[i];
+
+		r = damon_new_region(ar.start, ar.end);
+		if (!r) {
+			pr_err("allocating memory failed for new region: 0x%lx - 0x%lx\n",
+				ar.start, ar.end);
+			goto fail;
+		}
+		damon_add_region(r, t);
+		if (damon_nr_regions(t) > 1) {
+			prev = damon_prev_region(r);
+			if (prev->ar.end > r->ar.start) {
+				/*
+				 * Never happen! this case had been checked during
+				 * setting init_regions.
+				 */
+				goto fail;
+			}
+		}
+	}
+	return;
+
+fail:
+	damon_for_each_region_safe(r, next, t) {
+		damon_destroy_region(r, t);
+	}
+}
+
 /*
  * Update regions for current memory mappings
  */
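The overlap check marked "Never happen!" relies on add_init_region()
having already rejected unordered or overlapping ranges when the
regions were first written through debugfs. Upstream's version of that
helper reads roughly as follows (a sketch: the targetid_is_pid() and
pid_vnr() handling and the two-argument damon_destroy_region() are
assumptions about this tree's backport):

static int add_init_region(struct damon_ctx *c,
			   unsigned long target_id, struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long id;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		id = t->id;
		if (targetid_is_pid(c))
			id = (unsigned long)pid_vnr((struct pid *)id);
		if (id == target_id) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					/* ranges must be sorted and disjoint */
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}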
@@ -358,6 +397,17 @@ void damon_va_update(struct damon_ctx *ctx)
 	struct damon_target *t;
 
 	damon_for_each_target(t, ctx) {
+		/*
+		 * If init_regions have been set, updating new target
+		 * according to init_regions.
+		 */
+		if (t->nr_init_regions) {
+			spin_lock(&t->target_lock);
+			damon_va_apply_init_regions(t);
+			spin_unlock(&t->target_lock);
+
+			continue;
+		}
 		if (damon_va_three_regions(t, three_regions))
 			continue;
 		spin_lock(&t->target_lock);
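For reference, damon_va_update() as it would read after this patch,
with the context the hunk abbreviates filled in. This is a sketch: the
three_regions declaration and the apply/unlock tail are assumed from
the upstream function body and the trailing spin_lock context above.

void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* user-supplied init_regions take precedence */
		if (t->nr_init_regions) {
			spin_lock(&t->target_lock);
			damon_va_apply_init_regions(t);
			spin_unlock(&t->target_lock);

			continue;
		}
		/* otherwise, re-derive the three big mapped regions */
		if (damon_va_three_regions(t, three_regions))
			continue;
		spin_lock(&t->target_lock);
		damon_va_apply_three_regions(t, three_regions);
		spin_unlock(&t->target_lock);
	}
}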