mm/damon/core: add cleanup_target() ops callback
Some DAMON operation sets may need additional cleanup per target.  For
example, the [f]vaddr operation sets need to put the pid of each target.
Each user and the core logic are doing that redundantly.  Add another
DAMON ops callback that will be used for doing such cleanups in the
operations set layer.

[sj@kernel.org: add kernel-doc comment for damon_operations->cleanup_target]
  Link: https://lkml.kernel.org/r/20250715185239.89152-2-sj@kernel.org
[sj@kernel.org: remove damon_ctx->callback kernel-doc comment]
  Link: https://lkml.kernel.org/r/20250715185239.89152-3-sj@kernel.org
Link: https://lkml.kernel.org/r/20250712195016.151108-10-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
commit 7114bc5e01 (parent d4614161fb)
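For illustration, here is a minimal sketch (not part of this patch) of how an operations set could wire up the new callback so the core drops the per-target pid reference on its behalf.  The names example_cleanup_target and example_ops are hypothetical, and the actual [f]vaddr conversion is done outside this commit:

/* Hypothetical callback: release the pid reference held by the target. */
static void example_cleanup_target(struct damon_target *t)
{
	put_pid(t->pid);
}

static struct damon_operations example_ops = {
	.id = DAMON_OPS_VADDR,
	/* ... init, update, target_valid and the other callbacks ... */
	.cleanup_target = example_cleanup_target,
};

With such an ops set registered via damon_register_ops(), damon_destroy_target(t, ctx) puts the pid itself, so callers no longer need an explicit put_pid().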
@@ -576,6 +576,7 @@ enum damon_ops_id {
  * @get_scheme_score:  Get the score of a region for a scheme.
  * @apply_scheme:      Apply a DAMON-based operation scheme.
  * @target_valid:      Determine if the target is valid.
+ * @cleanup_target:    Clean up each target before deallocation.
  * @cleanup:           Clean up the context.
  *
  * DAMON can be extended for various address spaces and usages.  For this,
@@ -608,6 +609,7 @@ enum damon_ops_id {
  * filters (&struct damos_filter) that handled by itself.
  * @target_valid should check whether the target is still valid for the
  * monitoring.
+ * @cleanup_target is called before the target will be deallocated.
  * @cleanup is called from @kdamond just before its termination.
  */
 struct damon_operations {
@@ -623,6 +625,7 @@ struct damon_operations {
 			struct damon_target *t, struct damon_region *r,
 			struct damos *scheme, unsigned long *sz_filter_passed);
 	bool (*target_valid)(struct damon_target *t);
+	void (*cleanup_target)(struct damon_target *t);
 	void (*cleanup)(struct damon_ctx *context);
 };
 
@@ -771,7 +774,6 @@ struct damon_attrs {
  * Accesses to other fields must be protected by themselves.
  *
  * @ops:	Set of monitoring operations for given use cases.
- * @callback:	Set of callbacks for monitoring events notifications.
  *
  * @adaptive_targets:	Head of monitoring targets (&damon_target) list.
  * @schemes:		Head of schemes (&damos) list.
@@ -933,7 +935,7 @@ struct damon_target *damon_new_target(void);
 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
 bool damon_targets_empty(struct damon_ctx *ctx);
 void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
 unsigned int damon_nr_regions(struct damon_target *t);
 
 struct damon_ctx *damon_new_ctx(void);
@@ -502,8 +502,12 @@ void damon_free_target(struct damon_target *t)
 	kfree(t);
 }
 
-void damon_destroy_target(struct damon_target *t)
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
 {
+
+	if (ctx && ctx->ops.cleanup_target)
+		ctx->ops.cleanup_target(t);
+
 	damon_del_target(t);
 	damon_free_target(t);
 }
@@ -551,7 +555,7 @@ static void damon_destroy_targets(struct damon_ctx *ctx)
 	struct damon_target *t, *next_t;
 
 	damon_for_each_target_safe(t, next_t, ctx)
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 }
 
 void damon_destroy_ctx(struct damon_ctx *ctx)
@@ -1137,7 +1141,7 @@ static int damon_commit_targets(
 
 		if (damon_target_has_pid(dst))
 			put_pid(dst_target->pid);
-		damon_destroy_target(dst_target);
+		damon_destroy_target(dst_target, dst);
 		damon_for_each_scheme(s, dst) {
 			if (s->quota.charge_target_from == dst_target) {
 				s->quota.charge_target_from = NULL;
@@ -1156,7 +1160,7 @@ static int damon_commit_targets(
 		err = damon_commit_target(new_target, false,
 				src_target, damon_target_has_pid(src));
 		if (err) {
-			damon_destroy_target(new_target);
+			damon_destroy_target(new_target, NULL);
 			return err;
 		}
 		damon_add_target(dst, new_target);
@@ -1303,7 +1303,7 @@ static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
 	damon_for_each_target_safe(t, next, ctx) {
 		if (has_pid)
 			put_pid(t->pid);
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 	}
 }
 
@@ -1389,7 +1389,7 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 
 	damon_for_each_target_safe(t, next, ctx) {
 		put_pid(t->pid);
-		damon_destroy_target(t);
+		damon_destroy_target(t, ctx);
 	}
 }
 
@@ -58,7 +58,7 @@ static void damon_test_target(struct kunit *test)
 	damon_add_target(c, t);
 	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
 
-	damon_destroy_target(t);
+	damon_destroy_target(t, c);
 	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
 
 	damon_destroy_ctx(c);
@@ -310,7 +310,7 @@ static void damon_test_set_regions(struct kunit *test)
 		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
 		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
 	}
-	damon_destroy_target(t);
+	damon_destroy_target(t, NULL);
 }
 
 static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
@@ -149,7 +149,7 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
 	}
 
-	damon_destroy_target(t);
+	damon_destroy_target(t, NULL);
 }
 
 /*
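As a caller-side usage note, a short sketch of the two call patterns shown in the hunks above; example_teardown is a hypothetical helper, not part of this patch:

/* Hypothetical helper illustrating the two call patterns above. */
static void example_teardown(struct damon_ctx *ctx, struct damon_target *bound,
		struct damon_target *standalone)
{
	/* ops.cleanup_target (if set) runs before the target is freed */
	damon_destroy_target(bound, ctx);

	/* no context: only damon_del_target() and damon_free_target() run */
	damon_destroy_target(standalone, NULL);
}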