Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00
mm, list_lru: refactor the locking code
Cocci is confused by the try-lock, then release-RCU-and-return logic here,
so separate the try-lock part out into a standalone helper. The code is
easier to follow too.

No feature change, fixes:

cocci warnings: (new ones prefixed by >>)
>> mm/list_lru.c:82:3-9: preceding lock on line 77
>> mm/list_lru.c:82:3-9: preceding lock on line 77
   mm/list_lru.c:82:3-9: preceding lock on line 75
   mm/list_lru.c:82:3-9: preceding lock on line 75

Link: https://lkml.kernel.org/r/20250526180638.14609-1-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Julia Lawall <julia.lawall@inria.fr>
Closes: https://lore.kernel.org/r/202505252043.pbT1tBHJ-lkp@intel.com/
Reviewed-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <kasong@tencent.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
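To make the shape of the change concrete, here is a minimal userspace
analogue of the extracted helper: a pthread mutex stands in for the
list_lru spinlock, the irq flag and RCU handling are omitted, and
nr_items == LONG_MIN marks a list that has been torn down. All names
here are illustrative, not the kernel's:

	#include <limits.h>
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct list_lru_one: an item count plus its lock.
	 * nr_items == LONG_MIN marks a list that has been torn down. */
	struct lru_one {
		pthread_mutex_t lock;
		long nr_items;
	};

	/* Same shape as the new lock_list_lru(): take the lock, then check
	 * whether the list died while we waited for it; on failure drop the
	 * lock and return false so the caller can retry or bail out.  The
	 * lock/unlock pairing is now obvious within a single function. */
	static bool lock_lru_one(struct lru_one *l)
	{
		pthread_mutex_lock(&l->lock);
		if (l->nr_items == LONG_MIN) {
			pthread_mutex_unlock(&l->lock);
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct lru_one live = { PTHREAD_MUTEX_INITIALIZER, 3 };
		struct lru_one dead = { PTHREAD_MUTEX_INITIALIZER, LONG_MIN };

		if (lock_lru_one(&live)) {
			printf("locked live list, %ld items\n", live.nr_items);
			pthread_mutex_unlock(&live.lock);
		}
		if (!lock_lru_one(&dead))
			printf("dead list refused, no lock left held\n");
		return 0;
	}

Because the helper either returns true holding the lock or false holding
nothing, a static analyzer (and a human reader) can check the pairing
locally instead of tracing it across the RCU early-return path.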
This commit is contained in:
parent 3800d55250
commit 453742ba5b
mm/list_lru.c
@@ -60,31 +60,35 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 	return &lru->node[nid].lru;
 }
 
+static inline bool lock_list_lru(struct list_lru_one *l, bool irq)
+{
+	if (irq)
+		spin_lock_irq(&l->lock);
+	else
+		spin_lock(&l->lock);
+	if (unlikely(READ_ONCE(l->nr_items) == LONG_MIN)) {
+		if (irq)
+			spin_unlock_irq(&l->lock);
+		else
+			spin_unlock(&l->lock);
+		return false;
+	}
+	return true;
+}
+
 static inline struct list_lru_one *
 lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 		       bool irq, bool skip_empty)
 {
 	struct list_lru_one *l;
-	long nr_items;
 
 	rcu_read_lock();
 again:
 	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
-	if (likely(l)) {
-		if (irq)
-			spin_lock_irq(&l->lock);
-		else
-			spin_lock(&l->lock);
-		nr_items = READ_ONCE(l->nr_items);
-		if (likely(nr_items != LONG_MIN)) {
-			rcu_read_unlock();
-			return l;
-		}
-		if (irq)
-			spin_unlock_irq(&l->lock);
-		else
-			spin_unlock(&l->lock);
+	if (likely(l) && lock_list_lru(l, irq)) {
+		rcu_read_unlock();
+		return l;
 	}
 	/*
 	 * Caller may simply bail out if raced with reparenting or
 	 * may iterate through the list_lru and expect empty slots.
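The hunk is truncated at the comment, but the again: label and the
remark about reparenting suggest the caller-side flow: when
lock_list_lru() fails, either bail out or walk up the hierarchy and
retry. A hedged sketch of that shape in the same userspace style as
above; the parent link and lock_node_or_parent() are hypothetical
stand-ins, not the kernel code:

	#include <limits.h>
	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>

	/* As in the earlier sketch, plus a parent link standing in for
	 * the memcg hierarchy (hypothetical representation). */
	struct lru_node {
		pthread_mutex_t lock;
		long nr_items;
		struct lru_node *parent;
	};

	static bool lock_node(struct lru_node *l)
	{
		pthread_mutex_lock(&l->lock);
		if (l->nr_items == LONG_MIN) {
			pthread_mutex_unlock(&l->lock);
			return false;
		}
		return true;
	}

	/* Retry shape suggested by the again: label: a dead node defers
	 * to its parent.  The real code also honours skip_empty and RCU,
	 * which the truncated diff does not show. */
	static struct lru_node *lock_node_or_parent(struct lru_node *l)
	{
	again:
		if (!l)
			return NULL;	/* ran out of ancestors, bail out */
		if (lock_node(l))
			return l;	/* locked and still live */
		l = l->parent;		/* raced with reparenting, go up */
		goto again;
	}

	int main(void)
	{
		struct lru_node root = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
		struct lru_node dead = { PTHREAD_MUTEX_INITIALIZER, LONG_MIN, &root };
		struct lru_node *l = lock_node_or_parent(&dead);

		/* the dead child was skipped; we hold the root's lock */
		if (l)
			pthread_mutex_unlock(&l->lock);
		return 0;
	}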