
mm: optimize lru_note_cost() by adding lru_note_cost_unlock_irq()

Dropping a lock, just to demand it again for an afterthought, cannot be
good if contended: convert lru_note_cost() to lru_note_cost_unlock_irq().

[hughd@google.com: delete unneeded comment]
  Link: https://lkml.kernel.org/r/dbf9352a-1ed9-a021-c0c7-9309ac73e174@google.com
Link: https://lkml.kernel.org/r/21100102-51b6-79d5-03db-1bb7f97fa94c@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Tested-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Hugh Dickins, 2025-07-13 12:57:18 -07:00; committed by Andrew Morton
parent 526660b950
commit 3865301dc5
3 changed files with 25 additions and 21 deletions
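
The pattern being optimized is easy to model outside the kernel. Below is a minimal userspace sketch (plain C with pthreads; note_cost(), note_cost_unlock() and the reclaim_*() callers are illustrative names, not kernel APIs, and the mutex stands in for lruvec->lru_lock) contrasting the old shape, where the caller unlocks only for the callee to re-take the same lock, with the new shape, where the callee inherits the held lock and releases it when done.

/*
 * Minimal userspace model of the locking pattern -- an illustration,
 * not kernel code.
 */
#include <pthread.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long file_cost;

/* Old shape: the callee takes the lock itself, so a caller already
 * holding it must unlock first -- an extra unlock/lock round trip. */
static void note_cost(unsigned long cost)
{
	pthread_mutex_lock(&lru_lock);
	file_cost += cost;
	pthread_mutex_unlock(&lru_lock);
}

/* New shape: the callee is entered with the lock held and releases it,
 * mirroring lru_note_cost_unlock_irq()'s __releases() contract. */
static void note_cost_unlock(unsigned long cost)
{
	file_cost += cost;
	pthread_mutex_unlock(&lru_lock);
}

static void reclaim_old(void)
{
	pthread_mutex_lock(&lru_lock);
	/* ... scan pages under the lock ... */
	pthread_mutex_unlock(&lru_lock);
	note_cost(1);		/* immediately re-takes the hot lock */
}

static void reclaim_new(void)
{
	pthread_mutex_lock(&lru_lock);
	/* ... scan pages under the lock ... */
	note_cost_unlock(1);	/* lock handed off and released once */
}

int main(void)
{
	reclaim_old();
	reclaim_new();
	return 0;
}

Under contention, the old shape pays an unlock/lock round trip on a hot lock for every call; the new shape turns that into a single release.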

include/linux/swap.h

@@ -376,8 +376,9 @@ extern unsigned long totalreserve_pages;
 
 /* linux/mm/swap.c */
-void lru_note_cost(struct lruvec *lruvec, bool file,
-		   unsigned int nr_io, unsigned int nr_rotated);
+void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
+			      unsigned int nr_io, unsigned int nr_rotated)
+	__releases(lruvec->lru_lock);
 void lru_note_cost_refault(struct folio *);
 void folio_add_lru(struct folio *);
 void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
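
The __releases() marker is a Sparse lock-context annotation rather than a C language feature: when the tree is checked with sparse (e.g. make C=1), it declares that the function is entered with lruvec->lru_lock held and returns with it released, so mismatched lock/unlock paths get flagged. A rough standalone sketch of how such annotations pair up, with macro bodies modeled on the kernel's definitions in include/linux/compiler_types.h (treat the details as an approximation; lruvec_lock_irq() is a hypothetical helper):

/*
 * Sparse-style lock-context annotations; under an ordinary compiler
 * (__CHECKER__ unset) they expand to nothing.
 */
#include <stdbool.h>

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

struct lruvec;		/* opaque in this sketch */

/* Hypothetical helper: entered unlocked, returns with the lock held. */
void lruvec_lock_irq(struct lruvec *lruvec)
	__acquires(lruvec->lru_lock);

/* The new contract: entered with the lock held, returns unlocked. */
void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
			      unsigned int nr_io, unsigned int nr_rotated)
	__releases(lruvec->lru_lock);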

mm/swap.c

@@ -237,8 +237,9 @@ void folio_rotate_reclaimable(struct folio *folio)
 	folio_batch_add_and_move(folio, lru_move_tail, true);
 }
 
-void lru_note_cost(struct lruvec *lruvec, bool file,
-		   unsigned int nr_io, unsigned int nr_rotated)
+void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
+			      unsigned int nr_io, unsigned int nr_rotated)
+	__releases(lruvec->lru_lock)
 {
 	unsigned long cost;
@@ -250,18 +251,14 @@ void lru_note_cost(struct lruvec *lruvec, bool file,
 	 * different between them, adjust scan balance for CPU work.
 	 */
 	cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
+	if (!cost) {
+		spin_unlock_irq(&lruvec->lru_lock);
+		return;
+	}
 
-	do {
+	for (;;) {
 		unsigned long lrusize;
 
-		/*
-		 * Hold lruvec->lru_lock is safe here, since
-		 * 1) The pinned lruvec in reclaim, or
-		 * 2) From a pre-LRU page during refault (which also holds the
-		 *    rcu lock, so would be safe even if the page was on the LRU
-		 *    and could move simultaneously to a new lruvec).
-		 */
-		spin_lock_irq(&lruvec->lru_lock);
 		/* Record cost event */
 		if (file)
 			lruvec->file_cost += cost;
@@ -285,13 +282,21 @@ void lru_note_cost(struct lruvec *lruvec, bool file,
 			lruvec->file_cost /= 2;
 			lruvec->anon_cost /= 2;
 		}
 
 		spin_unlock_irq(&lruvec->lru_lock);
-	} while ((lruvec = parent_lruvec(lruvec)));
+
+		lruvec = parent_lruvec(lruvec);
+		if (!lruvec)
+			break;
+		spin_lock_irq(&lruvec->lru_lock);
+	}
 }
 
 void lru_note_cost_refault(struct folio *folio)
 {
-	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
-		      folio_nr_pages(folio), 0);
+	struct lruvec *lruvec;
+
+	lruvec = folio_lruvec_lock_irq(folio);
+	lru_note_cost_unlock_irq(lruvec, folio_is_file_lru(folio),
+				 folio_nr_pages(folio), 0);
 }
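
The reshaped loop implements a lock handoff up the memcg hierarchy: each level is charged under its own lru_lock, which is dropped before the parent's is taken, and the function is entered with the first level's lock already held. A compact userspace model of that walk (pthreads again; struct node, charge_up_unlock() and the ->parent pointer are illustrative stand-ins for lruvec, lru_note_cost_unlock_irq() and parent_lruvec()):

/*
 * Userspace model of the reworked hierarchy walk -- an illustration,
 * not kernel code.
 */
#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t lock;
	unsigned long cost;
	struct node *parent;
};

/* Called with node->lock held; returns with no locks held.  At most
 * one lock is held at any moment during the walk. */
static void charge_up_unlock(struct node *node, unsigned long cost)
{
	for (;;) {
		node->cost += cost;		/* charge this level */
		pthread_mutex_unlock(&node->lock);
		node = node->parent;		/* walk toward the root */
		if (!node)
			break;
		pthread_mutex_lock(&node->lock);
	}
}

At most one lock is held at a time, as before; the saving is at entry, where the first lruvec's lock, already held by the caller, is no longer dropped and immediately re-taken.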

mm/vmscan.c

@@ -2053,9 +2053,9 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	__count_vm_events(item, nr_reclaimed);
 	count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-	spin_unlock_irq(&lruvec->lru_lock);
 
-	lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
+	lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
+				 nr_scanned - nr_reclaimed);
 
 	/*
 	 * If dirty folios are scanned that are not queued for IO, it
@@ -2201,10 +2201,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	spin_unlock_irq(&lruvec->lru_lock);
 
-	if (nr_rotated)
-		lru_note_cost(lruvec, file, 0, nr_rotated);
+	lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated);
 
 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
 					  nr_deactivate, nr_rotated, sc->priority, file);
 }