Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm: Remove swap_writepage() and shmem_writepage()
Call swap_writeout() and shmem_writeout() from pageout() instead.

Signed-off-by: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Link: https://lore.kernel.org/r/20250402150005.2309458-9-willy@infradead.org
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 84798514db
parent fe75adffac
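The diff below is mostly a mechanical rename (swap_writepage() to swap_writeout(), shmem_writepage() to shmem_writeout()) plus a new callback selection in pageout(). For orientation, here is a minimal userspace sketch of that dispatch; the struct layouts and the is_shmem/is_anon fields are stand-ins invented for this mock and are not the kernel implementation, where the checks are shmem_mapping(mapping) and folio_test_anon(folio). Only the control flow mirrors the mm/vmscan.c hunk further down.

/*
 * Userspace mock (not kernel code): reclaim now picks shmem_writeout()
 * for shmem mappings and swap_writeout() for anonymous folios instead of
 * calling mapping->a_ops->writepage().
 */
#include <stdbool.h>
#include <stdio.h>

struct folio { bool is_shmem; bool is_anon; };
struct writeback_control { long nr_to_write; };

static int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
{
	(void)folio; (void)wbc;
	puts("shmem folio -> shmem_writeout()");
	return 0;
}

static int swap_writeout(struct folio *folio, struct writeback_control *wbc)
{
	(void)folio; (void)wbc;
	puts("anon folio -> swap_writeout()");
	return 0;
}

/* Mirrors the control flow added to mm/vmscan.c:pageout() below. */
static int pageout(struct folio *folio, struct writeback_control *wbc)
{
	int (*writeout)(struct folio *, struct writeback_control *);

	if (folio->is_shmem)
		writeout = shmem_writeout;
	else if (folio->is_anon)
		writeout = swap_writeout;
	else
		return -1;	/* the kernel returns PAGE_ACTIVATE here */

	return writeout(folio, wbc);
}

int main(void)
{
	struct writeback_control wbc = { .nr_to_write = 1 };
	struct folio shmem_folio = { .is_shmem = true, .is_anon = false };
	struct folio anon_folio = { .is_shmem = false, .is_anon = true };

	pageout(&shmem_folio, &wbc);
	pageout(&anon_folio, &wbc);
	return 0;
}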
@@ -37,7 +37,7 @@
 enum wbt_flags {
 	WBT_TRACKED = 1,	/* write, tracked for throttling */
 	WBT_READ = 2,		/* read */
-	WBT_SWAP = 4,		/* write, from swap_writepage() */
+	WBT_SWAP = 4,		/* write, from swap_writeout() */
 	WBT_DISCARD = 8,	/* discard */
 
 	WBT_NR_BITS = 4,	/* number of bits */
@@ -237,9 +237,8 @@ static void swap_zeromap_folio_clear(struct folio *folio)
 	 * We may have stale swap cache pages in memory: notice
 	 * them here and get rid of the unnecessary final write.
 	 */
-int swap_writepage(struct page *page, struct writeback_control *wbc)
+int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-	struct folio *folio = page_folio(page);
 	int ret;
 
 	if (folio_free_swap(folio)) {
mm/shmem.c (23 lines changed)

@@ -98,7 +98,7 @@ static struct vfsmount *shm_mnt __ro_after_init;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * shmem_fallocate communicates with shmem_fault or shmem_writeout via
  * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
@@ -107,7 +107,7 @@ struct shmem_falloc {
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
-	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
+	pgoff_t nr_unswapped;	/* how often writeout refused to swap out */
 };
 
 struct shmem_options {
@@ -446,7 +446,7 @@ static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
 	/*
 	 * Special case: whereas normally shmem_recalc_inode() is called
 	 * after i_mapping->nrpages has already been adjusted (up or down),
-	 * shmem_writepage() has to raise swapped before nrpages is lowered -
+	 * shmem_writeout() has to raise swapped before nrpages is lowered -
 	 * to stop a racing shmem_recalc_inode() from thinking that a page has
 	 * been freed. Compensate here, to avoid the need for a followup call.
 	 */
@@ -1536,11 +1536,6 @@ int shmem_unuse(unsigned int type)
 	return error;
 }
 
-static int shmem_writepage(struct page *page, struct writeback_control *wbc)
-{
-	return shmem_writeout(page_folio(page), wbc);
-}
-
 /**
  * shmem_writeout - Write the folio to swap
  * @folio: The folio to write
@@ -1558,13 +1553,6 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
 	int nr_pages;
 	bool split = false;
 
-	/*
-	 * Our capabilities prevent regular writeback or sync from ever calling
-	 * shmem_writepage; but a stacking filesystem might use ->writepage of
-	 * its underlying filesystem, in which case tmpfs should write out to
-	 * swap only in response to memory pressure, and not for the writeback
-	 * threads or sync.
-	 */
 	if (WARN_ON_ONCE(!wbc->for_reclaim))
 		goto redirty;
 
@@ -1653,7 +1641,7 @@ try_split:
 
 		mutex_unlock(&shmem_swaplist_mutex);
 		BUG_ON(folio_mapped(folio));
-		return swap_writepage(&folio->page, wbc);
+		return swap_writeout(folio, wbc);
 	}
 
 	list_del_init(&info->swaplist);
@@ -3776,7 +3764,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 			index--;
 
 			/*
-			 * Inform shmem_writepage() how far we have reached.
+			 * Inform shmem_writeout() how far we have reached.
 			 * No need for lock or barrier: we have the page lock.
 			 */
 			if (!folio_test_uptodate(folio))
@@ -5199,7 +5187,6 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 }
 
 static const struct address_space_operations shmem_aops = {
-	.writepage	= shmem_writepage,
 	.dirty_folio	= noop_dirty_folio,
 #ifdef CONFIG_TMPFS
 	.write_begin	= shmem_write_begin,
@@ -20,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
 		__swap_read_unplug(plug);
 }
 void swap_write_unplug(struct swap_iocb *sio);
-int swap_writepage(struct page *page, struct writeback_control *wbc);
+int swap_writeout(struct folio *folio, struct writeback_control *wbc);
 void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
 
 /* linux/mm/swap_state.c */
@@ -141,7 +141,7 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 	return NULL;
 }
 
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+static inline int swap_writeout(struct folio *f, struct writeback_control *wbc)
 {
 	return 0;
 }
@@ -30,7 +30,6 @@
  * vmscan's shrink_folio_list.
  */
 static const struct address_space_operations swap_aops = {
-	.writepage	= swap_writepage,
 	.dirty_folio	= noop_dirty_folio,
 #ifdef CONFIG_MIGRATION
 	.migrate_folio	= migrate_folio,
@@ -2359,7 +2359,7 @@ retry:
 	 * Limit the number of retries? No: when mmget_not_zero()
 	 * above fails, that mm is likely to be freeing swap from
 	 * exit_mmap(), which proceeds at its own independent pace;
-	 * and even shmem_writepage() could have been preempted after
+	 * and even shmem_writeout() could have been preempted after
 	 * folio_alloc_swap(), temporarily hiding that swap. It's easy
 	 * and robust (though cpu-intensive) just to keep retrying.
 	 */
mm/vmscan.c (28 lines changed)

@@ -653,16 +653,16 @@ typedef enum {
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 			 struct swap_iocb **plug, struct list_head *folio_list)
 {
+	int (*writeout)(struct folio *, struct writeback_control *);
+
 	/*
-	 * If the folio is dirty, only perform writeback if that write
-	 * will be non-blocking.  To prevent this allocation from being
-	 * stalled by pagecache activity.  But note that there may be
-	 * stalls if we need to run get_block().  We could test
-	 * PagePrivate for that.
-	 *
-	 * If this process is currently in __generic_file_write_iter() against
-	 * this folio's queue, we can perform writeback even if that
-	 * will block.
+	 * We no longer attempt to writeback filesystem folios here, other
+	 * than tmpfs/shmem.  That's taken care of in page-writeback.
+	 * If we find a dirty filesystem folio at the end of the LRU list,
+	 * typically that means the filesystem is saturating the storage
+	 * with contiguous writes and telling it to write a folio here
+	 * would only make the situation worse by injecting an element
+	 * of random access.
 	 *
 	 * If the folio is swapcache, write it back even if that would
 	 * block, for some throttling.  This happens by accident, because
@@ -685,7 +685,11 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		}
 		return PAGE_KEEP;
 	}
-	if (mapping->a_ops->writepage == NULL)
+	if (shmem_mapping(mapping))
+		writeout = shmem_writeout;
+	else if (folio_test_anon(folio))
+		writeout = swap_writeout;
+	else
 		return PAGE_ACTIVATE;
 
 	if (folio_clear_dirty_for_io(folio)) {
@@ -708,7 +712,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		wbc.list = folio_list;
 
 		folio_set_reclaim(folio);
-		res = mapping->a_ops->writepage(&folio->page, &wbc);
+		res = writeout(folio, &wbc);
 		if (res < 0)
 			handle_write_error(mapping, folio, res);
 		if (res == AOP_WRITEPAGE_ACTIVATE) {
@@ -717,7 +721,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		}
 
 		if (!folio_test_writeback(folio)) {
-			/* synchronous write or broken a_ops? */
+			/* synchronous write? */
 			folio_clear_reclaim(folio);
 		}
 		trace_mm_vmscan_write_folio(folio);