
mm: rename __PageMovable() to page_has_movable_ops()

Let's make it clearer that we are talking about movable_ops pages.

While at it, convert a VM_BUG_ON to a VM_WARN_ON_ONCE_PAGE.
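
For reference, this is what the renamed helper boils down to, distilled
from the include/linux/page-flags.h and include/linux/migrate.h hunks
below (a sketch of the post-patch state, not new code):

	/* A page has movable_ops iff its mapping field is tagged movable. */
	static __always_inline bool page_has_movable_ops(const struct page *page)
	{
		return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
	}

	/* Misuse now warns once per page instead of panicking via VM_BUG_ON. */
	static inline
	const struct movable_operations *page_movable_ops(struct page *page)
	{
		VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);

		return (const struct movable_operations *)
			((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
	}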

Link: https://lkml.kernel.org/r/20250704102524.326966-17-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    David Hildenbrand, 2025-07-04 12:25:10 +02:00
Committer: Andrew Morton
commit d4fb4587bd (parent 22d103aef0)
8 changed files with 20 additions and 25 deletions

include/linux/migrate.h

@@ -115,7 +115,7 @@ static inline void __SetPageMovable(struct page *page,
 static inline
 const struct movable_operations *page_movable_ops(struct page *page)
 {
-	VM_BUG_ON(!__PageMovable(page));
+	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
 
 	return (const struct movable_operations *)
 		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);

include/linux/page-flags.h

@@ -750,7 +750,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
 			PAGE_MAPPING_MOVABLE;
 }
 
-static __always_inline bool __PageMovable(const struct page *page)
+static __always_inline bool page_has_movable_ops(const struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
 			PAGE_MAPPING_MOVABLE;

mm/compaction.c

@@ -1056,11 +1056,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 * Skip any other type of page
 			 */
 			if (!PageLRU(page)) {
-				/*
-				 * __PageMovable can return false positive so we need
-				 * to verify it under page_lock.
-				 */
-				if (unlikely(__PageMovable(page)) &&
+				/* Isolation code will deal with any races. */
+				if (unlikely(page_has_movable_ops(page)) &&
 						!PageIsolated(page)) {
 					if (locked) {
 						unlock_page_lruvec_irqrestore(locked, flags);

mm/memory-failure.c

@@ -1388,8 +1388,8 @@ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
 	if (PageSlab(page))
 		return false;
 
-	/* Soft offline could migrate non-LRU movable pages */
-	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
+	/* Soft offline could migrate movable_ops pages */
+	if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page))
 		return true;
 
 	return PageLRU(page) || is_free_buddy_page(page);

mm/memory_hotplug.c

@@ -1739,8 +1739,8 @@ bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
- * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
- * non-lru movable pages and hugepages). Will skip over most unmovable
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU and
+ * hugetlb folios, movable_ops pages). Will skip over most unmovable
  * pages (esp., pages that can be skipped when offlining), but bail out on
  * definitely unmovable pages.
  *
@@ -1759,13 +1759,11 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 		struct folio *folio;
 
 		page = pfn_to_page(pfn);
-		if (PageLRU(page))
-			goto found;
-		if (__PageMovable(page))
+		if (PageLRU(page) || page_has_movable_ops(page))
 			goto found;
 
 		/*
-		 * PageOffline() pages that are not marked __PageMovable() and
+		 * PageOffline() pages that do not have movable_ops and
 		 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
 		 * definitely unmovable. If their reference count would be 0,
 		 * they could at least be skipped when offlining memory.

mm/migrate.c

@@ -94,7 +94,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 	 * Note that once a page has movable_ops, it will stay that way
 	 * until the page was freed.
 	 */
-	if (unlikely(!__PageMovable(page)))
+	if (unlikely(!page_has_movable_ops(page)))
 		goto out_putfolio;
 
 	/*
@@ -111,7 +111,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!folio_trylock(folio)))
 		goto out_putfolio;
 
-	VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
+	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
 	if (PageIsolated(page))
 		goto out_no_isolated;
 
@@ -153,7 +153,7 @@ static void putback_movable_ops_page(struct page *page)
 	 */
 	struct folio *folio = page_folio(page);
 
-	VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
+	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
 	VM_WARN_ON_ONCE_PAGE(!PageIsolated(page), page);
 	folio_lock(folio);
 	page_movable_ops(page)->putback_page(page);
@@ -194,7 +194,7 @@ static int migrate_movable_ops_page(struct page *dst, struct page *src,
 {
 	int rc = MIGRATEPAGE_SUCCESS;
 
-	VM_WARN_ON_ONCE_PAGE(!__PageMovable(src), src);
+	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
 	VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
 	rc = page_movable_ops(src)->migrate_page(dst, src, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
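
For context on the mm/migrate.c hunks above: page_movable_ops() returns
the driver-provided callback table that the isolate/migrate/putback
paths dispatch to. Roughly, per include/linux/migrate.h (shown here as
background only, not part of this patch):

	struct movable_operations {
		/* Isolate the page from its owner so it can be migrated. */
		bool (*isolate_page)(struct page *page, isolate_mode_t mode);
		/* Move the contents of src over to dst. */
		int (*migrate_page)(struct page *dst, struct page *src,
				enum migrate_mode mode);
		/* Return a page whose migration was aborted to its owner. */
		void (*putback_page)(struct page *page);
	};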

mm/page_alloc.c

@@ -2006,7 +2006,7 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 			 * migration are movable. But we don't actually try
 			 * isolating, as that would be expensive.
 			 */
-			if (PageLRU(page) || __PageMovable(page))
+			if (PageLRU(page) || page_has_movable_ops(page))
 				(*num_movable)++;
 			pfn++;
 		}

mm/page_isolation.c

@@ -21,9 +21,9 @@
  * consequently belong to a single zone.
  *
  * PageLRU check without isolation or lru_lock could race so that
- * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
- * check without lock_page also may miss some movable non-lru pages at
- * race condition. So you can't expect this function should be exact.
+ * MIGRATE_MOVABLE block might include unmovable pages. Similarly, pages
+ * with movable_ops can only be identified some time after they were
+ * allocated. So you can't expect this function should be exact.
 *
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., dumping), it has to make sure that it
@@ -133,7 +133,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
 		if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageOffline(page))
 			continue;
 
-		if (__PageMovable(page) || PageLRU(page))
+		if (PageLRU(page) || page_has_movable_ops(page))
 			continue;
 
 		/*
@@ -421,7 +421,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn,
 			 * proper free and split handling for them.
 			 */
 			VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
-			VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
+			VM_WARN_ON_ONCE_PAGE(page_has_movable_ops(page), page);
 			goto failed;
 		}