mm/huge_memory: refactor after-split (page) cache code

Smatch/Coverity checkers report NULL mapping dereference issues[1][2][3]
every time the code is modified, because they cannot tell that mapping
cannot be NULL when a folio is in the page cache.  Refactor the code to
make this explicit.

Remove "end = -1" for anonymous folios, since after code refactoring, end
is no longer used by anonymous folio handling code.

No functional change is intended.
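
For illustration, here is a minimal stand-alone C sketch of the control-flow
shape the refactor moves to.  The struct, field, and function names below are
simplified stand-ins, not the kernel types or helpers: handling each mutually
exclusive case with an early continue leaves mapping provably non-NULL
everywhere it is dereferenced, which is what the checkers could not see in the
old else-if chain.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; illustration only. */
struct address_space_stub { const char *name; };
struct folio_stub {
	struct address_space_stub *mapping;	/* NULL for anonymous folios */
	bool in_swap_cache;
	long index;
};

/*
 * Old shape: the page-cache update sat in an "else if (mapping)" branch
 * after code that already dereferenced mapping, which static checkers
 * flagged.  New shape: each mutually exclusive case ends in an early
 * continue, so mapping is provably non-NULL where it is used.
 */
static void after_split_fixup(struct folio_stub *folios, size_t n, long end)
{
	for (size_t i = 0; i < n; i++) {
		struct folio_stub *f = &folios[i];

		if (f->in_swap_cache) {		/* anon folio with swap cache */
			printf("folio %zu: update swap cache entry\n", i);
			continue;
		}

		if (!f->mapping) {		/* anon folio without swap cache */
			printf("folio %zu: nothing to do\n", i);
			continue;
		}

		/* From here on, f->mapping is known to be non-NULL. */
		if (f->index < end) {
			printf("folio %zu: keep in %s\n", i, f->mapping->name);
			continue;
		}

		printf("folio %zu: beyond EOF, drop from %s\n",
		       i, f->mapping->name);
	}
}

int main(void)
{
	struct address_space_stub m = { "page cache" };
	struct folio_stub folios[] = {
		{ .mapping = NULL, .in_swap_cache = true,  .index = 0 },
		{ .mapping = NULL, .in_swap_cache = false, .index = 0 },
		{ .mapping = &m,   .in_swap_cache = false, .index = 1 },
		{ .mapping = &m,   .in_swap_cache = false, .index = 9 },
	};

	after_split_fixup(folios, sizeof(folios) / sizeof(folios[0]), 4);
	return 0;
}

Any C99 compiler builds this sketch; the only property it demonstrates is that
every f->mapping dereference is dominated by the !f->mapping early continue.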

Link: https://lkml.kernel.org/r/20250718023000.4044406-7-ziy@nvidia.com
Link: https://lore.kernel.org/linux-mm/2afe3d59-aca5-40f7-82a3-a6d976fb0f4f@stanley.mountain/ [1]
Link: https://lore.kernel.org/oe-kbuild/64b54034-f311-4e7d-b935-c16775dbb642@suswa.mountain/ [2]
Link: https://lore.kernel.org/linux-mm/20250716145804.4836-1-antonio@mandelbit.com/ [3]
Link: https://lkml.kernel.org/r/20250718183720.4054515-7-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <k.shutemov@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -3640,7 +3640,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 			ret = -EBUSY;
 			goto out;
 		}
-		end = -1;
 		mapping = NULL;
 		anon_vma_lock_write(anon_vma);
 	} else {
@@ -3793,6 +3792,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		 */
 		for (new_folio = folio_next(folio); new_folio != end_folio;
 		     new_folio = next) {
+			unsigned long nr_pages = folio_nr_pages(new_folio);
+
 			next = folio_next(new_folio);
 			expected_refs = folio_expected_ref_count(new_folio) + 1;
@@ -3800,25 +3801,36 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 			lru_add_split_folio(folio, new_folio, lruvec, list);
 
-			/* Some pages can be beyond EOF: drop them from cache */
-			if (new_folio->index >= end) {
-				if (shmem_mapping(mapping))
-					nr_shmem_dropped += folio_nr_pages(new_folio);
-				else if (folio_test_clear_dirty(new_folio))
-					folio_account_cleaned(
-						new_folio,
-						inode_to_wb(mapping->host));
-				__filemap_remove_folio(new_folio, NULL);
-				folio_put_refs(new_folio,
-					       folio_nr_pages(new_folio));
-			} else if (mapping) {
-				__xa_store(&mapping->i_pages, new_folio->index,
-					   new_folio, 0);
-			} else if (swap_cache) {
+			/*
+			 * Anonymous folio with swap cache.
+			 * NOTE: shmem in swap cache is not supported yet.
+			 */
+			if (swap_cache) {
 				__xa_store(&swap_cache->i_pages,
 					   swap_cache_index(new_folio->swap),
 					   new_folio, 0);
+				continue;
 			}
+
+			/* Anonymous folio without swap cache */
+			if (!mapping)
+				continue;
+
+			/* Add the new folio to the page cache. */
+			if (new_folio->index < end) {
+				__xa_store(&mapping->i_pages, new_folio->index,
+					   new_folio, 0);
+				continue;
+			}
+
+			/* Drop folio beyond EOF: ->index >= end */
+			if (shmem_mapping(mapping))
+				nr_shmem_dropped += nr_pages;
+			else if (folio_test_clear_dirty(new_folio))
+				folio_account_cleaned(
+					new_folio, inode_to_wb(mapping->host));
+			__filemap_remove_folio(new_folio, NULL);
+			folio_put_refs(new_folio, nr_pages);
 		}
 
 		/*
 		 * Unfreeze @folio only after all page cache entries, which