mm/hugetlb: convert use of struct page to folio in __unmap_hugepage_range()

In __unmap_hugepage_range(), the "page" pointer always points to the first
page of a huge page, which guarantees there is a folio associated with it.
Convert the "page" pointer to use a folio.
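
A rough sketch of the per-pte pattern after the conversion (illustrative
only, not the patch itself: example_unmap_step() is a made-up helper, the
usual <linux/mm.h>, <linux/hugetlb.h> and <linux/rmap.h> includes are
assumed, and the locking, TLB and reservation handling are omitted):

	/* Illustrative sketch only, not code from this patch. */
	static void example_unmap_step(pte_t pte, struct folio *folio,
				       bool folio_provided)
	{
		/* Previously: struct page *page = pte_page(pte); */
		if (!folio_provided)
			folio = page_folio(pte_page(pte));  /* look up the folio once */

		if (huge_pte_dirty(pte))
			folio_mark_dirty(folio);            /* was set_page_dirty(page) */

		hugetlb_remove_rmap(folio);                 /* was hugetlb_remove_rmap(page_folio(page)) */
	}

When the caller supplies a specific folio, the real code additionally skips
ptes whose folio does not match; the sketch leaves that filtering out.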

Link: https://lkml.kernel.org/r/20250505182345.506888-6-nifan.cxl@gmail.com
Signed-off-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
@@ -5843,11 +5843,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    struct folio *folio, zap_flags_t zap_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	const bool folio_provided = !!folio;
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
 	spinlock_t *ptl;
-	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	bool adjust_reservation = false;
@@ -5911,14 +5911,13 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			continue;
 		}
 
-		page = pte_page(pte);
 		/*
 		 * If a folio is supplied, it is because a specific
 		 * folio is being unmapped, not a range. Ensure the folio we
 		 * are about to unmap is the actual folio of interest.
 		 */
-		if (folio) {
-			if (page_folio(page) != folio) {
+		if (folio_provided) {
+			if (folio != page_folio(pte_page(pte))) {
 				spin_unlock(ptl);
 				continue;
 			}
@@ -5928,12 +5927,14 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * looking like data was lost
 			 */
 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
+		} else {
+			folio = page_folio(pte_page(pte));
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 		if (huge_pte_dirty(pte))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 		/* Leave a uffd-wp pte marker if needed */
 		if (huge_pte_uffd_wp(pte) &&
 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
@@ -5941,7 +5942,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 					make_pte_marker(PTE_MARKER_UFFD_WP),
 					sz);
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
-		hugetlb_remove_rmap(page_folio(page));
+		hugetlb_remove_rmap(folio);
 
 		/*
 		 * Restore the reservation for anonymous page, otherwise the
@@ -5950,8 +5951,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 * reservation bit.
 		 */
 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
-		    folio_test_anon(page_folio(page))) {
-			folio_set_hugetlb_restore_reserve(page_folio(page));
+		    folio_test_anon(folio)) {
+			folio_set_hugetlb_restore_reserve(folio);
 			/* Reservation to be adjusted after the spin lock */
 			adjust_reservation = true;
 		}
@@ -5975,16 +5976,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				 * count will not be incremented by free_huge_folio.
 				 * Act as if we consumed the reservation.
 				 */
-				folio_clear_hugetlb_restore_reserve(page_folio(page));
+				folio_clear_hugetlb_restore_reserve(folio);
 			else if (rc)
 				vma_add_reservation(h, vma, address);
 		}
 
-		tlb_remove_page_size(tlb, page, huge_page_size(h));
+		tlb_remove_page_size(tlb, folio_page(folio, 0),
+				     folio_size(folio));
 		/*
 		 * If we were instructed to unmap a specific folio, we're done.
 		 */
-		if (folio)
+		if (folio_provided)
 			break;
 	}
 	tlb_end_vma(tlb, vma);
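
Two notes on the last hunk (inference from the diff, not from the
changelog): folio_provided has to be latched before the loop because
"folio" itself is now assigned inside the loop, so a bare "if (folio)"
would be true on every later iteration even for a range unmap; and for a
hugetlb folio, folio_size() equals huge_page_size(h) for the VMA's hstate,
so switching tlb_remove_page_size() to folio_page()/folio_size() is
behaviour-preserving.  A hypothetical assertion spelling out that size
assumption (not part of the patch):

	/* Hypothetical check only: a hugetlb folio spans exactly one huge page. */
	VM_WARN_ON_ONCE(folio_size(folio) != huge_page_size(h));
	tlb_remove_page_size(tlb, folio_page(folio, 0), folio_size(folio));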