mm/huge_memory: refactor copy_huge_pmd() non-present logic
Right now we are inconsistent in our use of thp_migration_supported():

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

Sometimes we use this helper, and sometimes we simply open-code arbitrary
and ugly #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION blocks.
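To make the distinction concrete, here is a standalone sketch of why the
helper form is preferable (CONFIG_FOO_ENABLED, foo_supported() and
do_foo_thing() are hypothetical stand-ins, not kernel code): an
IS_ENABLED()-style branch is always parsed and type-checked, then
constant-folded away when the option is off, whereas a bare #ifdef block
is invisible to the compiler on disabled configs and can bit-rot
unnoticed.

/*
 * Standalone illustration only. Build with e.g.:
 *   cc -DCONFIG_FOO_ENABLED=1 foo.c   (option "enabled")
 *   cc -DCONFIG_FOO_ENABLED=0 foo.c   (option "disabled")
 */
#include <stdbool.h>
#include <stdio.h>

#ifndef CONFIG_FOO_ENABLED
#define CONFIG_FOO_ENABLED 0	/* stands in for a =n Kconfig symbol */
#endif

/* Mirrors the IS_ENABLED()-style helper: a compile-time constant. */
static inline bool foo_supported(void)
{
	return CONFIG_FOO_ENABLED;
}

static void do_foo_thing(void)
{
	puts("foo path taken");
}

int main(void)
{
	/*
	 * Always parsed and type-checked; eliminated as dead code when
	 * CONFIG_FOO_ENABLED is 0. A bare #ifdef block would instead be
	 * invisible to the compiler whenever the option is disabled.
	 */
	if (foo_supported())
		do_foo_thing();

	return 0;
}

The kernel's real IS_ENABLED() decodes Kconfig y/m/n symbols rather than a
plain 0/1 macro, but the dead-code-elimination property relied upon here
is the same.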
This inconsistency is exhibited in copy_huge_pmd(), which wraps a large,
difficult-to-follow if-branch in just such an #ifdef
CONFIG_ARCH_ENABLE_THP_MIGRATION block. The function is hard enough to
follow at its size, and the non-present PMD logic is clearly separable,
sitting as it does in one giant if-branch.

Therefore this patch both separates that logic out into a new helper,
copy_huge_non_present_pmd(), and utilises thp_migration_supported() in
place of the #ifdef (see the diff below).
No functional change intended.
Link: https://lkml.kernel.org/r/6eaadc23ed512d370ede65561e34e96241c54b9d.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mathew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent aa62204cb6
commit e244d82d02

mm/huge_memory.c | 109 changed lines
@@ -1699,6 +1699,62 @@ void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
 
+static void copy_huge_non_present_pmd(
+		struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+		pmd_t pmd, pgtable_t pgtable)
+{
+	swp_entry_t entry = pmd_to_swp_entry(pmd);
+	struct folio *src_folio;
+
+	VM_WARN_ON(!is_pmd_non_present_folio_entry(pmd));
+
+	if (is_writable_migration_entry(entry) ||
+	    is_readable_exclusive_migration_entry(entry)) {
+		entry = make_readable_migration_entry(swp_offset(entry));
+		pmd = swp_entry_to_pmd(entry);
+		if (pmd_swp_soft_dirty(*src_pmd))
+			pmd = pmd_swp_mksoft_dirty(pmd);
+		if (pmd_swp_uffd_wp(*src_pmd))
+			pmd = pmd_swp_mkuffd_wp(pmd);
+		set_pmd_at(src_mm, addr, src_pmd, pmd);
+	} else if (is_device_private_entry(entry)) {
+		/*
+		 * For device private entries, since there are no
+		 * read exclusive entries, writable = !readable
+		 */
+		if (is_writable_device_private_entry(entry)) {
+			entry = make_readable_device_private_entry(swp_offset(entry));
+			pmd = swp_entry_to_pmd(entry);
+
+			if (pmd_swp_soft_dirty(*src_pmd))
+				pmd = pmd_swp_mksoft_dirty(pmd);
+			if (pmd_swp_uffd_wp(*src_pmd))
+				pmd = pmd_swp_mkuffd_wp(pmd);
+			set_pmd_at(src_mm, addr, src_pmd, pmd);
+		}
+
+		src_folio = pfn_swap_entry_folio(entry);
+		VM_WARN_ON(!folio_test_large(src_folio));
+
+		folio_get(src_folio);
+		/*
+		 * folio_try_dup_anon_rmap_pmd does not fail for
+		 * device private entries.
+		 */
+		folio_try_dup_anon_rmap_pmd(src_folio, &src_folio->page,
+					    dst_vma, src_vma);
+	}
+
+	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+	mm_inc_nr_ptes(dst_mm);
+	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+	if (!userfaultfd_wp(dst_vma))
+		pmd = pmd_swp_clear_uffd_wp(pmd);
+	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+}
+
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
@@ -1744,59 +1800,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	ret = -EAGAIN;
 	pmd = *src_pmd;
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-	if (unlikely(is_swap_pmd(pmd))) {
-		swp_entry_t entry = pmd_to_swp_entry(pmd);
-
-		VM_WARN_ON(!is_pmd_non_present_folio_entry(pmd));
-
-		if (is_writable_migration_entry(entry) ||
-		    is_readable_exclusive_migration_entry(entry)) {
-			entry = make_readable_migration_entry(swp_offset(entry));
-			pmd = swp_entry_to_pmd(entry);
-			if (pmd_swp_soft_dirty(*src_pmd))
-				pmd = pmd_swp_mksoft_dirty(pmd);
-			if (pmd_swp_uffd_wp(*src_pmd))
-				pmd = pmd_swp_mkuffd_wp(pmd);
-			set_pmd_at(src_mm, addr, src_pmd, pmd);
-		} else if (is_device_private_entry(entry)) {
-			/*
-			 * For device private entries, since there are no
-			 * read exclusive entries, writable = !readable
-			 */
-			if (is_writable_device_private_entry(entry)) {
-				entry = make_readable_device_private_entry(swp_offset(entry));
-				pmd = swp_entry_to_pmd(entry);
-
-				if (pmd_swp_soft_dirty(*src_pmd))
-					pmd = pmd_swp_mksoft_dirty(pmd);
-				if (pmd_swp_uffd_wp(*src_pmd))
-					pmd = pmd_swp_mkuffd_wp(pmd);
-				set_pmd_at(src_mm, addr, src_pmd, pmd);
-			}
-
-			src_folio = pfn_swap_entry_folio(entry);
-			VM_WARN_ON(!folio_test_large(src_folio));
-
-			folio_get(src_folio);
-			/*
-			 * folio_try_dup_anon_rmap_pmd does not fail for
-			 * device private entries.
-			 */
-			folio_try_dup_anon_rmap_pmd(src_folio, &src_folio->page,
-						    dst_vma, src_vma);
-		}
-
-		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-		mm_inc_nr_ptes(dst_mm);
-		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
-		if (!userfaultfd_wp(dst_vma))
-			pmd = pmd_swp_clear_uffd_wp(pmd);
-		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-		ret = 0;
-		goto out_unlock;
-	}
-#endif
+	if (unlikely(thp_migration_supported() && is_swap_pmd(pmd))) {
+		copy_huge_non_present_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr,
+					  dst_vma, src_vma, pmd, pgtable);
+		ret = 0;
+		goto out_unlock;
+	}
 
 	if (unlikely(!pmd_trans_huge(pmd))) {
 		pte_free(dst_mm, pgtable);
||||