Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2025-09-04 20:19:47 +08:00
20 hotfixes. 10 are cc:stable and the remainder address post-6.16 issues
or aren't considered necessary for -stable kernels. 17 of these fixes
are for MM. As usual, singletons all over the place, apart from a
three-patch series of KHO followup work from Pasha which is actually
also a bunch of singletons.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaKfFVwAKCRDdBJ7gKXxA
jvZGAQCCRTRgwnYsH0op9Rlxs72zokENbErSzXweWLez31pNpAD/S7bVSjjk1mXr
BQ24ZadKUUomWkghwCusb9VomMeneg0=
=+uBT
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2025-08-21-18-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "20 hotfixes. 10 are cc:stable and the remainder address post-6.16
  issues or aren't considered necessary for -stable kernels. 17 of
  these fixes are for MM.

  As usual, singletons all over the place, apart from a three-patch
  series of KHO followup work from Pasha which is actually also a bunch
  of singletons"

* tag 'mm-hotfixes-stable-2025-08-21-18-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/mremap: fix WARN with uffd that has remap events disabled
  mm/damon/sysfs-schemes: put damos dests dir after removing its files
  mm/migrate: fix NULL movable_ops if CONFIG_ZSMALLOC=m
  mm/damon/core: fix damos_commit_filter not changing allow
  mm/memory-failure: fix infinite UCE for VM_PFNMAP pfn
  MAINTAINERS: mark MGLRU as maintained
  mm: rust: add page.rs to MEMORY MANAGEMENT - RUST
  iov_iter: iterate_folioq: fix handling of offset >= folio size
  selftests/damon: fix selftests by installing drgn related script
  .mailmap: add entry for Easwar Hariharan
  selftests/mm: add test for invalid multi VMA operations
  mm/mremap: catch invalid multi VMA moves earlier
  mm/mremap: allow multi-VMA move when filesystem uses thp_get_unmapped_area
  mm/damon/core: fix commit_ops_filters by using correct nth function
  tools/testing: add linux/args.h header and fix radix, VMA tests
  mm/debug_vm_pgtable: clear page table entries at destroy_args()
  squashfs: fix memory leak in squashfs_fill_super
  kho: warn if KHO is disabled due to an error
  kho: mm: don't allow deferred struct page with KHO
  kho: init new_physxa->phys_bits to fix lockdep
This commit is contained in:
commit 6eba757ce9

.mailmap | 2
--- a/.mailmap
+++ b/.mailmap
@@ -226,6 +226,8 @@ Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Drew Fustini <fustini@kernel.org> <drew@pdp7.com>
 <duje@dujemihanovic.xyz> <duje.mihanovic@skole.hr>
+Easwar Hariharan <easwar.hariharan@linux.microsoft.com> <easwar.hariharan@intel.com>
+Easwar Hariharan <easwar.hariharan@linux.microsoft.com> <eahariha@linux.microsoft.com>
 Ed L. Cashin <ecashin@coraid.com>
 Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
 Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
MAINTAINERS | 19
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16058,6 +16058,23 @@ F:	mm/mempolicy.c
 F:	mm/migrate.c
 F:	mm/migrate_device.c
 
+MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU)
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	Axel Rasmussen <axelrasmussen@google.com>
+M:	Yuanchu Xie <yuanchu@google.com>
+R:	Wei Xu <weixugc@google.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	Documentation/admin-guide/mm/multigen_lru.rst
+F:	Documentation/mm/multigen_lru.rst
+F:	include/linux/mm_inline.h
+F:	include/linux/mmzone.h
+F:	mm/swap.c
+F:	mm/vmscan.c
+F:	mm/workingset.c
+
 MEMORY MANAGEMENT - MISC
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	David Hildenbrand <david@redhat.com>
@@ -16248,8 +16265,10 @@ S:	Maintained
 W:	http://www.linux-mm.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 F:	rust/helpers/mm.c
+F:	rust/helpers/page.c
 F:	rust/kernel/mm.rs
 F:	rust/kernel/mm/
+F:	rust/kernel/page.rs
 
 MEMORY MAPPING
 M:	Andrew Morton <akpm@linux-foundation.org>
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -187,10 +187,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 	unsigned short flags;
 	unsigned int fragments;
 	u64 lookup_table_start, xattr_id_table_start, next_table;
-	int err;
+	int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
 
 	TRACE("Entered squashfs_fill_superblock\n");
 
+	if (!devblksize) {
+		errorf(fc, "squashfs: unable to set blocksize\n");
+		return -EINVAL;
+	}
+
 	sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
 	if (sb->s_fs_info == NULL) {
 		ERROR("Failed to allocate squashfs_sb_info\n");
@@ -201,12 +206,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 
 	msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
 
-	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
-	if (!msblk->devblksize) {
-		errorf(fc, "squashfs: unable to set blocksize\n");
-		return -EINVAL;
-	}
-
+	msblk->devblksize = devblksize;
 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
 
 	mutex_init(&msblk->meta_index_mutex);
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -160,7 +160,7 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
 
 	do {
 		struct folio *folio = folioq_folio(folioq, slot);
-		size_t part, remain, consumed;
+		size_t part, remain = 0, consumed;
 		size_t fsize;
 		void *base;
 
@@ -168,14 +168,16 @@ size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2
 			break;
 
 		fsize = folioq_folio_size(folioq, slot);
-		base = kmap_local_folio(folio, skip);
-		part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
-		remain = step(base, progress, part, priv, priv2);
-		kunmap_local(base);
-		consumed = part - remain;
-		len -= consumed;
-		progress += consumed;
-		skip += consumed;
+		if (skip < fsize) {
+			base = kmap_local_folio(folio, skip);
+			part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+			remain = step(base, progress, part, priv, priv2);
+			kunmap_local(base);
+			consumed = part - remain;
+			len -= consumed;
+			progress += consumed;
+			skip += consumed;
+		}
 		if (skip >= fsize) {
 			skip = 0;
 			slot++;
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -79,6 +79,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
 int folio_migrate_mapping(struct address_space *mapping,
 		struct folio *newfolio, struct folio *folio, int extra_count);
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
 
 #else
 
@@ -100,6 +101,10 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 {
 	return -ENOSYS;
 }
+static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
+{
+	return -ENOSYS;
+}
 
 #endif /* CONFIG_MIGRATION */
 
--- a/kernel/Kconfig.kexec
+++ b/kernel/Kconfig.kexec
@@ -97,6 +97,7 @@ config KEXEC_JUMP
 config KEXEC_HANDOVER
 	bool "kexec handover"
 	depends on ARCH_SUPPORTS_KEXEC_HANDOVER && ARCH_SUPPORTS_KEXEC_FILE
+	depends on !DEFERRED_STRUCT_PAGE_INIT
 	select MEMBLOCK_KHO_SCRATCH
 	select KEXEC_FILE
 	select DEBUG_FS
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -144,14 +144,34 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
 			     unsigned int order)
 {
 	struct kho_mem_phys_bits *bits;
-	struct kho_mem_phys *physxa;
+	struct kho_mem_phys *physxa, *new_physxa;
 	const unsigned long pfn_high = pfn >> order;
 
 	might_sleep();
 
-	physxa = xa_load_or_alloc(&track->orders, order, sizeof(*physxa));
-	if (IS_ERR(physxa))
-		return PTR_ERR(physxa);
+	physxa = xa_load(&track->orders, order);
+	if (!physxa) {
+		int err;
+
+		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
+		if (!new_physxa)
+			return -ENOMEM;
+
+		xa_init(&new_physxa->phys_bits);
+		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
+				    GFP_KERNEL);
+
+		err = xa_err(physxa);
+		if (err || physxa) {
+			xa_destroy(&new_physxa->phys_bits);
+			kfree(new_physxa);
+
+			if (err)
+				return err;
+		} else {
+			physxa = new_physxa;
+		}
+	}
 
 	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
 				sizeof(*bits));
@@ -544,6 +564,7 @@ err_free_scratch_areas:
 err_free_scratch_desc:
 	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
 err_disable_kho:
+	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
 	kho_enable = false;
 }
 
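Note on the lockdep fix in __kho_preserve_order() above: the key is that the inner xarray is fully initialized with xa_init() (which also registers its lock with lockdep) before the containing object is published via xa_cmpxchg(). A condensed sketch of that insert-if-absent idiom follows; struct bucket and get_or_create() are hypothetical names for illustration, not kernel code:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical container; stands in for struct kho_mem_phys above. */
struct bucket {
	struct xarray inner;
};

static struct bucket *get_or_create(struct xarray *xa, unsigned long idx)
{
	struct bucket *b, *old;

	b = xa_load(xa, idx);
	if (b)
		return b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return ERR_PTR(-ENOMEM);
	/* Initialize before publishing, so no other thread can ever see
	 * (or lock) an uninitialized xarray -- the heart of the fix. */
	xa_init(&b->inner);

	/* Install only if the slot is still empty. Returns the old entry
	 * on a lost race, NULL on success, or an xa_err()-encoded error. */
	old = xa_cmpxchg(xa, idx, NULL, b, GFP_KERNEL);
	if (xa_err(old) || old) {
		xa_destroy(&b->inner);
		kfree(b);
		return xa_err(old) ? ERR_PTR(xa_err(old)) : old;
	}
	return b;
}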
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -254,4 +254,10 @@ const struct movable_operations balloon_mops = {
 	.putback_page = balloon_page_putback,
 };
 
+static int __init balloon_init(void)
+{
+	return set_movable_ops(&balloon_mops, PGTY_offline);
+}
+core_initcall(balloon_init);
+
 #endif /* CONFIG_BALLOON_COMPACTION */
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -845,6 +845,18 @@ static struct damos_filter *damos_nth_filter(int n, struct damos *s)
 	return NULL;
 }
 
+static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
+{
+	struct damos_filter *filter;
+	int i = 0;
+
+	damos_for_each_ops_filter(filter, s) {
+		if (i++ == n)
+			return filter;
+	}
+	return NULL;
+}
+
 static void damos_commit_filter_arg(
 		struct damos_filter *dst, struct damos_filter *src)
 {
@@ -871,6 +883,7 @@ static void damos_commit_filter(
 {
 	dst->type = src->type;
 	dst->matching = src->matching;
+	dst->allow = src->allow;
 	damos_commit_filter_arg(dst, src);
 }
 
@@ -908,7 +921,7 @@ static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
 	int i = 0, j = 0;
 
 	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
-		src_filter = damos_nth_filter(i++, src);
+		src_filter = damos_nth_ops_filter(i++, src);
 		if (src_filter)
 			damos_commit_filter(dst_filter, src_filter);
 		else
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -2158,8 +2158,8 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
 {
 	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
 	kobject_put(&scheme->access_pattern->kobj);
-	kobject_put(&scheme->dests->kobj);
 	damos_sysfs_dests_rm_dirs(scheme->dests);
+	kobject_put(&scheme->dests->kobj);
 	damon_sysfs_quotas_rm_dirs(scheme->quotas);
 	kobject_put(&scheme->quotas->kobj);
 	kobject_put(&scheme->watermarks->kobj);
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -990,29 +990,34 @@ static void __init destroy_args(struct pgtable_debug_args *args)
 
 	/* Free page table entries */
 	if (args->start_ptep) {
+		pmd_clear(args->pmdp);
 		pte_free(args->mm, args->start_ptep);
 		mm_dec_nr_ptes(args->mm);
 	}
 
 	if (args->start_pmdp) {
+		pud_clear(args->pudp);
 		pmd_free(args->mm, args->start_pmdp);
 		mm_dec_nr_pmds(args->mm);
 	}
 
 	if (args->start_pudp) {
+		p4d_clear(args->p4dp);
 		pud_free(args->mm, args->start_pudp);
 		mm_dec_nr_puds(args->mm);
 	}
 
-	if (args->start_p4dp)
+	if (args->start_p4dp) {
+		pgd_clear(args->pgdp);
 		p4d_free(args->mm, args->start_p4dp);
+	}
 
 	/* Free vma and mm struct */
 	if (args->vma)
 		vm_area_free(args->vma);
 
 	if (args->mm)
-		mmdrop(args->mm);
+		mmput(args->mm);
 }
 
 static struct page * __init
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -853,9 +853,17 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 #define hwpoison_hugetlb_range	NULL
 #endif
 
+static int hwpoison_test_walk(unsigned long start, unsigned long end,
+			      struct mm_walk *walk)
+{
+	/* We also want to consider pages mapped into VM_PFNMAP. */
+	return 0;
+}
+
 static const struct mm_walk_ops hwpoison_walk_ops = {
 	.pmd_entry = hwpoison_pte_range,
 	.hugetlb_entry = hwpoison_hugetlb_range,
+	.test_walk = hwpoison_test_walk,
 	.walk_lock = PGWALK_RDLOCK,
 };
 
mm/migrate.c | 38
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -43,8 +43,6 @@
 #include <linux/sched/sysctl.h>
 #include <linux/memory-tiers.h>
 #include <linux/pagewalk.h>
-#include <linux/balloon_compaction.h>
-#include <linux/zsmalloc.h>
 
 #include <asm/tlbflush.h>
 
@@ -53,6 +51,33 @@
 #include "internal.h"
 #include "swap.h"
 
+static const struct movable_operations *offline_movable_ops;
+static const struct movable_operations *zsmalloc_movable_ops;
+
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
+{
+	/*
+	 * We only allow for selected types and don't handle concurrent
+	 * registration attempts yet.
+	 */
+	switch (type) {
+	case PGTY_offline:
+		if (offline_movable_ops && ops)
+			return -EBUSY;
+		offline_movable_ops = ops;
+		break;
+	case PGTY_zsmalloc:
+		if (zsmalloc_movable_ops && ops)
+			return -EBUSY;
+		zsmalloc_movable_ops = ops;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_movable_ops);
+
 static const struct movable_operations *page_movable_ops(struct page *page)
 {
 	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
@@ -62,15 +87,12 @@ static const struct movable_operations *page_movable_ops(struct page *page)
 	 * it as movable, the page type must be sticky until the page gets freed
 	 * back to the buddy.
 	 */
-#ifdef CONFIG_BALLOON_COMPACTION
 	if (PageOffline(page))
 		/* Only balloon compaction sets PageOffline pages movable. */
-		return &balloon_mops;
-#endif /* CONFIG_BALLOON_COMPACTION */
-#if defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION)
+		return offline_movable_ops;
+
 	if (PageZsmalloc(page))
-		return &zsmalloc_mops;
-#endif /* defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION) */
+		return zsmalloc_movable_ops;
 	return NULL;
 }
 
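For context on the CONFIG_ZSMALLOC=m failure mode fixed above: when a tristate option is built as a module, autoconf.h defines CONFIG_ZSMALLOC_MODULE rather than CONFIG_ZSMALLOC for built-in code, so the removed compile-time dispatch silently dropped the zsmalloc branch and page_movable_ops() returned NULL for zsmalloc pages. A hypothetical userspace analogue of the preprocessor pitfall (compile with -DCONFIG_ZSMALLOC, with -DCONFIG_ZSMALLOC_MODULE, or with neither):

#include <stdio.h>

int main(void)
{
#if defined(CONFIG_ZSMALLOC)
	/* Corresponds to CONFIG_ZSMALLOC=y: the old guard held. */
	printf("compile-time dispatch reachable\n");
#elif defined(CONFIG_ZSMALLOC_MODULE)
	/* Corresponds to CONFIG_ZSMALLOC=m: CONFIG_ZSMALLOC itself is
	 * undefined here, so the old guard compiled out and zsmalloc
	 * pages ended up with NULL movable ops. */
	printf("dispatch compiled out: NULL movable_ops\n");
#else
	printf("zsmalloc disabled entirely\n");
#endif
	return 0;
}

Runtime registration from the module's init path, as zs_init() does further below, sidesteps the problem.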
mm/mremap.c | 82
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -323,6 +323,25 @@ static inline bool arch_supports_page_table_move(void)
 }
 #endif
 
+static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
+{
+	/*
+	 * If we are moving a VMA that has uffd-wp registered but with
+	 * remap events disabled (new VMA will not be registered with uffd), we
+	 * need to ensure that the uffd-wp state is cleared from all pgtables.
+	 * This means recursing into lower page tables in move_page_tables().
+	 *
+	 * We might get called with VMAs reversed when recovering from a
+	 * failed page table move. In that case, the
+	 * "old"-but-actually-"originally new" VMA during recovery will not have
+	 * a uffd context. Recursing into lower page tables during the original
+	 * move but not during the recovery move will cause trouble, because we
+	 * run into already-existing page tables. So check both VMAs.
+	 */
+	return !vma_has_uffd_without_event_remap(pmc->old) &&
+	       !vma_has_uffd_without_event_remap(pmc->new);
+}
+
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct pagetable_move_control *pmc,
 		pmd_t *old_pmd, pmd_t *new_pmd)
@@ -335,6 +354,8 @@ static bool move_normal_pmd(struct pagetable_move_control *pmc,
 
 	if (!arch_supports_page_table_move())
 		return false;
+	if (!uffd_supports_page_table_move(pmc))
+		return false;
 	/*
 	 * The destination pmd shouldn't be established, free_pgtables()
 	 * should have released it.
@@ -361,15 +382,6 @@ static bool move_normal_pmd(struct pagetable_move_control *pmc,
 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
 		return false;
 
-	/* If this pmd belongs to a uffd vma with remap events disabled, we need
-	 * to ensure that the uffd-wp state is cleared from all pgtables. This
-	 * means recursing into lower page tables in move_page_tables(), and we
-	 * can reuse the existing code if we simply treat the entry as "not
-	 * moved".
-	 */
-	if (vma_has_uffd_without_event_remap(vma))
-		return false;
-
 	/*
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_lock prevents deadlock.
@@ -418,6 +430,8 @@ static bool move_normal_pud(struct pagetable_move_control *pmc,
 
 	if (!arch_supports_page_table_move())
 		return false;
+	if (!uffd_supports_page_table_move(pmc))
+		return false;
 	/*
 	 * The destination pud shouldn't be established, free_pgtables()
 	 * should have released it.
@@ -425,15 +439,6 @@ static bool move_normal_pud(struct pagetable_move_control *pmc,
 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
 		return false;
 
-	/* If this pud belongs to a uffd vma with remap events disabled, we need
-	 * to ensure that the uffd-wp state is cleared from all pgtables. This
-	 * means recursing into lower page tables in move_page_tables(), and we
-	 * can reuse the existing code if we simply treat the entry as "not
-	 * moved".
-	 */
-	if (vma_has_uffd_without_event_remap(vma))
-		return false;
-
 	/*
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_lock prevents deadlock.
@@ -1620,7 +1625,7 @@ static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
 
 static bool vma_multi_allowed(struct vm_area_struct *vma)
 {
-	struct file *file;
+	struct file *file = vma->vm_file;
 
 	/*
 	 * We can't support moving multiple uffd VMAs as notify requires
@@ -1633,15 +1638,17 @@ static bool vma_multi_allowed(struct vm_area_struct *vma)
 	 * Custom get unmapped area might result in MREMAP_FIXED not
 	 * being obeyed.
 	 */
-	file = vma->vm_file;
-	if (file && !vma_is_shmem(vma) && !is_vm_hugetlb_page(vma)) {
-		const struct file_operations *fop = file->f_op;
+	if (!file || !file->f_op->get_unmapped_area)
+		return true;
+	/* Known good. */
+	if (vma_is_shmem(vma))
+		return true;
+	if (is_vm_hugetlb_page(vma))
+		return true;
+	if (file->f_op->get_unmapped_area == thp_get_unmapped_area)
+		return true;
 
-		if (fop->get_unmapped_area)
-			return false;
-	}
-
-	return true;
+	return false;
 }
 
 static int check_prep_vma(struct vma_remap_struct *vrm)
@@ -1818,10 +1825,11 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
 	unsigned long start = vrm->addr;
 	unsigned long end = vrm->addr + vrm->old_len;
 	unsigned long new_addr = vrm->new_addr;
-	bool allowed = true, seen_vma = false;
 	unsigned long target_addr = new_addr;
 	unsigned long res = -EFAULT;
 	unsigned long last_end;
+	bool seen_vma = false;
+
 	VMA_ITERATOR(vmi, current->mm, start);
 
 	/*
@@ -1834,9 +1842,7 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
 		unsigned long addr = max(vma->vm_start, start);
 		unsigned long len = min(end, vma->vm_end) - addr;
 		unsigned long offset, res_vma;
-
-		if (!allowed)
-			return -EFAULT;
+		bool multi_allowed;
 
 		/* No gap permitted at the start of the range. */
 		if (!seen_vma && start < vma->vm_start)
@@ -1865,9 +1871,15 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
 		vrm->new_addr = target_addr + offset;
 		vrm->old_len = vrm->new_len = len;
 
-		allowed = vma_multi_allowed(vma);
-		if (seen_vma && !allowed)
-			return -EFAULT;
+		multi_allowed = vma_multi_allowed(vma);
+		if (!multi_allowed) {
+			/* This is not the first VMA, abort immediately. */
+			if (seen_vma)
+				return -EFAULT;
+			/* This is the first, but there are more, abort. */
+			if (vma->vm_end < end)
+				return -EFAULT;
+		}
 
 		res_vma = check_prep_vma(vrm);
 		if (!res_vma)
@@ -1876,7 +1888,7 @@ static unsigned long remap_move(struct vma_remap_struct *vrm)
 			return res_vma;
 
 		if (!seen_vma) {
-			VM_WARN_ON_ONCE(allowed && res_vma != new_addr);
+			VM_WARN_ON_ONCE(multi_allowed && res_vma != new_addr);
 			res = res_vma;
 		}
 
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2246,8 +2246,15 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool);
 
 static int __init zs_init(void)
 {
+	int rc __maybe_unused;
+
 #ifdef CONFIG_ZPOOL
 	zpool_register_driver(&zs_zpool_driver);
+#endif
+#ifdef CONFIG_COMPACTION
+	rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
+	if (rc)
+		return rc;
 #endif
 	zs_stat_init();
 	return 0;
@@ -2257,6 +2264,9 @@ static void __exit zs_exit(void)
 {
 #ifdef CONFIG_ZPOOL
 	zpool_unregister_driver(&zs_zpool_driver);
+#endif
+#ifdef CONFIG_COMPACTION
+	set_movable_ops(NULL, PGTY_zsmalloc);
 #endif
 	zs_stat_exit();
 }
tools/include/linux/args.h | 28 (new file)
--- /dev/null
+++ b/tools/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 12 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts to 15. Any more, it will return 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
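A standalone illustration of the two macro families (a hypothetical test program, not part of the patch; build with -Itools/include):

#include <stdio.h>
#include <linux/args.h>

/* Arity-based dispatch: CONCATENATE() picks addN by counting arguments. */
#define add(...) CONCATENATE(add, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#define add2(a, b) ((a) + (b))
#define add3(a, b, c) ((a) + (b) + (c))

int main(void)
{
	printf("%d\n", COUNT_ARGS());		/* 0: ", ##X" swallows the comma */
	printf("%d\n", COUNT_ARGS(x, y, z));	/* 3: identifiers are only counted */
	printf("%d\n", add(1, 2));		/* expands to add2(1, 2) => 3 */
	printf("%d\n", add(1, 2, 3));		/* expands to add3(1, 2, 3) => 6 */
	return 0;
}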
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -4,6 +4,7 @@
 TEST_GEN_FILES += access_memory access_memory_even
 
 TEST_FILES = _damon_sysfs.py
+TEST_FILES += drgn_dump_damon_status.py
 
 # functionality tests
 TEST_PROGS += sysfs.sh
--- a/tools/testing/selftests/mm/mremap_test.c
+++ b/tools/testing/selftests/mm/mremap_test.c
@@ -5,10 +5,14 @@
 #define _GNU_SOURCE
 
 #include <errno.h>
+#include <fcntl.h>
+#include <linux/userfaultfd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include <sys/ioctl.h>
 #include <sys/mman.h>
+#include <syscall.h>
 #include <time.h>
 #include <stdbool.h>
@@ -168,6 +172,7 @@ static bool is_range_mapped(FILE *maps_fp, unsigned long start,
 
 		if (first_val <= start && second_val >= end) {
 			success = true;
+			fflush(maps_fp);
 			break;
 		}
 	}
@@ -175,6 +180,15 @@ static bool is_range_mapped(FILE *maps_fp, unsigned long start,
 	return success;
 }
 
+/* Check if [ptr, ptr + size) mapped in /proc/self/maps. */
+static bool is_ptr_mapped(FILE *maps_fp, void *ptr, unsigned long size)
+{
+	unsigned long start = (unsigned long)ptr;
+	unsigned long end = start + size;
+
+	return is_range_mapped(maps_fp, start, end);
+}
+
 /*
  * Returns the start address of the mapping on success, else returns
  * NULL on failure.
@@ -733,6 +747,249 @@ out:
 			dont_unmap ? " [dontunnmap]" : "");
 }
 
+#ifdef __NR_userfaultfd
+static void mremap_move_multi_invalid_vmas(FILE *maps_fp,
+					   unsigned long page_size)
+{
+	char *test_name = "mremap move multiple invalid vmas";
+	const size_t size = 10 * page_size;
+	bool success = true;
+	char *ptr, *tgt_ptr;
+	int uffd, err, i;
+	void *res;
+	struct uffdio_api api = {
+		.api = UFFD_API,
+		.features = UFFD_EVENT_PAGEFAULT,
+	};
+
+	uffd = syscall(__NR_userfaultfd, O_NONBLOCK);
+	if (uffd == -1) {
+		err = errno;
+		perror("userfaultfd");
+		if (err == EPERM) {
+			ksft_test_result_skip("%s - missing uffd", test_name);
+			return;
+		}
+		success = false;
+		goto out;
+	}
+	if (ioctl(uffd, UFFDIO_API, &api)) {
+		perror("ioctl UFFDIO_API");
+		success = false;
+		goto out_close_uffd;
+	}
+
+	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+		   MAP_PRIVATE | MAP_ANON, -1, 0);
+	if (ptr == MAP_FAILED) {
+		perror("mmap");
+		success = false;
+		goto out_close_uffd;
+	}
+
+	tgt_ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	if (tgt_ptr == MAP_FAILED) {
+		perror("mmap");
+		success = false;
+		goto out_close_uffd;
+	}
+	if (munmap(tgt_ptr, size)) {
+		perror("munmap");
+		success = false;
+		goto out_unmap;
+	}
+
+	/*
+	 * Unmap so we end up with:
+	 *
+	 *  0   2   4   6   8   10  offset in buffer
+	 * |*| |*| |*| |*| |*|
+	 * |*| |*| |*| |*| |*|
+	 *
+	 * Additionally, register each with UFFD.
+	 */
+	for (i = 0; i < 10; i += 2) {
+		void *unmap_ptr = &ptr[(i + 1) * page_size];
+		unsigned long start = (unsigned long)&ptr[i * page_size];
+		struct uffdio_register reg = {
+			.range = {
+				.start = start,
+				.len = page_size,
+			},
+			.mode = UFFDIO_REGISTER_MODE_MISSING,
+		};
+
+		if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
+			perror("ioctl UFFDIO_REGISTER");
+			success = false;
+			goto out_unmap;
+		}
+		if (munmap(unmap_ptr, page_size)) {
+			perror("munmap");
+			success = false;
+			goto out_unmap;
+		}
+	}
+
+	/*
+	 * Now try to move the entire range which is invalid for multi VMA move.
+	 *
+	 * This will fail, and no VMA should be moved, as we check this ahead of
+	 * time.
+	 */
+	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+	err = errno;
+	if (res != MAP_FAILED) {
+		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
+		success = false;
+		goto out_unmap;
+	}
+	if (err != EFAULT) {
+		errno = err;
+		perror("mrmeap() unexpected error");
+		success = false;
+		goto out_unmap;
+	}
+	if (is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
+		fprintf(stderr,
+			"Invalid uffd-armed VMA at start of multi range moved\n");
+		success = false;
+		goto out_unmap;
+	}
+
+	/*
+	 * Now try to move a single VMA, this should succeed as not multi VMA
+	 * move.
+	 */
+	res = mremap(ptr, page_size, page_size,
+		     MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+	if (res == MAP_FAILED) {
+		perror("mremap single invalid-multi VMA");
+		success = false;
+		goto out_unmap;
+	}
+
+	/*
+	 * Unmap the VMA, and remap a non-uffd registered (therefore, multi VMA
+	 * move valid) VMA at the start of ptr range.
+	 */
+	if (munmap(tgt_ptr, page_size)) {
+		perror("munmap");
+		success = false;
+		goto out_unmap;
+	}
+	res = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
+		   MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	if (res == MAP_FAILED) {
+		perror("mmap");
+		success = false;
+		goto out_unmap;
+	}
+
+	/*
+	 * Now try to move the entire range, we should succeed in moving the
+	 * first VMA, but no others, and report a failure.
+	 */
+	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+	err = errno;
+	if (res != MAP_FAILED) {
+		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
+		success = false;
+		goto out_unmap;
+	}
+	if (err != EFAULT) {
+		errno = err;
+		perror("mrmeap() unexpected error");
+		success = false;
+		goto out_unmap;
+	}
+	if (!is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
+		fprintf(stderr, "Valid VMA not moved\n");
+		success = false;
+		goto out_unmap;
+	}
+
+	/*
+	 * Unmap the VMA, and map valid VMA at start of ptr range, and replace
+	 * all existing multi-move invalid VMAs, except the last, with valid
+	 * multi-move VMAs.
+	 */
+	if (munmap(tgt_ptr, page_size)) {
+		perror("munmap");
+		success = false;
+		goto out_unmap;
+	}
+	if (munmap(ptr, size - 2 * page_size)) {
+		perror("munmap");
+		success = false;
+		goto out_unmap;
+	}
+	for (i = 0; i < 8; i += 2) {
+		res = mmap(&ptr[i * page_size], page_size,
+			   PROT_READ | PROT_WRITE,
+			   MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+		if (res == MAP_FAILED) {
+			perror("mmap");
+			success = false;
+			goto out_unmap;
+		}
+	}
+
+	/*
+	 * Now try to move the entire range, we should succeed in moving all but
+	 * the last VMA, and report a failure.
+	 */
+	res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+	err = errno;
+	if (res != MAP_FAILED) {
+		fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
+		success = false;
+		goto out_unmap;
+	}
+	if (err != EFAULT) {
+		errno = err;
+		perror("mrmeap() unexpected error");
+		success = false;
+		goto out_unmap;
+	}
+
+	for (i = 0; i < 10; i += 2) {
+		bool is_mapped = is_ptr_mapped(maps_fp,
+				&tgt_ptr[i * page_size], page_size);
+
+		if (i < 8 && !is_mapped) {
+			fprintf(stderr, "Valid VMA not moved at %d\n", i);
+			success = false;
+			goto out_unmap;
+		} else if (i == 8 && is_mapped) {
+			fprintf(stderr, "Invalid VMA moved at %d\n", i);
+			success = false;
+			goto out_unmap;
+		}
+	}
+
+out_unmap:
+	if (munmap(tgt_ptr, size))
+		perror("munmap tgt");
+	if (munmap(ptr, size))
+		perror("munmap src");
+out_close_uffd:
+	close(uffd);
+out:
+	if (success)
+		ksft_test_result_pass("%s\n", test_name);
+	else
+		ksft_test_result_fail("%s\n", test_name);
+}
+#else
+static void mremap_move_multi_invalid_vmas(FILE *maps_fp, unsigned long page_size)
+{
+	char *test_name = "mremap move multiple invalid vmas";
+
+	ksft_test_result_skip("%s - missing uffd", test_name);
+}
+#endif /* __NR_userfaultfd */
+
 /* Returns the time taken for the remap on success else returns -1. */
 static long long remap_region(struct config c, unsigned int threshold_mb,
 			      char *rand_addr)
@@ -1074,7 +1331,7 @@ int main(int argc, char **argv)
 	char *rand_addr;
 	size_t rand_size;
 	int num_expand_tests = 2;
-	int num_misc_tests = 8;
+	int num_misc_tests = 9;
 	struct test test_cases[MAX_TEST] = {};
 	struct test perf_test_cases[MAX_PERF_TEST];
 	int page_size;
@@ -1197,8 +1454,6 @@ int main(int argc, char **argv)
 	mremap_expand_merge(maps_fp, page_size);
 	mremap_expand_merge_offset(maps_fp, page_size);
 
-	fclose(maps_fp);
-
 	mremap_move_within_range(pattern_seed, rand_addr);
 	mremap_move_1mb_from_start(pattern_seed, rand_addr);
 	mremap_shrink_multiple_vmas(page_size, /* inplace= */true);
@@ -1207,6 +1462,9 @@ int main(int argc, char **argv)
 	mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ true);
 	mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ false);
 	mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ true);
+	mremap_move_multi_invalid_vmas(maps_fp, page_size);
+
+	fclose(maps_fp);
 
 	if (run_perf_tests) {
 		ksft_print_msg("\n%s\n",
@@ -1 +1,5 @@
+/* Avoid duplicate definitions due to system headers. */
+#ifdef __CONCAT
+#undef __CONCAT
+#endif
 #include "../../../../include/linux/idr.h"
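The duplicate definition this tools-side idr.h wrapper avoids comes from glibc: <sys/cdefs.h>, pulled in by virtually every system header, already defines __CONCAT, and redefining a macro with differently spelled parameters triggers a redefinition warning (a hard error under -Werror). A minimal, hypothetical userspace reproduction of the clash and the #undef fix:

#include <stdio.h>	/* indirectly pulls in <sys/cdefs.h> on glibc */

/* The wrapper's approach: drop the system definition before the kernel's
 * linux/args.h (reached via its idr.h) supplies its own. */
#ifdef __CONCAT
#undef __CONCAT
#endif
#define __CONCAT(a, b) a ## b	/* args.h's spelling now lands cleanly */

int main(void)
{
	printf("%d\n", __CONCAT(4, 2));	/* prints 42 */
	return 0;
}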