
selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));"

Patch series "selftests/mm: reuse FORCE_READ to replace "asm volatile("" :
"+r" (XXX));" and some cleanup", v2.

This series introduces a common FORCE_READ() macro to replace the cryptic
asm volatile("" : "+r" (variable)); construct used in several mm
selftests.  This improves code readability and maintainability by removing
duplicated, hard-to-understand code.


This patch (of 2):

Several mm selftests use the `asm volatile("" : "+r" (variable));`
construct to force a read of a variable, preventing the compiler from
optimizing away the memory access.  This idiom is cryptic and duplicated
across multiple test files.

Following a suggestion from David [1], this patch refactors this common
pattern into a FORCE_READ() macro.
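
For illustration, a minimal before/after sketch (mem is a hypothetical
pointer into a mapped page; the macro body is the one this series adds
to vm_util.h):

	#define FORCE_READ(x) (*(volatile typeof(x) *)x)

	/* Before: an empty asm barrier keeps the read of tmp alive. */
	char tmp = *mem;
	asm volatile("" : "+r" (tmp));

	/* After: the read goes through a volatile-qualified pointer, so
	 * the compiler cannot optimize it away, and the intent is clear
	 * at the call site.
	 */
	FORCE_READ(mem);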

Link: https://lkml.kernel.org/r/20250717131857.59909-1-lianux.mm@gmail.com
Link: https://lkml.kernel.org/r/20250717131857.59909-2-lianux.mm@gmail.com
Link: https://lore.kernel.org/lkml/4a3e0759-caa1-4cfa-bc3f-402593f1eee3@redhat.com/ [1]
Signed-off-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3f6bfd4789 (parent 7efa1cd5f8)
Authored by wang lian on 2025-07-17 21:18:56 +08:00; committed by Andrew Morton
7 changed files with 31 additions and 39 deletions

tools/testing/selftests/mm/cow.c

@@ -1534,7 +1534,7 @@ static void test_ro_fast_pin(char *mem, const char *smem, size_t size)
 static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 
 	log_test_start("%s ... with shared zeropage", desc);
@@ -1554,8 +1554,8 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Read from the page to populate the shared zeropage. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -1566,7 +1566,7 @@ munmap:
 static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, *mmap_mem, *mmap_smem, tmp;
+	char *mem, *smem, *mmap_mem, *mmap_smem;
 	size_t mmap_size;
 	int ret;
@@ -1617,8 +1617,8 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
 	 * the first sub-page and test if we get another sub-page populated
 	 * automatically.
 	 */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 	if (!pagemap_is_populated(pagemap_fd, mem + pagesize) ||
 	    !pagemap_is_populated(pagemap_fd, smem + pagesize)) {
 		ksft_test_result_skip("Did not get THPs populated\n");
@@ -1634,7 +1634,7 @@ munmap:
 static void run_with_memfd(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 	int fd;
 
 	log_test_start("%s ... with memfd", desc);
@@ -1668,8 +1668,8 @@ static void run_with_memfd(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Fault the page in. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -1682,7 +1682,7 @@ close:
 static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
 {
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 	FILE *file;
 	int fd;
@@ -1724,8 +1724,8 @@ static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Fault the page in. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -1740,7 +1740,7 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
 				   size_t hugetlbsize)
 {
 	int flags = MFD_HUGETLB;
-	char *mem, *smem, tmp;
+	char *mem, *smem;
 	int fd;
 
 	log_test_start("%s ... with memfd hugetlb (%zu kB)", desc,
@@ -1778,8 +1778,8 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
 	}
 
 	/* Fault the page in. */
-	tmp = *mem + *smem;
-	asm volatile("" : "+r" (tmp));
+	FORCE_READ(mem);
+	FORCE_READ(smem);
 
 	fn(mem, smem, hugetlbsize);
 munmap:

tools/testing/selftests/mm/guard-regions.c

@@ -35,13 +35,6 @@
 static volatile sig_atomic_t signal_jump_set;
 static sigjmp_buf signal_jmp_buf;
 
-/*
- * Ignore the checkpatch warning, we must read from x but don't want to do
- * anything with it in order to trigger a read page fault. We therefore must use
- * volatile to stop the compiler from optimising this away.
- */
-#define FORCE_READ(x) (*(volatile typeof(x) *)x)
-
 /*
  * How is the test backing the mapping being tested?
  */

tools/testing/selftests/mm/hugetlb-madvise.c

@@ -47,14 +47,11 @@ void write_fault_pages(void *addr, unsigned long nr_pages)
 void read_fault_pages(void *addr, unsigned long nr_pages)
 {
-	volatile unsigned long dummy = 0;
 	unsigned long i;
 
 	for (i = 0; i < nr_pages; i++) {
-		dummy += *((unsigned long *)(addr + (i * huge_page_size)));
-
-		/* Prevent the compiler from optimizing out the entire loop: */
-		asm volatile("" : "+r" (dummy));
+		FORCE_READ(((unsigned long *)(addr + (i * huge_page_size))));
 	}
 }

tools/testing/selftests/mm/migration.c

@@ -16,6 +16,7 @@
 #include <sys/types.h>
 #include <signal.h>
 #include <time.h>
+#include "vm_util.h"
 
 #define TWOMEG (2<<20)
 #define RUNTIME (20)
@@ -103,15 +104,13 @@ int migrate(uint64_t *ptr, int n1, int n2)
 void *access_mem(void *ptr)
 {
-	volatile uint64_t y = 0;
-	volatile uint64_t *x = ptr;
-
 	while (1) {
 		pthread_testcancel();
-		y += *x;
-
-		/* Prevent the compiler from optimizing out the writes to y: */
-		asm volatile("" : "+r" (y));
+		/* Force a read from the memory pointed to by ptr. This ensures
+		 * the memory access actually happens and prevents the compiler
+		 * from optimizing away this entire loop.
+		 */
+		FORCE_READ((uint64_t *)ptr);
 	}
 
 	return NULL;

tools/testing/selftests/mm/pagemap_ioctl.c

@@ -1525,9 +1525,7 @@ void zeropfn_tests(void)
 	ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
 	if (!ret) {
-		char tmp = *mem;
-		asm volatile("" : "+r" (tmp));
+		FORCE_READ(mem);
 
 		ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
 				    0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);

tools/testing/selftests/mm/split_huge_page_test.c

@@ -398,7 +398,6 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
 				char **addr)
 {
 	size_t i;
-	int dummy = 0;
 	unsigned char buf[1024];
 
 	srand(time(NULL));
@@ -440,8 +439,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
 	madvise(*addr, fd_size, MADV_HUGEPAGE);
 
 	for (size_t i = 0; i < fd_size; i++)
-		dummy += *(*addr + i);
-	asm volatile("" : "+r" (dummy));
+		FORCE_READ((*addr + i));
 
 	if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
 		ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");

tools/testing/selftests/mm/vm_util.h

@@ -18,6 +18,13 @@
 #define PM_SWAP BIT_ULL(62)
 #define PM_PRESENT BIT_ULL(63)
 
+/*
+ * Ignore the checkpatch warning, we must read from x but don't want to do
+ * anything with it in order to trigger a read page fault. We therefore must use
+ * volatile to stop the compiler from optimising this away.
+ */
+#define FORCE_READ(x) (*(volatile typeof(x) *)x)
+
 extern unsigned int __page_size;
 extern unsigned int __page_shift;
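
A minimal usage sketch (hypothetical helper, not part of this commit):
any pointer into a mapping can be handed to FORCE_READ(). Compound
arguments get an extra set of parentheses, as the callers above do,
since x is not parenthesized inside the macro body:

	#include <stddef.h>
	#include "vm_util.h"

	/* Fault in a read-only mapping by touching each page; the
	 * volatile dereference in FORCE_READ() keeps the loads alive.
	 */
	static void fault_in_readonly(char *mem, size_t size, size_t pagesize)
	{
		size_t off;

		for (off = 0; off < size; off += pagesize)
			FORCE_READ((mem + off));
	}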