Merge tag 'x86_core_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Borislav Petkov:

 - Add helpers for WB{NO,}INVD with the purpose of using them in KVM and
   thus diminish the number of invalidations needed. With preceding
   cleanups, as always

* tag 'x86_core_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/lib: Add WBINVD and WBNOINVD helpers to target multiple CPUs
  x86/lib: Add WBNOINVD helper functions
  x86/lib: Drop the unused return value from wbinvd_on_all_cpus()
  drm/gpu: Remove dead checks on wbinvd_on_all_cpus()'s return value
This commit is contained in:
Linus Torvalds
2025-07-29 16:55:29 -07:00
5 changed files with 76 additions and 14 deletions

View File

@@ -112,7 +112,10 @@ void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
-int wbinvd_on_all_cpus(void);
+void wbinvd_on_all_cpus(void);
+void wbinvd_on_cpus_mask(struct cpumask *cpus);
+void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
 void smp_kick_mwait_play_dead(void);
 void __noreturn mwait_play_dead(unsigned int eax_hint);
@@ -148,10 +151,24 @@ static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)		wbinvd()
-static inline int wbinvd_on_all_cpus(void)
+static inline void wbinvd_on_all_cpus(void)
 {
 	wbinvd();
-	return 0;
 }
+
+static inline void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	wbinvd();
+}
+
+static inline void wbnoinvd_on_all_cpus(void)
+{
+	wbnoinvd();
+}
+
+static inline void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	wbnoinvd();
+}
 
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)

View File

@@ -104,9 +104,36 @@ static inline void wrpkru(u32 pkru)
 }
 #endif
 
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, and then invalidate all caches.  Depending
+ * on the micro-architecture, WBINVD (and WBNOINVD below) may or may not affect
+ * lower level caches associated with another logical processor that shares any
+ * level of this processor's cache hierarchy.
+ */
 static __always_inline void wbinvd(void)
 {
-	asm volatile("wbinvd": : :"memory");
+	asm volatile("wbinvd" : : : "memory");
 }
 
+/* Instruction encoding provided for binutils backwards compatibility. */
+#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
+
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, but do NOT explicitly invalidate caches,
+ * i.e. leave all/most cache lines in the hierarchy in non-modified state.
+ */
+static __always_inline void wbnoinvd(void)
+{
+	/*
+	 * Explicitly encode WBINVD if X86_FEATURE_WBNOINVD is unavailable even
+	 * though WBNOINVD is backwards compatible (it's simply WBINVD with an
+	 * ignored REP prefix), to guarantee that WBNOINVD isn't used if it
+	 * needs to be avoided for any reason.  For all supported usage in the
+	 * kernel, WBINVD is functionally a superset of WBNOINVD.
+	 */
+	alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
+}
+
 static inline unsigned long __read_cr4(void)

View File

@@ -8295,8 +8295,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 		int cpu = get_cpu();
 
 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
-		on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
-				 wbinvd_ipi, NULL, 1);
+		wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask);
 		put_cpu();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	} else

View File

@@ -14,9 +14,31 @@ void wbinvd_on_cpu(int cpu)
 }
 EXPORT_SYMBOL(wbinvd_on_cpu);
 
-int wbinvd_on_all_cpus(void)
+void wbinvd_on_all_cpus(void)
 {
 	on_each_cpu(__wbinvd, NULL, 1);
-	return 0;
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbinvd_on_cpus_mask);
+
+static void __wbnoinvd(void *dummy)
+{
+	wbnoinvd();
+}
+
+void wbnoinvd_on_all_cpus(void)
+{
+	on_each_cpu(__wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_all_cpus);
+
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_cpus_mask);

View File

@@ -93,8 +93,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		pr_err("Timed out waiting for cache flush\n");
+	wbinvd_on_all_cpus();
 
 #elif defined(__powerpc__)
 	unsigned long i;
@@ -139,8 +138,7 @@ drm_clflush_sg(struct sg_table *st)
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		pr_err("Timed out waiting for cache flush\n");
+	wbinvd_on_all_cpus();
 #else
 	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif
@@ -172,8 +170,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 		return;
 	}
 
-	if (wbinvd_on_all_cpus())
-		pr_err("Timed out waiting for cache flush\n");
+	wbinvd_on_all_cpus();
 #else
 	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
 #endif