Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-04 20:19:47 +08:00)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM:

   - Correctly handle 'invariant' system registers for protected VMs

   - Improved handling of VNCR data aborts, including external aborts

   - Fixes for handling of FEAT_RAS for NV guests, providing a sane
     fault context during SEA injection and preventing the use of
     RASv1p1 fault injection hardware

   - Ensure that page table destruction when a VM is destroyed gives an
     opportunity to reschedule

   - Large fix to KVM's infrastructure for managing guest context loaded
     on the CPU, addressing issues where the output of AT emulation
     doesn't get reflected to the guest

   - Fix AT S12 emulation to actually perform stage-2 translation when
     necessary

   - Avoid attempting vLPI irqbypass when GICv4 has been explicitly
     disabled for a VM

   - Minor KVM + selftest fixes

  RISC-V:

   - Fix pte settings within kvm_riscv_gstage_ioremap()

   - Fix comments in kvm_riscv_check_vcpu_requests()

   - Fix stack overrun when setting vlenb via ONE_REG

  x86:

   - Use array_index_nospec() to sanitize the target vCPU ID when
     handling PV IPIs and yields as the ID is guest-controlled

   - Drop a superfluous cpumask_empty() check when reclaiming SEV
     memory; the common case, by far, is that at least one CPU will have
     entered the VM, and wbnoinvd_on_cpus_mask() will naturally handle
     the rare case where the set of have_run_cpus is empty

  Selftests (not KVM):

   - Rename the is_signed_type() macro in kselftest_harness.h to
     is_signed_var() to fix a collision with linux/overflow.h. The
     collision generates compiler warnings due to the two macros having
     different meaning"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (29 commits)
  KVM: arm64: nv: Fix ATS12 handling of single-stage translation
  KVM: arm64: Remove __vcpu_{read,write}_sys_reg_{from,to}_cpu()
  KVM: arm64: Fix vcpu_{read,write}_sys_reg() accessors
  KVM: arm64: Simplify sysreg access on exception delivery
  KVM: arm64: Check for SYSREGS_ON_CPU before accessing the 32bit state
  RISC-V: KVM: fix stack overrun when loading vlenb
  RISC-V: KVM: Correct kvm_riscv_check_vcpu_requests() comment
  RISC-V: KVM: Fix pte settings within kvm_riscv_gstage_ioremap()
  KVM: arm64: selftests: Sync ID_AA64MMFR3_EL1 in set_id_regs
  KVM: arm64: Get rid of ARM64_FEATURE_MASK()
  KVM: arm64: Make ID_AA64PFR1_EL1.RAS_frac writable
  KVM: arm64: Make ID_AA64PFR0_EL1.RAS writable
  KVM: arm64: Ignore HCR_EL2.FIEN set by L1 guest's EL2
  KVM: arm64: Handle RASv1p1 registers
  arm64: Add capability denoting FEAT_RASv1p1
  KVM: arm64: Reschedule as needed when destroying the stage-2 page-tables
  KVM: arm64: Split kvm_pgtable_stage2_destroy()
  selftests: harness: Rename is_signed_type() to avoid collision with overflow.h
  KVM: SEV: don't check have_run_cpus in sev_writeback_caches()
  KVM: arm64: Correctly populate FAR_EL2 on nested SEA injection
  ...
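The x86 note above about sanitizing the guest-controlled vCPU ID refers to the array_index_nospec() pattern from <linux/nospec.h>: clamp an attacker-influenced index under speculation before it is used for an array lookup. The sketch below is illustrative only and is not the actual KVM code; the function and array names are made up for the example.

#include <linux/nospec.h>

/*
 * Minimal sketch: bound-check a guest-supplied index, then clamp it with
 * array_index_nospec() so a mispredicted branch cannot be used to read
 * out-of-bounds memory speculatively before dereferencing the array.
 */
static struct kvm_vcpu *example_get_vcpu(struct kvm_vcpu **vcpus,
					 unsigned long nr_vcpus,
					 unsigned long guest_idx)
{
	if (guest_idx >= nr_vcpus)
		return NULL;

	guest_idx = array_index_nospec(guest_idx, nr_vcpus);
	return vcpus[guest_idx];
}

The clamp is cheap and only matters on the in-bounds path, which is why it is applied after the ordinary range check rather than replacing it.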
This commit is contained in: commit 11e7861d68
@ -1160,115 +1160,8 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
|
|||||||
__v; \
|
__v; \
|
||||||
})
|
})
|
||||||
|
|
||||||
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
|
u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
|
||||||
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
|
void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);
|
||||||
|
|
||||||
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* *** VHE ONLY ***
|
|
||||||
*
|
|
||||||
* System registers listed in the switch are not saved on every
|
|
||||||
* exit from the guest but are only saved on vcpu_put.
|
|
||||||
*
|
|
||||||
* SYSREGS_ON_CPU *MUST* be checked before using this helper.
|
|
||||||
*
|
|
||||||
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
|
|
||||||
* should never be listed below, because the guest cannot modify its
|
|
||||||
* own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
|
|
||||||
* thread when emulating cross-VCPU communication.
|
|
||||||
*/
|
|
||||||
if (!has_vhe())
|
|
||||||
return false;
|
|
||||||
|
|
||||||
switch (reg) {
|
|
||||||
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
|
|
||||||
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
|
|
||||||
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
|
|
||||||
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
|
|
||||||
case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break;
|
|
||||||
case TCR2_EL1: *val = read_sysreg_s(SYS_TCR2_EL12); break;
|
|
||||||
case PIR_EL1: *val = read_sysreg_s(SYS_PIR_EL12); break;
|
|
||||||
case PIRE0_EL1: *val = read_sysreg_s(SYS_PIRE0_EL12); break;
|
|
||||||
case POR_EL1: *val = read_sysreg_s(SYS_POR_EL12); break;
|
|
||||||
case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break;
|
|
||||||
case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break;
|
|
||||||
case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break;
|
|
||||||
case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break;
|
|
||||||
case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break;
|
|
||||||
case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break;
|
|
||||||
case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
|
|
||||||
case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break;
|
|
||||||
case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
|
|
||||||
case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
|
|
||||||
case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
|
|
||||||
case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
|
|
||||||
case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
|
|
||||||
case SPSR_EL1: *val = read_sysreg_s(SYS_SPSR_EL12); break;
|
|
||||||
case PAR_EL1: *val = read_sysreg_par(); break;
|
|
||||||
case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
|
|
||||||
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
|
|
||||||
case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
|
|
||||||
case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break;
|
|
||||||
case SCTLR2_EL1: *val = read_sysreg_s(SYS_SCTLR2_EL12); break;
|
|
||||||
default: return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* *** VHE ONLY ***
|
|
||||||
*
|
|
||||||
* System registers listed in the switch are not restored on every
|
|
||||||
* entry to the guest but are only restored on vcpu_load.
|
|
||||||
*
|
|
||||||
* SYSREGS_ON_CPU *MUST* be checked before using this helper.
|
|
||||||
*
|
|
||||||
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
|
|
||||||
* should never be listed below, because the MPIDR should only be set
|
|
||||||
* once, before running the VCPU, and never changed later.
|
|
||||||
*/
|
|
||||||
if (!has_vhe())
|
|
||||||
return false;
|
|
||||||
|
|
||||||
switch (reg) {
|
|
||||||
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
|
|
||||||
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
|
|
||||||
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
|
|
||||||
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
|
|
||||||
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
|
|
||||||
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
|
|
||||||
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
|
|
||||||
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
|
|
||||||
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
|
|
||||||
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
|
|
||||||
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
|
|
||||||
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
|
|
||||||
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
|
|
||||||
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
|
|
||||||
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
|
|
||||||
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
|
|
||||||
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
|
|
||||||
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
|
|
||||||
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
|
|
||||||
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
|
|
||||||
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
|
|
||||||
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
|
|
||||||
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
|
|
||||||
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
|
|
||||||
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
|
|
||||||
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
|
|
||||||
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
|
|
||||||
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
|
|
||||||
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
|
|
||||||
default: return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct kvm_vm_stat {
|
struct kvm_vm_stat {
|
||||||
struct kvm_vm_stat_generic generic;
|
struct kvm_vm_stat_generic generic;
|
||||||
|
@@ -180,6 +180,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);
 
+int kvm_handle_guest_sea(struct kvm_vcpu *vcpu);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 phys_addr_t kvm_mmu_get_httbr(void);
@@ -355,6 +355,11 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
	return pteref;
 }
 
+static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
+{
+	return pteref;
+}
+
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
	/*
@@ -384,6 +389,11 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
 }
 
+static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
+{
+	return rcu_dereference_raw(pteref);
+}
+
 static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
@@ -551,6 +561,26 @@ static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2
  */
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 
+/**
+ * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @addr: Intermediate physical address at which to place the mapping.
+ * @size: Size of the mapping.
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior
+ * to freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				      u64 addr, u64 size);
+
+/**
+ * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ *
+ * It is assumed that the rest of the page-table is freed before this operation.
+ */
+void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
+
 /**
  * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
  * @mm_ops: Memory management callbacks.
@@ -179,7 +179,9 @@ struct pkvm_mapping {
 
 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
+void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				       u64 addr, u64 size);
+void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 - Arm Ltd */
-
-#ifndef __ARM64_KVM_RAS_H__
-#define __ARM64_KVM_RAS_H__
-
-#include <linux/acpi.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#include <asm/acpi.h>
-
-/*
- * Was this synchronous external abort a RAS notification?
- * Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
- */
-static inline int kvm_handle_guest_sea(void)
-{
-	/* apei_claim_sea(NULL) expects to mask interrupts itself */
-	lockdep_assert_irqs_enabled();
-
-	return apei_claim_sea(NULL);
-}
-
-#endif /* __ARM64_KVM_RAS_H__ */
@@ -1142,9 +1142,6 @@
 
 #define ARM64_FEATURE_FIELD_BITS	4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)	(x##_MASK)
-
 #ifdef __ASSEMBLY__
 
	.macro	mrs_s, rt, sreg
@@ -2269,6 +2269,24 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
	/* Firmware may have left a deferred SError in this register. */
	write_sysreg_s(0, SYS_DISR_EL1);
 }
+
+static bool has_rasv1p1(const struct arm64_cpu_capabilities *__unused, int scope)
+{
+	const struct arm64_cpu_capabilities rasv1p1_caps[] = {
+		{
+			ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, V1P1)
+		},
+		{
+			ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
+		},
+		{
+			ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, RAS_frac, RASv1p1)
+		},
+	};
+
+	return (has_cpuid_feature(&rasv1p1_caps[0], scope) ||
+		(has_cpuid_feature(&rasv1p1_caps[1], scope) &&
+		 has_cpuid_feature(&rasv1p1_caps[2], scope)));
+}
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
@@ -2687,6 +2705,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.cpu_enable = cpu_clear_disr,
		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP)
	},
+	{
+		.desc = "RASv1p1 Extension Support",
+		.capability = ARM64_HAS_RASV1P1_EXTN,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_rasv1p1,
+	},
 #endif /* CONFIG_ARM64_RAS_EXTN */
 #ifdef CONFIG_ARM64_AMU_EXTN
	{
@@ -2408,12 +2408,12 @@ static u64 get_hyp_id_aa64pfr0_el1(void)
	 */
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
-	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
-		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+	val &= ~(ID_AA64PFR0_EL1_CSV2 |
+		 ID_AA64PFR0_EL1_CSV3);
 
-	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
-	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
 
	return val;
@@ -1420,10 +1420,10 @@ void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
		return;
 
	/*
-	 * If we only have a single stage of translation (E2H=0 or
-	 * TGE=1), exit early. Same thing if {VM,DC}=={0,0}.
+	 * If we only have a single stage of translation (EL2&0), exit
+	 * early. Same thing if {VM,DC}=={0,0}.
	 */
-	if (!vcpu_el2_e2h_is_set(vcpu) || vcpu_el2_tge_is_set(vcpu) ||
+	if (compute_translation_regime(vcpu, op) == TR_EL20 ||
	    !(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC)))
		return;
 
@@ -2833,7 +2833,7 @@ int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
			  iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
	esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
 
-	vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
+	vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
 
	if (__vcpu_sys_reg(vcpu, SCTLR2_EL2) & SCTLR2_EL1_EASE)
		return kvm_inject_nested(vcpu, esr, except_type_serror);
@@ -22,36 +22,28 @@
 
 static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 {
-	u64 val;
-
-	if (unlikely(vcpu_has_nv(vcpu)))
+	if (has_vhe())
		return vcpu_read_sys_reg(vcpu, reg);
-	else if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
-		 __vcpu_read_sys_reg_from_cpu(reg, &val))
-		return val;
 
	return __vcpu_sys_reg(vcpu, reg);
 }
 
 static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 {
-	if (unlikely(vcpu_has_nv(vcpu)))
+	if (has_vhe())
		vcpu_write_sys_reg(vcpu, val, reg);
-	else if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU) ||
-		 !__vcpu_write_sys_reg_to_cpu(val, reg))
+	else
		__vcpu_assign_sys_reg(vcpu, reg, val);
 }
 
 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      u64 val)
 {
-	if (unlikely(vcpu_has_nv(vcpu))) {
+	if (has_vhe()) {
		if (target_mode == PSR_MODE_EL1h)
			vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
		else
			vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
-	} else if (has_vhe()) {
-		write_sysreg_el1(val, SYS_SPSR);
	} else {
		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
	}
@@ -59,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
 
 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
 {
-	if (has_vhe())
+	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		write_sysreg(val, spsr_abt);
	else
		vcpu->arch.ctxt.spsr_abt = val;
@@ -67,7 +59,7 @@ static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
 
 static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
 {
-	if (has_vhe())
+	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		write_sysreg(val, spsr_und);
	else
		vcpu->arch.ctxt.spsr_und = val;
@@ -17,7 +17,7 @@ static inline __must_check bool nvhe_check_data_corruption(bool v)
		bool corruption = unlikely(condition);			\
		if (corruption) {					\
			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) {\
-				BUG_ON(1);				\
+				BUG();					\
			} else						\
				WARN_ON(1);				\
		}							\
@@ -253,6 +253,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
+	__vcpu_assign_sys_reg(vcpu, read_sysreg_el1(SYS_VBAR), VBAR_EL1);
 
	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 
@@ -372,6 +373,9 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
 
	/* Debug and Trace Registers are restricted. */
 
+	/* Group 1 ID registers */
+	HOST_HANDLED(SYS_REVIDR_EL1),
+
	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
@@ -460,6 +464,7 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
 
	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
+	HOST_HANDLED(SYS_AIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),
 
@@ -1551,21 +1551,38 @@ static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
	return 0;
 }
 
-void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				      u64 addr, u64 size)
 {
-	size_t pgd_sz;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};
 
-	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
+	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+}
+
+void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
+{
+	size_t pgd_sz;
+
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
-	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
+
+	/*
+	 * Since the pgtable is unlinked at this point, and not shared with
+	 * other walkers, safely deference pgd with kvm_dereference_pteref_raw()
+	 */
+	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
	pgt->pgd = NULL;
 }
 
+void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+{
+	kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
+	kvm_pgtable_stage2_destroy_pgd(pgt);
+}
+
 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
 {
	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
@@ -20,7 +20,7 @@ static bool __is_be(struct kvm_vcpu *vcpu)
	if (vcpu_mode_is_32bit(vcpu))
		return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);
 
-	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
+	return !!(read_sysreg_el1(SYS_SCTLR) & SCTLR_ELx_EE);
 }
 
 /*
@@ -43,8 +43,11 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 *
 * - API/APK: they are already accounted for by vcpu_load(), and can
 *   only take effect across a load/put cycle (such as ERET)
+ *
+ * - FIEN: no way we let a guest have access to the RAS "Common Fault
+ *   Injection" thing, whatever that does
 */
-#define NV_HCR_GUEST_EXCLUDE	(HCR_TGE | HCR_API | HCR_APK)
+#define NV_HCR_GUEST_EXCLUDE	(HCR_TGE | HCR_API | HCR_APK | HCR_FIEN)
 
 static u64 __compute_hcr(struct kvm_vcpu *vcpu)
 {
@@ -4,19 +4,20 @@
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
 
+#include <linux/acpi.h>
 #include <linux/mman.h>
 #include <linux/kvm_host.h>
 #include <linux/io.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/signal.h>
 #include <trace/events/kvm.h>
+#include <asm/acpi.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
 #include <asm/kvm_pkvm.h>
-#include <asm/kvm_ras.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/virt.h>
@@ -903,6 +904,38 @@ static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
	return 0;
 }
 
+/*
+ * Assume that @pgt is valid and unlinked from the KVM MMU to free the
+ * page-table without taking the kvm_mmu_lock and without performing any
+ * TLB invalidations.
+ *
+ * Also, the range of addresses can be large enough to cause need_resched
+ * warnings, for instance on CONFIG_PREEMPT_NONE kernels. Hence, invoke
+ * cond_resched() periodically to prevent hogging the CPU for a long time
+ * and schedule something else, if required.
+ */
+static void stage2_destroy_range(struct kvm_pgtable *pgt, phys_addr_t addr,
+				 phys_addr_t end)
+{
+	u64 next;
+
+	do {
+		next = stage2_range_addr_end(addr, end);
+		KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, addr,
+							     next - addr);
+		if (next != end)
+			cond_resched();
+	} while (addr = next, addr != end);
+}
+
+static void kvm_stage2_destroy(struct kvm_pgtable *pgt)
+{
+	unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr);
+
+	stage2_destroy_range(pgt, 0, BIT(ia_bits));
+	KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt);
+}
+
 /**
 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
 * @kvm:	The pointer to the KVM structure
@@ -979,7 +1012,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
	return 0;
 
 out_destroy_pgtable:
-	KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
+	kvm_stage2_destroy(pgt);
 out_free_pgtable:
	kfree(pgt);
	return err;
@@ -1076,7 +1109,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
	write_unlock(&kvm->mmu_lock);
 
	if (pgt) {
-		KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
+		kvm_stage2_destroy(pgt);
		kfree(pgt);
	}
 }
@@ -1811,6 +1844,19 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
	read_unlock(&vcpu->kvm->mmu_lock);
 }
 
+int kvm_handle_guest_sea(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Give APEI the opportunity to claim the abort before handling it
+	 * within KVM. apei_claim_sea() expects to be called with IRQs enabled.
+	 */
+	lockdep_assert_irqs_enabled();
+	if (apei_claim_sea(NULL) == 0)
+		return 1;
+
+	return kvm_inject_serror(vcpu);
+}
+
 /**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
@@ -1834,17 +1880,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
	gfn_t gfn;
	int ret, idx;
 
-	/* Synchronous External Abort? */
-	if (kvm_vcpu_abt_issea(vcpu)) {
-		/*
-		 * For RAS the host kernel may handle this abort.
-		 * There is no need to pass the error into the guest.
-		 */
-		if (kvm_handle_guest_sea())
-			return kvm_inject_serror(vcpu);
-
-		return 1;
-	}
+	if (kvm_vcpu_abt_issea(vcpu))
+		return kvm_handle_guest_sea(vcpu);
 
	esr = kvm_vcpu_get_esr(vcpu);
 
@@ -1287,7 +1287,10 @@ int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
	u64 esr = kvm_vcpu_get_esr(vcpu);
 
-	BUG_ON(!(esr & ESR_ELx_VNCR_SHIFT));
+	WARN_ON_ONCE(!(esr & ESR_ELx_VNCR));
+
+	if (kvm_vcpu_abt_issea(vcpu))
+		return kvm_handle_guest_sea(vcpu);
 
	if (esr_fsc_is_permission_fault(esr)) {
		inject_vncr_perm(vcpu);
@@ -316,9 +316,16 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
	return 0;
 }
 
-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+				       u64 addr, u64 size)
 {
-	__pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
+	__pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+}
+
+void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
+{
+	/* Expected to be called after all pKVM mappings have been released. */
+	WARN_ON_ONCE(!RB_EMPTY_ROOT(&pgt->pkvm_mappings.rb_root));
 }
 
 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
@ -82,43 +82,105 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
|
|||||||
"sys_reg write to read-only register");
|
"sys_reg write to read-only register");
|
||||||
}
|
}
|
||||||
|
|
||||||
#define PURE_EL2_SYSREG(el2) \
|
enum sr_loc_attr {
|
||||||
case el2: { \
|
SR_LOC_MEMORY = 0, /* Register definitely in memory */
|
||||||
*el1r = el2; \
|
SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
|
||||||
return true; \
|
SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
|
||||||
}
|
SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
|
||||||
|
SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
|
||||||
|
};
|
||||||
|
|
||||||
#define MAPPED_EL2_SYSREG(el2, el1, fn) \
|
struct sr_loc {
|
||||||
case el2: { \
|
enum sr_loc_attr loc;
|
||||||
*xlate = fn; \
|
enum vcpu_sysreg map_reg;
|
||||||
*el1r = el1; \
|
u64 (*xlate)(u64);
|
||||||
return true; \
|
};
|
||||||
}
|
|
||||||
|
|
||||||
static bool get_el2_to_el1_mapping(unsigned int reg,
|
static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
|
||||||
unsigned int *el1r, u64 (**xlate)(u64))
|
enum vcpu_sysreg reg)
|
||||||
{
|
{
|
||||||
switch (reg) {
|
switch (reg) {
|
||||||
PURE_EL2_SYSREG( VPIDR_EL2 );
|
case SCTLR_EL1:
|
||||||
PURE_EL2_SYSREG( VMPIDR_EL2 );
|
case CPACR_EL1:
|
||||||
PURE_EL2_SYSREG( ACTLR_EL2 );
|
case TTBR0_EL1:
|
||||||
PURE_EL2_SYSREG( HCR_EL2 );
|
case TTBR1_EL1:
|
||||||
PURE_EL2_SYSREG( MDCR_EL2 );
|
case TCR_EL1:
|
||||||
PURE_EL2_SYSREG( HSTR_EL2 );
|
case TCR2_EL1:
|
||||||
PURE_EL2_SYSREG( HACR_EL2 );
|
case PIR_EL1:
|
||||||
PURE_EL2_SYSREG( VTTBR_EL2 );
|
case PIRE0_EL1:
|
||||||
PURE_EL2_SYSREG( VTCR_EL2 );
|
case POR_EL1:
|
||||||
PURE_EL2_SYSREG( TPIDR_EL2 );
|
case ESR_EL1:
|
||||||
PURE_EL2_SYSREG( HPFAR_EL2 );
|
case AFSR0_EL1:
|
||||||
PURE_EL2_SYSREG( HCRX_EL2 );
|
case AFSR1_EL1:
|
||||||
PURE_EL2_SYSREG( HFGRTR_EL2 );
|
case FAR_EL1:
|
||||||
PURE_EL2_SYSREG( HFGWTR_EL2 );
|
case MAIR_EL1:
|
||||||
PURE_EL2_SYSREG( HFGITR_EL2 );
|
case VBAR_EL1:
|
||||||
PURE_EL2_SYSREG( HDFGRTR_EL2 );
|
case CONTEXTIDR_EL1:
|
||||||
PURE_EL2_SYSREG( HDFGWTR_EL2 );
|
case AMAIR_EL1:
|
||||||
PURE_EL2_SYSREG( HAFGRTR_EL2 );
|
case CNTKCTL_EL1:
|
||||||
PURE_EL2_SYSREG( CNTVOFF_EL2 );
|
case ELR_EL1:
|
||||||
PURE_EL2_SYSREG( CNTHCTL_EL2 );
|
case SPSR_EL1:
|
||||||
|
case ZCR_EL1:
|
||||||
|
case SCTLR2_EL1:
|
||||||
|
/*
|
||||||
|
* EL1 registers which have an ELx2 mapping are loaded if
|
||||||
|
* we're not in hypervisor context.
|
||||||
|
*/
|
||||||
|
return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
|
||||||
|
|
||||||
|
case TPIDR_EL0:
|
||||||
|
case TPIDRRO_EL0:
|
||||||
|
case TPIDR_EL1:
|
||||||
|
case PAR_EL1:
|
||||||
|
case DACR32_EL2:
|
||||||
|
case IFSR32_EL2:
|
||||||
|
case DBGVCR32_EL2:
|
||||||
|
/* These registers are always loaded, no matter what */
|
||||||
|
return SR_LOC_LOADED;
|
||||||
|
|
||||||
|
default:
|
||||||
|
/* Non-mapped EL2 registers are by definition in memory. */
|
||||||
|
return SR_LOC_MEMORY;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
|
||||||
|
enum vcpu_sysreg reg,
|
||||||
|
enum vcpu_sysreg map_reg,
|
||||||
|
u64 (*xlate)(u64),
|
||||||
|
struct sr_loc *loc)
|
||||||
|
{
|
||||||
|
if (!is_hyp_ctxt(vcpu)) {
|
||||||
|
loc->loc = SR_LOC_MEMORY;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
|
||||||
|
loc->map_reg = map_reg;
|
||||||
|
|
||||||
|
WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
|
||||||
|
|
||||||
|
if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
|
||||||
|
loc->loc |= SR_LOC_XLATED;
|
||||||
|
loc->xlate = xlate;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#define MAPPED_EL2_SYSREG(r, m, t) \
|
||||||
|
case r: { \
|
||||||
|
locate_mapped_el2_register(vcpu, r, m, t, loc); \
|
||||||
|
break; \
|
||||||
|
}
|
||||||
|
|
||||||
|
static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
|
||||||
|
struct sr_loc *loc)
|
||||||
|
{
|
||||||
|
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
|
||||||
|
loc->loc = SR_LOC_MEMORY;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (reg) {
|
||||||
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
|
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
|
||||||
translate_sctlr_el2_to_sctlr_el1 );
|
translate_sctlr_el2_to_sctlr_el1 );
|
||||||
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
|
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
|
||||||
@ -144,125 +206,189 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
|
|||||||
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
||||||
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
|
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
|
||||||
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
|
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
|
||||||
|
case CNTHCTL_EL2:
|
||||||
|
/* CNTHCTL_EL2 is super special, until we support NV2.1 */
|
||||||
|
loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
|
||||||
|
SR_LOC_SPECIAL : SR_LOC_MEMORY);
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
return false;
|
loc->loc = locate_direct_register(vcpu, reg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
|
static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
|
||||||
{
|
{
|
||||||
u64 val = 0x8badf00d8badf00d;
|
u64 val = 0x8badf00d8badf00d;
|
||||||
u64 (*xlate)(u64) = NULL;
|
|
||||||
unsigned int el1r;
|
|
||||||
|
|
||||||
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
|
switch (reg) {
|
||||||
goto memory_read;
|
case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
|
||||||
|
case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
|
||||||
|
case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
|
||||||
|
case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
|
||||||
|
case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
|
||||||
|
case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
|
||||||
|
case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
|
||||||
|
case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
|
||||||
|
case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
|
||||||
|
case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
|
||||||
|
case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
|
||||||
|
case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
|
||||||
|
case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
|
||||||
|
case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
|
||||||
|
case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
|
||||||
|
case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
|
||||||
|
case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
|
||||||
|
case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
|
||||||
|
case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
|
||||||
|
case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
|
||||||
|
case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
|
||||||
|
case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
|
||||||
|
case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
|
||||||
|
case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
|
||||||
|
case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
|
||||||
|
case PAR_EL1: val = read_sysreg_par(); break;
|
||||||
|
case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
|
||||||
|
case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
|
||||||
|
case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
|
||||||
|
default: WARN_ON_ONCE(1);
|
||||||
|
}
|
||||||
|
|
||||||
if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
|
return val;
|
||||||
if (!is_hyp_ctxt(vcpu))
|
}
|
||||||
goto memory_read;
|
|
||||||
|
static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
|
||||||
|
{
|
||||||
|
switch (reg) {
|
||||||
|
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
|
||||||
|
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
|
||||||
|
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
|
||||||
|
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
|
||||||
|
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
|
||||||
|
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
|
||||||
|
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
|
||||||
|
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
|
||||||
|
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
|
||||||
|
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
|
||||||
|
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
|
||||||
|
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
|
||||||
|
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
|
||||||
|
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
|
||||||
|
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
|
||||||
|
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
|
||||||
|
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
|
||||||
|
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
|
||||||
|
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
|
||||||
|
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
|
||||||
|
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
|
||||||
|
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
|
||||||
|
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
|
||||||
|
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
|
||||||
|
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
|
||||||
|
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
|
||||||
|
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
|
||||||
|
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
|
||||||
|
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
|
||||||
|
default: WARN_ON_ONCE(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
|
||||||
|
{
|
||||||
|
struct sr_loc loc = {};
|
||||||
|
|
||||||
|
locate_register(vcpu, reg, &loc);
|
||||||
|
|
||||||
|
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
|
||||||
|
|
||||||
|
if (loc.loc & SR_LOC_SPECIAL) {
|
||||||
|
u64 val;
|
||||||
|
|
||||||
|
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* CNTHCTL_EL2 requires some special treatment to
|
* CNTHCTL_EL2 requires some special treatment to account
|
||||||
* account for the bits that can be set via CNTKCTL_EL1.
|
* for the bits that can be set via CNTKCTL_EL1 when E2H==1.
|
||||||
*/
|
*/
|
||||||
switch (reg) {
|
switch (reg) {
|
||||||
case CNTHCTL_EL2:
|
case CNTHCTL_EL2:
|
||||||
if (vcpu_el2_e2h_is_set(vcpu)) {
|
|
||||||
val = read_sysreg_el1(SYS_CNTKCTL);
|
val = read_sysreg_el1(SYS_CNTKCTL);
|
||||||
val &= CNTKCTL_VALID_BITS;
|
val &= CNTKCTL_VALID_BITS;
|
||||||
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
|
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
|
||||||
return val;
|
return val;
|
||||||
|
default:
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
}
|
}
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
if (loc.loc & SR_LOC_LOADED) {
|
||||||
* If this register does not have an EL1 counterpart,
|
enum vcpu_sysreg map_reg = reg;
|
||||||
* then read the stored EL2 version.
|
|
||||||
*/
|
|
||||||
if (reg == el1r)
|
|
||||||
goto memory_read;
|
|
||||||
|
|
||||||
/*
|
if (loc.loc & SR_LOC_MAPPED)
|
||||||
* If we have a non-VHE guest and that the sysreg
|
map_reg = loc.map_reg;
|
||||||
* requires translation to be used at EL1, use the
|
|
||||||
* in-memory copy instead.
|
if (!(loc.loc & SR_LOC_XLATED)) {
|
||||||
*/
|
u64 val = read_sr_from_cpu(map_reg);
|
||||||
if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
|
|
||||||
goto memory_read;
|
|
||||||
|
|
||||||
/* Get the current version of the EL1 counterpart. */
|
|
||||||
WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
|
|
||||||
if (reg >= __SANITISED_REG_START__)
|
if (reg >= __SANITISED_REG_START__)
|
||||||
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
|
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
|
||||||
|
|
||||||
return val;
|
return val;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* EL1 register can't be on the CPU if the guest is in vEL2. */
|
|
||||||
if (unlikely(is_hyp_ctxt(vcpu)))
|
|
||||||
goto memory_read;
|
|
||||||
|
|
||||||
if (__vcpu_read_sys_reg_from_cpu(reg, &val))
|
|
||||||
return val;
|
|
||||||
|
|
||||||
memory_read:
|
|
||||||
return __vcpu_sys_reg(vcpu, reg);
|
return __vcpu_sys_reg(vcpu, reg);
|
||||||
}
|
}
|
||||||
|
|
||||||
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
|
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
|
||||||
{
|
{
|
||||||
u64 (*xlate)(u64) = NULL;
|
struct sr_loc loc = {};
|
||||||
unsigned int el1r;
|
|
||||||
|
|
||||||
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
|
locate_register(vcpu, reg, &loc);
|
||||||
goto memory_write;
|
|
||||||
|
|
||||||
if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
|
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
|
||||||
if (!is_hyp_ctxt(vcpu))
|
|
||||||
goto memory_write;
|
|
||||||
|
|
||||||
/*
|
if (loc.loc & SR_LOC_SPECIAL) {
|
||||||
* Always store a copy of the write to memory to avoid having
|
|
||||||
* to reverse-translate virtual EL2 system registers for a
|
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
|
||||||
* non-VHE guest hypervisor.
|
|
||||||
*/
|
|
||||||
__vcpu_assign_sys_reg(vcpu, reg, val);
|
|
||||||
|
|
||||||
switch (reg) {
|
switch (reg) {
|
||||||
case CNTHCTL_EL2:
|
case CNTHCTL_EL2:
|
||||||
/*
|
/*
|
||||||
* If E2H=0, CNHTCTL_EL2 is a pure shadow register.
|
* If E2H=1, some of the bits are backed by
|
||||||
* Otherwise, some of the bits are backed by
|
|
||||||
* CNTKCTL_EL1, while the rest is kept in memory.
|
* CNTKCTL_EL1, while the rest is kept in memory.
|
||||||
* Yes, this is fun stuff.
|
* Yes, this is fun stuff.
|
||||||
*/
|
*/
|
||||||
if (vcpu_el2_e2h_is_set(vcpu))
|
|
||||||
write_sysreg_el1(val, SYS_CNTKCTL);
|
write_sysreg_el1(val, SYS_CNTKCTL);
|
||||||
return;
|
break;
|
||||||
|
default:
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* No EL1 counterpart? We're done here.? */
|
if (loc.loc & SR_LOC_LOADED) {
|
||||||
if (reg == el1r)
|
enum vcpu_sysreg map_reg = reg;
|
||||||
return;
|
u64 xlated_val;
|
||||||
|
|
||||||
if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
|
if (reg >= __SANITISED_REG_START__)
|
||||||
val = xlate(val);
|
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
|
||||||
|
|
||||||
/* Redirect this to the EL1 version of the register. */
|
if (loc.loc & SR_LOC_MAPPED)
|
||||||
WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
|
map_reg = loc.map_reg;
|
||||||
return;
|
|
||||||
|
if (loc.loc & SR_LOC_XLATED)
|
||||||
|
xlated_val = loc.xlate(val);
|
||||||
|
else
|
||||||
|
xlated_val = val;
|
||||||
|
|
||||||
|
write_sr_to_cpu(map_reg, xlated_val);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Fall through to write the backing store anyway, which
|
||||||
|
* allows translated registers to be directly read without a
|
||||||
|
* reverse translation.
|
||||||
|
*/
|
||||||
}
|
}
|
||||||
|
|
||||||
/* EL1 register can't be on the CPU if the guest is in vEL2. */
|
|
||||||
if (unlikely(is_hyp_ctxt(vcpu)))
|
|
||||||
goto memory_write;
|
|
||||||
|
|
||||||
if (__vcpu_write_sys_reg_to_cpu(val, reg))
|
|
||||||
return;
|
|
||||||
|
|
||||||
memory_write:
|
|
||||||
__vcpu_assign_sys_reg(vcpu, reg, val);
|
__vcpu_assign_sys_reg(vcpu, reg, val);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1584,6 +1710,7 @@ static u8 pmuver_to_perfmon(u8 pmuver)
 }
 
 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
+static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
 
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
@@ -1606,19 +1733,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
		val = sanitise_id_aa64pfr0_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR1_EL1:
-		if (!kvm_has_mte(vcpu->kvm)) {
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
-		}
-
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+		val = sanitise_id_aa64pfr1_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR2_EL1:
		val &= ID_AA64PFR2_EL1_FPMR |
@@ -1628,18 +1743,18 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
-			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+			val &= ~(ID_AA64ISAR1_EL1_APA |
+				 ID_AA64ISAR1_EL1_API |
+				 ID_AA64ISAR1_EL1_GPA |
+				 ID_AA64ISAR1_EL1_GPI);
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
-			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
+			val &= ~(ID_AA64ISAR2_EL1_APA3 |
+				 ID_AA64ISAR2_EL1_GPA3);
		if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
		    has_broken_cntvoff())
-			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+			val &= ~ID_AA64ISAR2_EL1_WFxT;
		break;
	case SYS_ID_AA64ISAR3_EL1:
		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
@@ -1655,7 +1770,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
			ID_AA64MMFR3_EL1_S1PIE;
		break;
	case SYS_ID_MMFR4_EL1:
-		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+		val &= ~ID_MMFR4_EL1_CCIDX;
		break;
	}
 
@@ -1836,6 +1951,31 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
	return val;
 }
 
+static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
+{
+	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	if (!kvm_has_mte(vcpu->kvm)) {
+		val &= ~ID_AA64PFR1_EL1_MTE;
+		val &= ~ID_AA64PFR1_EL1_MTE_frac;
+	}
+
+	if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
+	      SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
+		val &= ~ID_AA64PFR1_EL1_RAS_frac;
+
+	val &= ~ID_AA64PFR1_EL1_SME;
+	val &= ~ID_AA64PFR1_EL1_RNDR_trap;
+	val &= ~ID_AA64PFR1_EL1_NMI;
+	val &= ~ID_AA64PFR1_EL1_GCS;
+	val &= ~ID_AA64PFR1_EL1_THE;
+	val &= ~ID_AA64PFR1_EL1_MTEX;
+	val &= ~ID_AA64PFR1_EL1_PFAR;
+	val &= ~ID_AA64PFR1_EL1_MPAM_frac;
+
+	return val;
+}
+
 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
 {
	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
@ -2697,6 +2837,18 @@ static bool access_ras(struct kvm_vcpu *vcpu,
|
|||||||
struct kvm *kvm = vcpu->kvm;
|
struct kvm *kvm = vcpu->kvm;
|
||||||
|
|
||||||
switch(reg_to_encoding(r)) {
|
switch(reg_to_encoding(r)) {
|
||||||
|
case SYS_ERXPFGCDN_EL1:
|
||||||
|
case SYS_ERXPFGCTL_EL1:
|
||||||
|
case SYS_ERXPFGF_EL1:
|
||||||
|
case SYS_ERXMISC2_EL1:
|
||||||
|
case SYS_ERXMISC3_EL1:
|
||||||
|
if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
|
||||||
|
(kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
|
||||||
|
kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
|
||||||
|
kvm_inject_undefined(vcpu);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
|
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
|
||||||
kvm_inject_undefined(vcpu);
|
kvm_inject_undefined(vcpu);
|
||||||
@ -2929,7 +3081,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||||||
~(ID_AA64PFR0_EL1_AMU |
|
~(ID_AA64PFR0_EL1_AMU |
|
||||||
ID_AA64PFR0_EL1_MPAM |
|
ID_AA64PFR0_EL1_MPAM |
|
||||||
ID_AA64PFR0_EL1_SVE |
|
ID_AA64PFR0_EL1_SVE |
|
||||||
ID_AA64PFR0_EL1_RAS |
|
|
||||||
ID_AA64PFR0_EL1_AdvSIMD |
|
ID_AA64PFR0_EL1_AdvSIMD |
|
||||||
ID_AA64PFR0_EL1_FP)),
|
ID_AA64PFR0_EL1_FP)),
|
||||||
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
|
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
|
||||||
@ -2943,7 +3094,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||||||
ID_AA64PFR1_EL1_SME |
|
ID_AA64PFR1_EL1_SME |
|
||||||
ID_AA64PFR1_EL1_RES0 |
|
ID_AA64PFR1_EL1_RES0 |
|
||||||
ID_AA64PFR1_EL1_MPAM_frac |
|
ID_AA64PFR1_EL1_MPAM_frac |
|
||||||
ID_AA64PFR1_EL1_RAS_frac |
|
|
||||||
ID_AA64PFR1_EL1_MTE)),
|
ID_AA64PFR1_EL1_MTE)),
|
||||||
ID_WRITABLE(ID_AA64PFR2_EL1,
|
ID_WRITABLE(ID_AA64PFR2_EL1,
|
||||||
ID_AA64PFR2_EL1_FPMR |
|
ID_AA64PFR2_EL1_FPMR |
|
||||||
@ -3063,8 +3213,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||||||
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
|
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
|
||||||
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
|
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
|
||||||
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
|
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
|
||||||
|
{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
|
||||||
|
{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
|
||||||
|
{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
|
||||||
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
|
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
|
||||||
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
|
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
|
||||||
|
{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
|
||||||
|
{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
|
||||||
|
|
||||||
MTE_REG(TFSR_EL1),
|
MTE_REG(TFSR_EL1),
|
||||||
MTE_REG(TFSRE0_EL1),
|
MTE_REG(TFSRE0_EL1),
|
||||||
|
@@ -50,6 +50,14 @@ bool vgic_has_its(struct kvm *kvm)
 
 bool vgic_supports_direct_msis(struct kvm *kvm)
 {
+	/*
+	 * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
+	 * indirectly allowing userspace to control whether or not vPEs are
+	 * allocated for the VM.
+	 */
+	if (system_supports_direct_sgis() && !vgic_supports_direct_sgis(kvm))
+		return false;
+
 	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
 }
 
@@ -1091,7 +1091,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 		len = vgic_v3_init_dist_iodev(io_device);
 		break;
 	default:
-		BUG_ON(1);
+		BUG();
 	}
 
 	io_device->base_addr = dist_base_address;
@@ -396,15 +396,7 @@ bool vgic_supports_direct_sgis(struct kvm *kvm);
 
 static inline bool vgic_supports_direct_irqs(struct kvm *kvm)
 {
-	/*
-	 * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
-	 * indirectly allowing userspace to control whether or not vPEs are
-	 * allocated for the VM.
-	 */
-	if (system_supports_direct_sgis())
-		return vgic_supports_direct_sgis(kvm);
-
-	return vgic_supports_direct_msis(kvm);
+	return vgic_supports_direct_msis(kvm) || vgic_supports_direct_sgis(kvm);
 }
 
 int vgic_v4_init(struct kvm *kvm);
@@ -53,6 +53,7 @@ HAS_S1PIE
 HAS_S1POE
 HAS_SCTLR2
 HAS_RAS_EXTN
+HAS_RASV1P1_EXTN
 HAS_RNG
 HAS_SB
 HAS_STAGE2_FWB
@@ -39,6 +39,7 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 			  unsigned long size, bool writable, bool in_atomic)
 {
 	int ret = 0;
+	pgprot_t prot;
 	unsigned long pfn;
 	phys_addr_t addr, end;
 	struct kvm_mmu_memory_cache pcache = {
@@ -55,10 +56,12 @@ int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
+	prot = pgprot_noncached(PAGE_WRITE);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
 		map.addr = addr;
-		map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.pte = pfn_pte(pfn, prot);
+		map.pte = pte_mkdirty(map.pte);
 		map.level = 0;
 
 		if (!writable)
@@ -683,7 +683,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 }
 
 /**
- * check_vcpu_requests - check and handle pending vCPU requests
+ * kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
  * @vcpu: the VCPU pointer
  *
  * Return: 1 if we should enter the guest
|
@ -182,6 +182,8 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
|
|||||||
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
|
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
|
||||||
unsigned long reg_val;
|
unsigned long reg_val;
|
||||||
|
|
||||||
|
if (reg_size != sizeof(reg_val))
|
||||||
|
return -EINVAL;
|
||||||
if (copy_from_user(®_val, uaddr, reg_size))
|
if (copy_from_user(®_val, uaddr, reg_size))
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
if (reg_val != cntx->vector.vlenb)
|
if (reg_val != cntx->vector.vlenb)
|
||||||
|
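
The two added lines above are the whole stack-overrun fix: the ONE_REG payload size supplied by userspace is validated against the on-stack destination before copy_from_user() is allowed to write into it. A minimal sketch of the same pattern, assuming kernel context; the helper name and parameters below are illustrative and not taken from the patch:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Sketch only: refuse a caller-supplied size that does not match the
 * destination, so copy_from_user() can never overrun the local variable. */
static int fetch_ulong_from_user(unsigned long *dst,
				 const void __user *uaddr,
				 unsigned long user_size)
{
	if (user_size != sizeof(*dst))
		return -EINVAL;

	if (copy_from_user(dst, uaddr, user_size))
		return -EFAULT;

	return 0;
}
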
@@ -810,6 +810,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
 	if (min > map->max_apic_id)
 		return 0;
 
+	min = array_index_nospec(min, map->max_apic_id + 1);
+
 	for_each_set_bit(i, ipi_bitmap,
 		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
 		if (map->phys_map[min + i]) {
@@ -718,13 +718,6 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
 
 static void sev_writeback_caches(struct kvm *kvm)
 {
-	/*
-	 * Note, the caller is responsible for ensuring correctness if the mask
-	 * can be modified, e.g. if a CPU could be doing VMRUN.
-	 */
-	if (cpumask_empty(to_kvm_sev_info(kvm)->have_run_cpus))
-		return;
-
 	/*
 	 * Ensure that all dirty guest tagged cache entries are written back
 	 * before releasing the pages back to the system for use. CLFLUSH will
@@ -739,6 +732,9 @@ static void sev_writeback_caches(struct kvm *kvm)
 	 * serializing multiple calls and having responding CPUs (to the IPI)
 	 * mark themselves as still running if they are running (or about to
 	 * run) a vCPU for the VM.
+	 *
+	 * Note, the caller is responsible for ensuring correctness if the mask
+	 * can be modified, e.g. if a CPU could be doing VMRUN.
 	 */
 	wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus);
 }
@@ -9908,8 +9908,11 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 	rcu_read_lock();
 	map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
-	if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
+	if (likely(map) && dest_id <= map->max_apic_id) {
+		dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
+		if (map->phys_map[dest_id])
 			target = map->phys_map[dest_id]->vcpu;
+	}
 
 	rcu_read_unlock();
 
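
Both x86 hunks above apply the same Spectre-v1 hardening pattern: a guest-controlled APIC ID passes an architectural bounds check and is then clamped with array_index_nospec() before being used as an array index, so a mispredicted branch cannot speculatively read out of bounds. A minimal sketch of that pattern, assuming kernel context; the structure and function names below are illustrative, not the real KVM ones:

#include <linux/nospec.h>
#include <linux/types.h>

struct entry;

/* Sketch only: clamp the untrusted index after the bounds check so the
 * speculative path cannot dereference table[] out of range. */
static struct entry *lookup_entry(struct entry **table, u32 max_id, u32 id)
{
	if (id > max_id)
		return NULL;

	id = array_index_nospec(id, max_id + 1);
	return table[id];
}
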
@@ -1080,9 +1080,6 @@
 
 #define ARM64_FEATURE_FIELD_BITS	4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)	(x##_MASK)
-
 #ifdef __ASSEMBLY__
 
 	.macro	mrs_s, rt, sreg
@@ -751,7 +751,7 @@
 	for (; _metadata->trigger; _metadata->trigger = \
 			__bail(_assert, _metadata))
 
-#define is_signed_type(var)	(!!(((__typeof__(var))(-1)) < (__typeof__(var))1))
+#define is_signed_var(var)	(!!(((__typeof__(var))(-1)) < (__typeof__(var))1))
 
 #define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
 	/* Avoid multiple evaluation of the cases */ \
@@ -759,7 +759,7 @@
 	__typeof__(_seen) __seen = (_seen); \
 	if (!(__exp _t __seen)) { \
 		/* Report with actual signedness to avoid weird output. */ \
-		switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
+		switch (is_signed_var(__exp) * 2 + is_signed_var(__seen)) { \
 		case 0: { \
 			uintmax_t __exp_print = (uintmax_t)__exp; \
 			uintmax_t __seen_print = (uintmax_t)__seen; \
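
The macro is only renamed; its body is unchanged. It casts -1 to the variable's type: for a signed type the comparison (-1 < 1) holds, while for an unsigned type the cast wraps to the type's maximum value and the comparison fails. A small standalone illustration (not part of the patch; relies on the GCC/Clang __typeof__ extension):

#include <assert.h>

#define is_signed_var(var) (!!(((__typeof__(var))(-1)) < (__typeof__(var))1))

int main(void)
{
	int s = 0;
	unsigned int u = 0;

	assert(is_signed_var(s) == 1);	/* (int)-1 < (int)1 holds */
	assert(is_signed_var(u) == 0);	/* (unsigned)-1 wraps to UINT_MAX */

	return 0;
}
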
@@ -169,6 +169,7 @@ TEST_GEN_PROGS_arm64 += arm64/vgic_irq
 TEST_GEN_PROGS_arm64 += arm64/vgic_lpi_stress
 TEST_GEN_PROGS_arm64 += arm64/vpmu_counter_access
 TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3
+TEST_GEN_PROGS_arm64 += arm64/kvm-uuid
 TEST_GEN_PROGS_arm64 += access_tracking_perf_test
 TEST_GEN_PROGS_arm64 += arch_timer
 TEST_GEN_PROGS_arm64 += coalesced_io_test
@@ -146,7 +146,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 
 	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 
-	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+	el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
 	return el0 == ID_AA64PFR0_EL1_EL0_IMP;
 }
 
@@ -116,12 +116,12 @@ static void reset_debug_state(void)
 
 	/* Reset all bcr/bvr/wcr/wvr registers */
 	dfr0 = read_sysreg(id_aa64dfr0_el1);
-	brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
+	brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0);
 	for (i = 0; i <= brps; i++) {
 		write_dbgbcr(i, 0);
 		write_dbgbvr(i, 0);
 	}
-	wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
+	wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0);
 	for (i = 0; i <= wrps; i++) {
 		write_dbgwcr(i, 0);
 		write_dbgwvr(i, 0);
@@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt)
 
 static int debug_version(uint64_t id_aa64dfr0)
 {
-	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
+	return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
 }
 
 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
@@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
 	int b, w, c;
 
 	/* Number of breakpoints */
-	brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
+	brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1;
 	__TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
 
 	/* Number of watchpoints */
-	wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
+	wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1;
 
 	/* Number of context aware breakpoints */
-	ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
+	ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1;
 
 	pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
 		 brp_num, wrp_num, ctx_brp_num);
tools/testing/selftests/kvm/arm64/kvm-uuid.c (new file, 70 lines)
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Check that nobody has tampered with KVM's UID
+
+#include <errno.h>
+#include <linux/arm-smccc.h>
+#include <asm/kvm.h>
+#include <kvm_util.h>
+
+#include "processor.h"
+
+/*
+ * Do NOT redefine these constants, or try to replace them with some
+ * "common" version. They are hardcoded here to detect any potential
+ * breakage happening in the rest of the kernel.
+ *
+ * KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74
+ */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0	0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1	0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2	0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3	0x743a004dU
+
+static void guest_code(void)
+{
+	struct arm_smccc_res res = {};
+
+	smccc_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+	__GUEST_ASSERT(res.a0 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 &&
+		       res.a1 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 &&
+		       res.a2 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 &&
+		       res.a3 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3,
+		       "Unexpected KVM-specific UID %lx %lx %lx %lx\n", res.a0, res.a1, res.a2, res.a3);
+	GUEST_DONE();
+}
+
+int main (int argc, char *argv[])
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	struct ucall uc;
+	bool guest_done = false;
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+	while (!guest_done) {
+		vcpu_run(vcpu);
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_SYNC:
+			break;
+		case UCALL_DONE:
+			guest_done = true;
+			break;
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			break;
+		case UCALL_PRINTF:
+			printf("%s", uc.buffer);
+			break;
+		default:
+			TEST_FAIL("Unexpected guest exit");
+		}
+	}
+
+	kvm_vm_free(vm);
+
+	return 0;
+}
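
For reference, the four ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_* constants in the new test are simply the UID 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 packed as little-endian 32-bit words, matching the values the test expects back in res.a0..res.a3. A standalone sketch (not part of the test, assumes a little-endian host) showing that packing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Raw bytes of 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
	const uint8_t uuid[16] = {
		0x28, 0xb4, 0x6f, 0xb6, 0x2e, 0xc5, 0x11, 0xe9,
		0xa9, 0xca, 0x4b, 0x56, 0x4d, 0x00, 0x3a, 0x74,
	};
	uint32_t w[4];

	memcpy(w, uuid, sizeof(w));	/* reinterpret as four 32-bit words */
	printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
	/* prints: b66fb428 e911c52e 564bcaa9 743a004d */
	return 0;
}
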
@@ -54,7 +54,7 @@ static void guest_code(void)
 	 * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
 	 * hidden the feature at runtime without any other userspace action.
 	 */
-	__GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC),
+	__GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
 				 read_sysreg(id_aa64pfr0_el1)) == 0,
 		       "GICv3 wrongly advertised");
 
@@ -165,7 +165,7 @@ int main(int argc, char *argv[])
 
 	vm = vm_create_with_one_vcpu(&vcpu, NULL);
 	pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-	__TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
+	__TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0),
 		       "GICv3 not supported.");
 	kvm_vm_free(vm);
 
@@ -95,14 +95,14 @@ static bool guest_check_lse(void)
 	uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
 	uint64_t atomic;
 
-	atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
+	atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
 	return atomic >= 2;
 }
 
 static bool guest_check_dc_zva(void)
 {
 	uint64_t dczid = read_sysreg(dczid_el0);
-	uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
+	uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
 
 	return dzp == 0;
 }
@@ -195,7 +195,7 @@ static bool guest_set_ha(void)
 	uint64_t hadbs, tcr;
 
 	/* Skip if HA is not supported. */
-	hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
+	hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
 	if (hadbs == 0)
 		return false;
 
@@ -243,6 +243,7 @@ static void guest_code(void)
 	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
 	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
 	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR3_EL1);
 	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
 	GUEST_REG_SYNC(SYS_CTR_EL0);
 	GUEST_REG_SYNC(SYS_MIDR_EL1);
@@ -594,8 +595,8 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
 	 */
 	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 
-	mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val);
-	mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+	mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
+	mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
 	if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
 	    mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
 		ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
@@ -612,7 +613,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
 	}
 
 	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
-	mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+	mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
 	if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
 		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
 	else
@@ -774,7 +775,7 @@ int main(void)
 
 	/* Check for AARCH64 only system */
 	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+	el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
 	aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
 
 	ksft_print_header();
@@ -441,7 +441,7 @@ static void create_vpmu_vm(void *guest_code)
 
 	/* Make sure that PMUv3 support is indicated in the ID register */
 	dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
-	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
+	pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0);
 	TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
 		    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
 		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
@@ -573,15 +573,15 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
 	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
 
-	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
+	gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4, val);
 	*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
 					ID_AA64MMFR0_EL1_TGRAN4_52_BIT);
 
-	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
+	gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN64, val);
 	*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
 					ID_AA64MMFR0_EL1_TGRAN64_IMP);
 
-	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
+	gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN16, val);
 	*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
 					ID_AA64MMFR0_EL1_TGRAN16_52_BIT);
 
|
Loading…
Reference in New Issue
Block a user