KVM: arm64: nv: Respect exception routing rules for SEAs
Synchronous external aborts are taken to EL2 if ELIsInHost() or HCR_EL2.TEA=1. Rework the SEA injection plumbing to respect the routing imposed by the guest hypervisor, and opportunistically rephrase things to make their function a bit more obvious.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-6-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 9aba641b9e (parent aae35f4ffb)
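For reference, the routing rule the message describes reduces to a predicate on the guest hypervisor's HCR_EL2. A stand-alone illustrative sketch (not kernel code; the in-tree check is the kvm_sea_target_is_el2() helper added below, and the bit positions follow the Arm ARM):

#include <stdbool.h>
#include <stdint.h>

#define HCR_TGE (UINT64_C(1) << 27) /* Trap General Exceptions to EL2 */
#define HCR_E2H (UINT64_C(1) << 34) /* VHE: EL2 hosts an OS */
#define HCR_TEA (UINT64_C(1) << 37) /* route synchronous External Aborts to EL2 */

/*
 * An SEA targets EL2 when ELIsInHost() holds (E2H and TGE both set) or
 * when TEA is set.  TGE=1 on its own already routes EL0/EL1 exceptions
 * to EL2, so the whole test folds into a TGE|TEA check.
 */
static bool sea_targets_el2(uint64_t hcr_el2)
{
	return (hcr_el2 & (HCR_TGE | HCR_TEA)) != 0;
}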
arch/arm64/include/asm/kvm_emulate.h
@@ -46,15 +46,25 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+	return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+	return kvm_inject_sea(vcpu, true, addr);
+}
+
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 
 static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
 {
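The helpers now return an int so callers can propagate the run-loop disposition directly. A hypothetical caller (the function name below is illustrative, not part of the patch) would use them like this:

/* Hypothetical exit handler; not part of the patch. */
static int handle_external_abort(struct kvm_vcpu *vcpu, u64 far, bool is_exec)
{
	/*
	 * kvm_inject_sea_{iabt,dabt}() return 1 ("resume the guest") once
	 * the abort is pending, or a negative error from the nested path.
	 */
	if (is_exec)
		return kvm_inject_sea_iabt(vcpu, far);

	return kvm_inject_sea_dabt(vcpu, far);
}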
arch/arm64/kvm/emulate-nested.c
@@ -2811,3 +2811,13 @@ int kvm_inject_nested_irq(struct kvm_vcpu *vcpu)
 	/* esr_el2 value doesn't matter for exits due to irqs. */
 	return kvm_inject_nested(vcpu, 0, except_type_irq);
 }
+
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK,
+			     iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
+	esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
+
+	vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
+	return kvm_inject_nested_sync(vcpu, esr);
+}
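For concreteness, the ESR_EL2 value that kvm_inject_nested_sea() builds works out as follows (field layout per the Arm ARM; 0x20, 0x24 and 0x10 are the kernel's ESR_ELx_EC_IABT_LOW, ESR_ELx_EC_DABT_LOW and ESR_ELx_FSC_EXTABT values):

/*
 * EC  = ESR[31:26]: 0x20 (IABT, lower EL) or 0x24 (DABT, lower EL)
 * IL  = ESR[25]:    1 (32-bit instruction length)
 * FSC = ESR[5:0]:   0x10 (synchronous external abort)
 *
 *   iabt: (0x20 << 26) | (1 << 25) | 0x10 = 0x82000010
 *   dabt: (0x24 << 26) | (1 << 25) | 0x10 = 0x92000010
 */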
arch/arm64/kvm/guest.c
@@ -839,6 +839,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
+	int ret = 0;
 
 	if (serror_pending && has_esr) {
 		if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
@@ -853,9 +854,9 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	}
 
 	if (ext_dabt_pending)
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
-	return 0;
+	return (ret < 0) ? ret : 0;
 }
 
 u32 __attribute_const__ kvm_target_cpu(void)
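The (ret < 0) ? ret : 0 collapse keeps the ioctl contract intact: KVM_SET_VCPU_EVENTS must return 0 on success, while the injection helpers return the run-loop's 1. A minimal userspace sketch of the path that now lands in kvm_inject_sea_dabt() (vcpu_fd is assumed to be an already-created vCPU file descriptor):

#include <err.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to make a synchronous external data abort pending on the vCPU. */
static void set_ext_dabt_pending(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.ext_dabt_pending = 1;

	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_SET_VCPU_EVENTS");
}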
arch/arm64/kvm/inject_fault.c
@@ -155,36 +155,28 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
-/**
- * kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the data abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 {
 	if (vcpu_el1_is_32bit(vcpu))
-		inject_abt32(vcpu, false, addr);
+		inject_abt32(vcpu, iabt, addr);
 	else
-		inject_abt64(vcpu, false, addr);
+		inject_abt64(vcpu, iabt, addr);
 }
 
-/**
- * kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the prefetch abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_el1_is_32bit(vcpu))
-		inject_abt32(vcpu, true, addr);
-	else
-		inject_abt64(vcpu, true, addr);
+	return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA);
+}
+
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+	lockdep_assert_held(&vcpu->mutex);
+
+	if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
+		return kvm_inject_nested_sea(vcpu, iabt, addr);
+
+	__kvm_inject_sea(vcpu, iabt, addr);
+	return 1;
 }
 
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
@@ -194,10 +186,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
 	addr = kvm_vcpu_get_fault_ipa(vcpu);
 	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
-	if (kvm_vcpu_trap_is_iabt(vcpu))
-		kvm_inject_pabt(vcpu, addr);
-	else
-		kvm_inject_dabt(vcpu, addr);
+	__kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);
 
 	/*
 	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
arch/arm64/kvm/mmio.c
@@ -169,10 +169,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 		trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 				    kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
-		if (vcpu_is_protected(vcpu)) {
-			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-			return 1;
-		}
+		if (vcpu_is_protected(vcpu))
+			return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
 		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
 			     &vcpu->kvm->arch.flags)) {
arch/arm64/kvm/mmu.c
@@ -1836,11 +1836,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) {
 			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
-			if (is_iabt)
-				kvm_inject_pabt(vcpu, fault_ipa);
-			else
-				kvm_inject_dabt(vcpu, fault_ipa);
-			return 1;
+			return kvm_inject_sea(vcpu, is_iabt, fault_ipa);
 		}
 	}
 
@@ -1912,8 +1908,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_vcpu_abt_iss1tw(vcpu)) {
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		ret = 1;
+		ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		goto out_unlock;
 	}
 
@@ -1958,10 +1953,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	if (ret == 0)
 		ret = 1;
 out:
-	if (ret == -ENOEXEC) {
-		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		ret = 1;
-	}
+	if (ret == -ENOEXEC)
+		ret = kvm_inject_sea_iabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 out_unlock:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	return ret;