mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-21 23:16:50 +08:00
Merge tag 'kvm-riscv-fixes-7.0-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv fixes for 7.0, take #1 - Prevent speculative out-of-bounds access using array_index_nospec() in APLIC interrupt handling, ONE_REG register access, AIA CSR access, float register access, and PMU counter access - Fix potential use-after-free issues in kvm_riscv_gstage_get_leaf(), kvm_riscv_aia_aplic_has_attr(), and kvm_riscv_aia_imsic_has_attr() - Fix potential null pointer dereference in kvm_riscv_vcpu_aia_rmw_topei() - Fix off-by-one array access in SBI PMU - Skip THP support check during dirty logging - Fix error code returned for Smstateen and Ssaia ONE_REG interface - Check host Ssaia extension when creating AIA irqchip
This commit is contained in:
@@ -13,6 +13,7 @@
|
|||||||
#include <linux/irqchip/riscv-imsic.h>
|
#include <linux/irqchip/riscv-imsic.h>
|
||||||
#include <linux/irqdomain.h>
|
#include <linux/irqdomain.h>
|
||||||
#include <linux/kvm_host.h>
|
#include <linux/kvm_host.h>
|
||||||
|
#include <linux/nospec.h>
|
||||||
#include <linux/percpu.h>
|
#include <linux/percpu.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
#include <asm/cpufeature.h>
|
#include <asm/cpufeature.h>
|
||||||
@@ -182,9 +183,14 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long *out_val)
|
unsigned long *out_val)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
|
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
|
||||||
|
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
|
if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
if (reg_num >= regs_max)
|
||||||
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
*out_val = 0;
|
*out_val = 0;
|
||||||
if (kvm_riscv_aia_available())
|
if (kvm_riscv_aia_available())
|
||||||
@@ -198,9 +204,14 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long val)
|
unsigned long val)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
|
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
|
||||||
|
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
|
if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
if (reg_num >= regs_max)
|
||||||
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
if (kvm_riscv_aia_available()) {
|
if (kvm_riscv_aia_available()) {
|
||||||
((unsigned long *)csr)[reg_num] = val;
|
((unsigned long *)csr)[reg_num] = val;
|
||||||
|
|||||||
@@ -10,6 +10,7 @@
|
|||||||
#include <linux/irqchip/riscv-aplic.h>
|
#include <linux/irqchip/riscv-aplic.h>
|
||||||
#include <linux/kvm_host.h>
|
#include <linux/kvm_host.h>
|
||||||
#include <linux/math.h>
|
#include <linux/math.h>
|
||||||
|
#include <linux/nospec.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
#include <linux/swab.h>
|
#include <linux/swab.h>
|
||||||
#include <kvm/iodev.h>
|
#include <kvm/iodev.h>
|
||||||
@@ -45,7 +46,7 @@ static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return 0;
|
return 0;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
ret = irqd->sourcecfg;
|
ret = irqd->sourcecfg;
|
||||||
@@ -61,7 +62,7 @@ static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return;
|
return;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
if (val & APLIC_SOURCECFG_D)
|
if (val & APLIC_SOURCECFG_D)
|
||||||
val = 0;
|
val = 0;
|
||||||
@@ -81,7 +82,7 @@ static u32 aplic_read_target(struct aplic *aplic, u32 irq)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return 0;
|
return 0;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
ret = irqd->target;
|
ret = irqd->target;
|
||||||
@@ -97,7 +98,7 @@ static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return;
|
return;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
val &= APLIC_TARGET_EIID_MASK |
|
val &= APLIC_TARGET_EIID_MASK |
|
||||||
(APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
|
(APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
|
||||||
@@ -116,7 +117,7 @@ static bool aplic_read_pending(struct aplic *aplic, u32 irq)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return false;
|
return false;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
|
ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
|
||||||
@@ -132,7 +133,7 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return;
|
return;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
|
|
||||||
@@ -170,7 +171,7 @@ static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return false;
|
return false;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
|
ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
|
||||||
@@ -186,7 +187,7 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return;
|
return;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
if (enabled)
|
if (enabled)
|
||||||
@@ -205,7 +206,7 @@ static bool aplic_read_input(struct aplic *aplic, u32 irq)
|
|||||||
|
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
return false;
|
return false;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
|
|
||||||
@@ -254,7 +255,7 @@ static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
|
|||||||
for (irq = first; irq <= last; irq++) {
|
for (irq = first; irq <= last; irq++) {
|
||||||
if (!irq || aplic->nr_irqs <= irq)
|
if (!irq || aplic->nr_irqs <= irq)
|
||||||
continue;
|
continue;
|
||||||
irqd = &aplic->irqs[irq];
|
irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
|
|
||||||
@@ -283,7 +284,7 @@ int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
|
|||||||
|
|
||||||
if (!aplic || !source || (aplic->nr_irqs <= source))
|
if (!aplic || !source || (aplic->nr_irqs <= source))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
irqd = &aplic->irqs[source];
|
irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)];
|
||||||
ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;
|
ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;
|
||||||
|
|
||||||
raw_spin_lock_irqsave(&irqd->lock, flags);
|
raw_spin_lock_irqsave(&irqd->lock, flags);
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
#include <linux/irqchip/riscv-imsic.h>
|
#include <linux/irqchip/riscv-imsic.h>
|
||||||
#include <linux/kvm_host.h>
|
#include <linux/kvm_host.h>
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
|
#include <linux/cpufeature.h>
|
||||||
|
|
||||||
static int aia_create(struct kvm_device *dev, u32 type)
|
static int aia_create(struct kvm_device *dev, u32 type)
|
||||||
{
|
{
|
||||||
@@ -22,6 +23,9 @@ static int aia_create(struct kvm_device *dev, u32 type)
|
|||||||
if (irqchip_in_kernel(kvm))
|
if (irqchip_in_kernel(kvm))
|
||||||
return -EEXIST;
|
return -EEXIST;
|
||||||
|
|
||||||
|
if (!riscv_isa_extension_available(NULL, SSAIA))
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
if (kvm_trylock_all_vcpus(kvm))
|
if (kvm_trylock_all_vcpus(kvm))
|
||||||
return ret;
|
return ret;
|
||||||
@@ -437,7 +441,7 @@ static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
|
|||||||
|
|
||||||
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
|
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
|
||||||
{
|
{
|
||||||
int nr_vcpus;
|
int nr_vcpus, r = -ENXIO;
|
||||||
|
|
||||||
switch (attr->group) {
|
switch (attr->group) {
|
||||||
case KVM_DEV_RISCV_AIA_GRP_CONFIG:
|
case KVM_DEV_RISCV_AIA_GRP_CONFIG:
|
||||||
@@ -466,12 +470,18 @@ static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case KVM_DEV_RISCV_AIA_GRP_APLIC:
|
case KVM_DEV_RISCV_AIA_GRP_APLIC:
|
||||||
return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
|
mutex_lock(&dev->kvm->lock);
|
||||||
|
r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
|
||||||
|
mutex_unlock(&dev->kvm->lock);
|
||||||
|
break;
|
||||||
case KVM_DEV_RISCV_AIA_GRP_IMSIC:
|
case KVM_DEV_RISCV_AIA_GRP_IMSIC:
|
||||||
return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
|
mutex_lock(&dev->kvm->lock);
|
||||||
|
r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
|
||||||
|
mutex_unlock(&dev->kvm->lock);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return -ENXIO;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct kvm_device_ops kvm_riscv_aia_device_ops = {
|
struct kvm_device_ops kvm_riscv_aia_device_ops = {
|
||||||
|
|||||||
@@ -908,6 +908,10 @@ int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
|
|||||||
int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
|
int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
|
||||||
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
|
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
|
||||||
|
|
||||||
|
/* If IMSIC vCPU state not initialized then forward to user space */
|
||||||
|
if (!imsic)
|
||||||
|
return KVM_INSN_EXIT_TO_USER_SPACE;
|
||||||
|
|
||||||
if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
|
if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
|
||||||
/* Read pending and enabled interrupt with highest priority */
|
/* Read pending and enabled interrupt with highest priority */
|
||||||
topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
|
topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
|
||||||
|
|||||||
@@ -245,6 +245,7 @@ out:
|
|||||||
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
|
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
|
||||||
{
|
{
|
||||||
struct kvm_gstage gstage;
|
struct kvm_gstage gstage;
|
||||||
|
bool mmu_locked;
|
||||||
|
|
||||||
if (!kvm->arch.pgd)
|
if (!kvm->arch.pgd)
|
||||||
return false;
|
return false;
|
||||||
@@ -253,9 +254,12 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
|
|||||||
gstage.flags = 0;
|
gstage.flags = 0;
|
||||||
gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
|
gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
|
||||||
gstage.pgd = kvm->arch.pgd;
|
gstage.pgd = kvm->arch.pgd;
|
||||||
|
mmu_locked = spin_trylock(&kvm->mmu_lock);
|
||||||
kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
|
kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
|
||||||
(range->end - range->start) << PAGE_SHIFT,
|
(range->end - range->start) << PAGE_SHIFT,
|
||||||
range->may_block);
|
range->may_block);
|
||||||
|
if (mmu_locked)
|
||||||
|
spin_unlock(&kvm->mmu_lock);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -535,7 +539,7 @@ int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
|
|||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
|
||||||
/* Check if we are backed by a THP and thus use block mapping if possible */
|
/* Check if we are backed by a THP and thus use block mapping if possible */
|
||||||
if (vma_pagesize == PAGE_SIZE)
|
if (!logging && (vma_pagesize == PAGE_SIZE))
|
||||||
vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);
|
vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);
|
||||||
|
|
||||||
if (writable) {
|
if (writable) {
|
||||||
|
|||||||
@@ -10,6 +10,7 @@
|
|||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
#include <linux/kvm_host.h>
|
#include <linux/kvm_host.h>
|
||||||
|
#include <linux/nospec.h>
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
#include <asm/cpufeature.h>
|
#include <asm/cpufeature.h>
|
||||||
|
|
||||||
@@ -93,9 +94,11 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
|
|||||||
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
|
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
|
||||||
reg_val = &cntx->fp.f.fcsr;
|
reg_val = &cntx->fp.f.fcsr;
|
||||||
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
|
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
|
||||||
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
|
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
|
||||||
|
reg_num = array_index_nospec(reg_num,
|
||||||
|
ARRAY_SIZE(cntx->fp.f.f));
|
||||||
reg_val = &cntx->fp.f.f[reg_num];
|
reg_val = &cntx->fp.f.f[reg_num];
|
||||||
else
|
} else
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
|
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
|
||||||
riscv_isa_extension_available(vcpu->arch.isa, d)) {
|
riscv_isa_extension_available(vcpu->arch.isa, d)) {
|
||||||
@@ -107,6 +110,8 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
|
|||||||
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
|
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
|
||||||
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
|
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
reg_num = array_index_nospec(reg_num,
|
||||||
|
ARRAY_SIZE(cntx->fp.d.f));
|
||||||
reg_val = &cntx->fp.d.f[reg_num];
|
reg_val = &cntx->fp.d.f[reg_num];
|
||||||
} else
|
} else
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
@@ -138,9 +143,11 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
|
|||||||
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
|
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
|
||||||
reg_val = &cntx->fp.f.fcsr;
|
reg_val = &cntx->fp.f.fcsr;
|
||||||
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
|
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
|
||||||
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
|
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
|
||||||
|
reg_num = array_index_nospec(reg_num,
|
||||||
|
ARRAY_SIZE(cntx->fp.f.f));
|
||||||
reg_val = &cntx->fp.f.f[reg_num];
|
reg_val = &cntx->fp.f.f[reg_num];
|
||||||
else
|
} else
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
|
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
|
||||||
riscv_isa_extension_available(vcpu->arch.isa, d)) {
|
riscv_isa_extension_available(vcpu->arch.isa, d)) {
|
||||||
@@ -152,6 +159,8 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
|
|||||||
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
|
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
|
||||||
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
|
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
reg_num = array_index_nospec(reg_num,
|
||||||
|
ARRAY_SIZE(cntx->fp.d.f));
|
||||||
reg_val = &cntx->fp.d.f[reg_num];
|
reg_val = &cntx->fp.d.f[reg_num];
|
||||||
} else
|
} else
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|||||||
@@ -10,6 +10,7 @@
|
|||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
|
#include <linux/nospec.h>
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
#include <linux/kvm_host.h>
|
#include <linux/kvm_host.h>
|
||||||
#include <asm/cacheflush.h>
|
#include <asm/cacheflush.h>
|
||||||
@@ -127,6 +128,7 @@ static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *g
|
|||||||
kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
|
kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
|
kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr));
|
||||||
*guest_ext = kvm_isa_ext_arr[kvm_ext];
|
*guest_ext = kvm_isa_ext_arr[kvm_ext];
|
||||||
switch (*guest_ext) {
|
switch (*guest_ext) {
|
||||||
case RISCV_ISA_EXT_SMNPM:
|
case RISCV_ISA_EXT_SMNPM:
|
||||||
@@ -443,13 +445,16 @@ static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
|
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
|
||||||
KVM_REG_SIZE_MASK |
|
KVM_REG_SIZE_MASK |
|
||||||
KVM_REG_RISCV_CORE);
|
KVM_REG_RISCV_CORE);
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
|
||||||
unsigned long reg_val;
|
unsigned long reg_val;
|
||||||
|
|
||||||
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
|
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
|
if (reg_num >= regs_max)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
|
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
|
||||||
reg_val = cntx->sepc;
|
reg_val = cntx->sepc;
|
||||||
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
|
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
|
||||||
@@ -476,13 +481,16 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
|
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
|
||||||
KVM_REG_SIZE_MASK |
|
KVM_REG_SIZE_MASK |
|
||||||
KVM_REG_RISCV_CORE);
|
KVM_REG_RISCV_CORE);
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
|
||||||
unsigned long reg_val;
|
unsigned long reg_val;
|
||||||
|
|
||||||
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
|
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
|
if (reg_num >= regs_max)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
|
if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
|
||||||
@@ -507,10 +515,13 @@ static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long *out_val)
|
unsigned long *out_val)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
|
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
|
||||||
|
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
|
if (reg_num >= regs_max)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
|
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
|
||||||
kvm_riscv_vcpu_flush_interrupts(vcpu);
|
kvm_riscv_vcpu_flush_interrupts(vcpu);
|
||||||
*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
|
*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
|
||||||
@@ -526,10 +537,13 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long reg_val)
|
unsigned long reg_val)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
|
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
|
||||||
|
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
|
if (reg_num >= regs_max)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
|
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
|
||||||
reg_val &= VSIP_VALID_MASK;
|
reg_val &= VSIP_VALID_MASK;
|
||||||
reg_val <<= VSIP_TO_HVIP_SHIFT;
|
reg_val <<= VSIP_TO_HVIP_SHIFT;
|
||||||
@@ -548,10 +562,15 @@ static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long reg_val)
|
unsigned long reg_val)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
|
struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
|
||||||
|
sizeof(unsigned long);
|
||||||
|
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
|
if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
|
||||||
sizeof(unsigned long))
|
return -ENOENT;
|
||||||
return -EINVAL;
|
if (reg_num >= regs_max)
|
||||||
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
((unsigned long *)csr)[reg_num] = reg_val;
|
((unsigned long *)csr)[reg_num] = reg_val;
|
||||||
return 0;
|
return 0;
|
||||||
@@ -562,10 +581,15 @@ static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long *out_val)
|
unsigned long *out_val)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
|
struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
|
||||||
|
unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
|
||||||
|
sizeof(unsigned long);
|
||||||
|
|
||||||
if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
|
if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
|
||||||
sizeof(unsigned long))
|
return -ENOENT;
|
||||||
return -EINVAL;
|
if (reg_num >= regs_max)
|
||||||
|
return -ENOENT;
|
||||||
|
|
||||||
|
reg_num = array_index_nospec(reg_num, regs_max);
|
||||||
|
|
||||||
*out_val = ((unsigned long *)csr)[reg_num];
|
*out_val = ((unsigned long *)csr)[reg_num];
|
||||||
return 0;
|
return 0;
|
||||||
@@ -595,10 +619,7 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
|
|||||||
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val);
|
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val);
|
||||||
break;
|
break;
|
||||||
case KVM_REG_RISCV_CSR_SMSTATEEN:
|
case KVM_REG_RISCV_CSR_SMSTATEEN:
|
||||||
rc = -EINVAL;
|
rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, ®_val);
|
||||||
if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
|
|
||||||
rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
|
|
||||||
®_val);
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
rc = -ENOENT;
|
rc = -ENOENT;
|
||||||
@@ -640,10 +661,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
|
|||||||
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
|
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
|
||||||
break;
|
break;
|
||||||
case KVM_REG_RISCV_CSR_SMSTATEEN:
|
case KVM_REG_RISCV_CSR_SMSTATEEN:
|
||||||
rc = -EINVAL;
|
rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
|
||||||
if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
|
|
||||||
rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
|
|
||||||
reg_val);
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
rc = -ENOENT;
|
rc = -ENOENT;
|
||||||
|
|||||||
@@ -10,6 +10,7 @@
|
|||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
#include <linux/kvm_host.h>
|
#include <linux/kvm_host.h>
|
||||||
|
#include <linux/nospec.h>
|
||||||
#include <linux/perf/riscv_pmu.h>
|
#include <linux/perf/riscv_pmu.h>
|
||||||
#include <asm/csr.h>
|
#include <asm/csr.h>
|
||||||
#include <asm/kvm_vcpu_sbi.h>
|
#include <asm/kvm_vcpu_sbi.h>
|
||||||
@@ -87,7 +88,8 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
|
|||||||
|
|
||||||
static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
|
static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
|
||||||
{
|
{
|
||||||
return hw_event_perf_map[sbi_event_code];
|
return hw_event_perf_map[array_index_nospec(sbi_event_code,
|
||||||
|
SBI_PMU_HW_GENERAL_MAX)];
|
||||||
}
|
}
|
||||||
|
|
||||||
static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
|
static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
|
||||||
@@ -218,6 +220,7 @@ static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
|
||||||
pmc = &kvpmu->pmc[cidx];
|
pmc = &kvpmu->pmc[cidx];
|
||||||
|
|
||||||
if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
|
if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
|
||||||
@@ -244,6 +247,7 @@ static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
|
||||||
pmc = &kvpmu->pmc[cidx];
|
pmc = &kvpmu->pmc[cidx];
|
||||||
|
|
||||||
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
|
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
|
||||||
@@ -520,11 +524,12 @@ int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
|
|||||||
{
|
{
|
||||||
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
|
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
|
||||||
|
|
||||||
if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) {
|
if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) {
|
||||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
|
||||||
retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
|
retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@@ -559,7 +564,8 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
|
|||||||
}
|
}
|
||||||
/* Start the counters that have been configured and requested by the guest */
|
/* Start the counters that have been configured and requested by the guest */
|
||||||
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
|
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
|
||||||
pmc_index = i + ctr_base;
|
pmc_index = array_index_nospec(i + ctr_base,
|
||||||
|
RISCV_KVM_MAX_COUNTERS);
|
||||||
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
|
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
|
||||||
continue;
|
continue;
|
||||||
/* The guest started the counter again. Reset the overflow status */
|
/* The guest started the counter again. Reset the overflow status */
|
||||||
@@ -630,7 +636,8 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
|
|||||||
|
|
||||||
/* Stop the counters that have been configured and requested by the guest */
|
/* Stop the counters that have been configured and requested by the guest */
|
||||||
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
|
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
|
||||||
pmc_index = i + ctr_base;
|
pmc_index = array_index_nospec(i + ctr_base,
|
||||||
|
RISCV_KVM_MAX_COUNTERS);
|
||||||
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
|
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
|
||||||
continue;
|
continue;
|
||||||
pmc = &kvpmu->pmc[pmc_index];
|
pmc = &kvpmu->pmc[pmc_index];
|
||||||
@@ -761,6 +768,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctr_idx = array_index_nospec(ctr_idx, RISCV_KVM_MAX_COUNTERS);
|
||||||
pmc = &kvpmu->pmc[ctr_idx];
|
pmc = &kvpmu->pmc[ctr_idx];
|
||||||
pmc->idx = ctr_idx;
|
pmc->idx = ctr_idx;
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user