diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index cac3c2b51d72..5ec503288555 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <linux/nospec.h>
 #include
 #include
 #include
@@ -182,9 +183,14 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long *out_val)
 {
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
 
-	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
+	if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
 		return -ENOENT;
+	if (reg_num >= regs_max)
+		return -ENOENT;
+
+	reg_num = array_index_nospec(reg_num, regs_max);
 
	*out_val = 0;
	if (kvm_riscv_aia_available())
@@ -198,9 +204,14 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long val)
 {
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
 
-	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
+	if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
 		return -ENOENT;
+	if (reg_num >= regs_max)
+		return -ENOENT;
+
+	reg_num = array_index_nospec(reg_num, regs_max);
 
	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;
diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
index d1e50bf5c351..3464f3351df7 100644
--- a/arch/riscv/kvm/aia_aplic.c
+++ b/arch/riscv/kvm/aia_aplic.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <linux/nospec.h>
 #include
 #include
 #include
@@ -45,7 +46,7 @@ static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
	if (!irq || aplic->nr_irqs <= irq)
		return 0;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->sourcecfg;
@@ -61,7 +62,7 @@ static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
	if (!irq || aplic->nr_irqs <= irq)
		return;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	if (val & APLIC_SOURCECFG_D)
		val = 0;
@@ -81,7 +82,7 @@ static u32 aplic_read_target(struct aplic *aplic, u32 irq)
	if (!irq || aplic->nr_irqs <= irq)
		return 0;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->target;
@@ -97,7 +98,7 @@ static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
	if (!irq || aplic->nr_irqs <= irq)
		return;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	val &= APLIC_TARGET_EIID_MASK |
	       (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
@@ -116,7 +117,7 @@ static bool aplic_read_pending(struct aplic *aplic, u32 irq)
	if (!irq || aplic->nr_irqs <= irq)
		return false;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ?
		true : false;
@@ -132,7 +133,7 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
	if (!irq || aplic->nr_irqs <= irq)
		return;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
 
@@ -170,7 +171,7 @@ static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
	if (!irq || aplic->nr_irqs <= irq)
		return false;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
@@ -186,7 +187,7 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
	if (!irq || aplic->nr_irqs <= irq)
		return;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
	if (enabled)
@@ -205,7 +206,7 @@ static bool aplic_read_input(struct aplic *aplic, u32 irq)
	if (!irq || aplic->nr_irqs <= irq)
		return false;
-	irqd = &aplic->irqs[irq];
+	irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
 
@@ -254,7 +255,7 @@ static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
	for (irq = first; irq <= last; irq++) {
		if (!irq || aplic->nr_irqs <= irq)
			continue;
-		irqd = &aplic->irqs[irq];
+		irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
		raw_spin_lock_irqsave(&irqd->lock, flags);
 
@@ -283,7 +284,7 @@ int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
-	irqd = &aplic->irqs[source];
+	irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ?
		true : false;
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
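The recurring change in the hunks above and below is the Spectre v1 (bounds-check-bypass) mitigation pattern: the guest-controlled index is still rejected architecturally when out of range, and the value that survives the check is additionally clamped with array_index_nospec() before it is used to address an array. A minimal, self-contained sketch of that pattern follows; it is illustrative only (the register-file layout, helper name, and the -ENOENT choice are assumptions, not the kernel code itself):

#include <linux/errno.h>
#include <linux/nospec.h>

/*
 * Illustrative sketch only: read one slot of a fixed-size register file
 * indexed by an untrusted, guest-supplied reg_num.
 */
static int read_guest_reg(const unsigned long *regs, unsigned long nr_regs,
			  unsigned long reg_num, unsigned long *out_val)
{
	/* Architectural bounds check: reject invalid indexes outright. */
	if (reg_num >= nr_regs)
		return -ENOENT;

	/*
	 * Clamp the index so a mispredicted branch above cannot be used to
	 * load out-of-bounds data speculatively (Spectre v1).
	 */
	reg_num = array_index_nospec(reg_num, nr_regs);

	*out_val = regs[reg_num];
	return 0;
}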
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index b195a93add1c..49c71d3cdb00 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 
 static int aia_create(struct kvm_device *dev, u32 type)
 {
@@ -22,6 +23,9 @@ static int aia_create(struct kvm_device *dev, u32 type)
	if (irqchip_in_kernel(kvm))
		return -EEXIST;
 
+	if (!riscv_isa_extension_available(NULL, SSAIA))
+		return -ENODEV;
+
	ret = -EBUSY;
	if (kvm_trylock_all_vcpus(kvm))
		return ret;
@@ -437,7 +441,7 @@ static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 
 static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
-	int nr_vcpus;
+	int nr_vcpus, r = -ENXIO;
 
	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
@@ -466,12 +470,18 @@ static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
-		return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+		mutex_lock(&dev->kvm->lock);
+		r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+		mutex_unlock(&dev->kvm->lock);
+		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
-		return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+		mutex_lock(&dev->kvm->lock);
+		r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+		mutex_unlock(&dev->kvm->lock);
+		break;
	}
 
-	return -ENXIO;
+	return r;
 }
 
 struct kvm_device_ops kvm_riscv_aia_device_ops = {
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 06752fa24798..8786f52cf65a 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -908,6 +908,10 @@ int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
	int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
	struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
 
+	/* If IMSIC vCPU state not initialized then forward to user space */
+	if (!imsic)
+		return KVM_INSN_EXIT_TO_USER_SPACE;
+
	if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
		/* Read pending and enabled interrupt with highest priority */
		topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 0b75eb2a1820..088d33ba90ed 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -245,6 +245,7 @@ out:
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
	struct kvm_gstage gstage;
+	bool mmu_locked;
 
	if (!kvm->arch.pgd)
		return false;
@@ -253,9 +254,12 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
+	mmu_locked = spin_trylock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
				     (range->end - range->start) << PAGE_SHIFT,
				     range->may_block);
+	if (mmu_locked)
+		spin_unlock(&kvm->mmu_lock);
 
	return false;
 }
@@ -535,7 +539,7 @@ int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		goto out_unlock;
 
	/* Check if we are backed by a THP and thus use block mapping if possible */
-	if (vma_pagesize == PAGE_SIZE)
+	if (!logging && (vma_pagesize == PAGE_SIZE))
		vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
							   hva, &hfn, &gpa);
 
	if (writable) {
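In the kvm_unmap_gfn_range() hunk above, the lock is taken opportunistically with spin_trylock() and released only if this call actually acquired it, which covers callers that may already hold kvm->mmu_lock. A small sketch of that conditional-locking shape (assumed helper name, not the kernel function itself):

#include <linux/spinlock.h>

/* Sketch: unmap a range whether or not the caller already holds mmu_lock. */
static void unmap_range_maybe_locked(spinlock_t *mmu_lock)
{
	/* spin_trylock() succeeds only if this path is the one taking the lock. */
	bool mmu_locked = spin_trylock(mmu_lock);

	/* ... walk and unmap the g-stage range here ... */

	/* Drop the lock only if this function was the one that acquired it. */
	if (mmu_locked)
		spin_unlock(mmu_lock);
}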
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 030904d82b58..bd5a9e7e7165 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <linux/nospec.h>
 #include
 #include
 
@@ -93,9 +94,11 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
-			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
+			reg_num = array_index_nospec(reg_num,
+						     ARRAY_SIZE(cntx->fp.f.f));
			reg_val = &cntx->fp.f.f[reg_num];
-		else
+		} else
			return -ENOENT;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
@@ -107,6 +110,8 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
+			reg_num = array_index_nospec(reg_num,
+						     ARRAY_SIZE(cntx->fp.d.f));
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -ENOENT;
@@ -138,9 +143,11 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
-			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
+			reg_num = array_index_nospec(reg_num,
+						     ARRAY_SIZE(cntx->fp.f.f));
			reg_val = &cntx->fp.f.f[reg_num];
-		else
+		} else
			return -ENOENT;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
@@ -152,6 +159,8 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
+			reg_num = array_index_nospec(reg_num,
+						     ARRAY_SIZE(cntx->fp.d.f));
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -ENOENT;
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index e7ab6cb00646..45ecc0082e90 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <linux/nospec.h>
 #include
 #include
 #include
@@ -127,6 +128,7 @@ static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *g
	    kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;
 
+	kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr));
	*guest_ext = kvm_isa_ext_arr[kvm_ext];
	switch (*guest_ext) {
	case RISCV_ISA_EXT_SMNPM:
@@ -443,13 +445,16 @@ static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
+	unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
	unsigned long reg_val;
 
	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
-	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+	if (reg_num >= regs_max)
		return -ENOENT;
 
+	reg_num = array_index_nospec(reg_num, regs_max);
+
	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
@@ -476,13 +481,16 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
+	unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
	unsigned long reg_val;
 
	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
-	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+	if (reg_num >= regs_max)
		return -ENOENT;
 
+	reg_num = array_index_nospec(reg_num, regs_max);
+
	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;
 
@@ -507,10 +515,13 @@ static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long *out_val)
 {
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
 
-	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+	if (reg_num >= regs_max)
		return -ENOENT;
 
+	reg_num = array_index_nospec(reg_num, regs_max);
+
	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
@@ -526,10 +537,13 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_val)
 {
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
 
-	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+	if (reg_num >= regs_max)
		return -ENOENT;
 
+	reg_num = array_index_nospec(reg_num, regs_max);
+
	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
@@ -548,10 +562,15 @@ static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_val)
 {
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+	unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
+				 sizeof(unsigned long);
 
-	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
-		       sizeof(unsigned long))
-		return -EINVAL;
+	if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+		return -ENOENT;
+	if (reg_num >= regs_max)
+		return -ENOENT;
+
+	reg_num = array_index_nospec(reg_num, regs_max);
 
	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
@@ -562,10 +581,15 @@ static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long *out_val)
 {
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+	unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
+				 sizeof(unsigned long);
 
-	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
-		       sizeof(unsigned long))
-		return -EINVAL;
+	if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+		return -ENOENT;
+	if (reg_num >= regs_max)
+		return -ENOENT;
+
+	reg_num = array_index_nospec(reg_num, regs_max);
 
	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
@@ -595,10 +619,7 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
-		rc = -EINVAL;
-		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
-			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
-							      &reg_val);
+		rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -ENOENT;
@@ -640,10 +661,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
-		rc = -EINVAL;
-		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
-			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
-							      reg_val);
+		rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -ENOENT;
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 4d8d5e9aa53d..e873430e596b 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <linux/nospec.h>
 #include
 #include
 #include
@@ -87,7 +88,8 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
 
 static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
 {
-	return hw_event_perf_map[sbi_event_code];
+	return hw_event_perf_map[array_index_nospec(sbi_event_code,
+						    SBI_PMU_HW_GENERAL_MAX)];
 }
 
 static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
@@ -218,6 +220,7 @@ static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
		return -EINVAL;
	}
 
+	cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
	pmc = &kvpmu->pmc[cidx];
 
	if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
@@ -244,6 +247,7 @@ static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
		return -EINVAL;
	}
 
+	cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
	pmc = &kvpmu->pmc[cidx];
 
	if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
@@ -520,11 +524,12 @@ int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
 {
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
 
-	if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) {
+	if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) {
		retdata->err_val = SBI_ERR_INVALID_PARAM;
		return 0;
	}
 
+	cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
	retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
 
	return 0;
@@ -559,7 +564,8 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
	}
 
	/* Start the counters that have been configured and requested by the guest */
	for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
-		pmc_index = i + ctr_base;
+		pmc_index = array_index_nospec(i + ctr_base,
+					       RISCV_KVM_MAX_COUNTERS);
		if (!test_bit(pmc_index, kvpmu->pmc_in_use))
			continue;
		/* The guest started the counter again. Reset the overflow status */
@@ -630,7 +636,8 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
 
	/* Stop the counters that have been configured and requested by the guest */
	for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
-		pmc_index = i + ctr_base;
+		pmc_index = array_index_nospec(i + ctr_base,
+					       RISCV_KVM_MAX_COUNTERS);
		if (!test_bit(pmc_index, kvpmu->pmc_in_use))
			continue;
		pmc = &kvpmu->pmc[pmc_index];
@@ -761,6 +768,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
		}
	}
 
+	ctr_idx = array_index_nospec(ctr_idx, RISCV_KVM_MAX_COUNTERS);
	pmc = &kvpmu->pmc[ctr_idx];
	pmc->idx = ctr_idx;
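Besides the index clamping, the kvm_riscv_vcpu_pmu_ctr_info() hunk fixes an off-by-one: with a counter array of RISCV_KVM_MAX_COUNTERS entries, index RISCV_KVM_MAX_COUNTERS itself is already out of range, so the rejection test must use ">=" rather than ">". A hedged sketch of the resulting validate-then-clamp flow (helper name and the constant's value are assumed for illustration only):

#include <linux/nospec.h>

#define NR_COUNTERS 64	/* stand-in for RISCV_KVM_MAX_COUNTERS; value assumed */

/* Sketch: validate a guest-supplied counter index, then clamp it. */
static bool counter_index_ok(unsigned long *cidx)
{
	/* Valid indexes are 0 .. NR_COUNTERS - 1; index 1 is rejected as above. */
	if (*cidx >= NR_COUNTERS || *cidx == 1)
		return false;

	*cidx = array_index_nospec(*cidx, NR_COUNTERS);
	return true;
}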