KVM: arm64: Get rid of ARM64_FEATURE_MASK()
The ARM64_FEATURE_MASK() macro was a hack introduced whilst the automatic generation of sysreg encodings was brought in, and was too unreliable to be entirely trusted. We are in a better place now, and we could really do without this macro. Get rid of it altogether.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250817202158.395078-7-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 0843e0ced3
parent 7a765aa88e
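For context, ARM64_FEATURE_MASK() did nothing more than paste a _MASK suffix onto a generated sysreg field name. Below is a minimal sketch of the old and new styles, assuming (as the new hunks imply) that a generated field name now evaluates to the field mask itself; the GENMASK_ULL() value is an illustrative stand-in, not the real ID_AA64PFR0_EL1.EL0 encoding.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* The macro being removed: it only pasted a _MASK suffix onto its argument. */
#define ARM64_FEATURE_MASK(x)           (x##_MASK)

/* Illustrative stand-ins for the generated sysreg definitions. */
#define ID_AA64PFR0_EL1_EL0_MASK        GENMASK_ULL(3, 0)
#define ID_AA64PFR0_EL1_EL0             ID_AA64PFR0_EL1_EL0_MASK

static inline u64 pfr0_el0_old(u64 pfr0)
{
        /* Old style: name the mask indirectly via ARM64_FEATURE_MASK(). */
        return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), pfr0);
}

static inline u64 pfr0_el0_new(u64 pfr0)
{
        /* New style: the field name is itself usable as the mask. */
        return FIELD_GET(ID_AA64PFR0_EL1_EL0, pfr0);
}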
@@ -1146,9 +1146,6 @@
 
 #define ARM64_FEATURE_FIELD_BITS 4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x) (x##_MASK)
-
 #ifdef __ASSEMBLY__
 
 .macro mrs_s, rt, sreg
@@ -2404,12 +2404,12 @@ static u64 get_hyp_id_aa64pfr0_el1(void)
         */
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
-       val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
-                ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+       val &= ~(ID_AA64PFR0_EL1_CSV2 |
+                ID_AA64PFR0_EL1_CSV3);
 
-       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+       val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
                          arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
-       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+       val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
                          arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
 
        return val;
@@ -1615,18 +1615,18 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+                       val &= ~(ID_AA64ISAR1_EL1_APA |
+                                ID_AA64ISAR1_EL1_API |
+                                ID_AA64ISAR1_EL1_GPA |
+                                ID_AA64ISAR1_EL1_GPI);
                break;
        case SYS_ID_AA64ISAR2_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
+                       val &= ~(ID_AA64ISAR2_EL1_APA3 |
+                                ID_AA64ISAR2_EL1_GPA3);
                if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
                    has_broken_cntvoff())
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+                       val &= ~ID_AA64ISAR2_EL1_WFxT;
                break;
        case SYS_ID_AA64ISAR3_EL1:
                val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
@@ -1642,7 +1642,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                        ID_AA64MMFR3_EL1_S1PIE;
                break;
        case SYS_ID_MMFR4_EL1:
-               val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+               val &= ~ID_MMFR4_EL1_CCIDX;
                break;
        }
 
@@ -1828,22 +1828,22 @@ static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
        u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
        if (!kvm_has_mte(vcpu->kvm)) {
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+               val &= ~ID_AA64PFR1_EL1_MTE;
+               val &= ~ID_AA64PFR1_EL1_MTE_frac;
        }
 
        if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
              SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RAS_frac);
+               val &= ~ID_AA64PFR1_EL1_RAS_frac;
 
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
-       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+       val &= ~ID_AA64PFR1_EL1_SME;
+       val &= ~ID_AA64PFR1_EL1_RNDR_trap;
+       val &= ~ID_AA64PFR1_EL1_NMI;
+       val &= ~ID_AA64PFR1_EL1_GCS;
+       val &= ~ID_AA64PFR1_EL1_THE;
+       val &= ~ID_AA64PFR1_EL1_MTEX;
+       val &= ~ID_AA64PFR1_EL1_PFAR;
+       val &= ~ID_AA64PFR1_EL1_MPAM_frac;
 
        return val;
 }
@@ -1080,9 +1080,6 @@
 
 #define ARM64_FEATURE_FIELD_BITS 4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x) (x##_MASK)
-
 #ifdef __ASSEMBLY__
 
 .macro mrs_s, rt, sreg
@@ -146,7 +146,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 
-       el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+       el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
        return el0 == ID_AA64PFR0_EL1_EL0_IMP;
 }
 
@@ -116,12 +116,12 @@ static void reset_debug_state(void)
 
        /* Reset all bcr/bvr/wcr/wvr registers */
        dfr0 = read_sysreg(id_aa64dfr0_el1);
-       brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
+       brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0);
        for (i = 0; i <= brps; i++) {
                write_dbgbcr(i, 0);
                write_dbgbvr(i, 0);
        }
-       wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
+       wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0);
        for (i = 0; i <= wrps; i++) {
                write_dbgwcr(i, 0);
                write_dbgwvr(i, 0);
@@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt)
 
 static int debug_version(uint64_t id_aa64dfr0)
 {
-       return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
+       return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
 }
 
 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
@@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
        int b, w, c;
 
        /* Number of breakpoints */
-       brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
+       brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1;
        __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
 
        /* Number of watchpoints */
-       wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
+       wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1;
 
        /* Number of context aware breakpoints */
-       ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
+       ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1;
 
        pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
                 brp_num, wrp_num, ctx_brp_num);
@@ -54,7 +54,7 @@ static void guest_code(void)
         * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
         * hidden the feature at runtime without any other userspace action.
         */
-       __GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC),
+       __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
                                 read_sysreg(id_aa64pfr0_el1)) == 0,
                       "GICv3 wrongly advertised");
 
@@ -165,7 +165,7 @@ int main(int argc, char *argv[])
 
        vm = vm_create_with_one_vcpu(&vcpu, NULL);
        pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-       __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
+       __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0),
                       "GICv3 not supported.");
        kvm_vm_free(vm);
 
@@ -95,14 +95,14 @@ static bool guest_check_lse(void)
        uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
        uint64_t atomic;
 
-       atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
+       atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
        return atomic >= 2;
 }
 
 static bool guest_check_dc_zva(void)
 {
        uint64_t dczid = read_sysreg(dczid_el0);
-       uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
+       uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
 
        return dzp == 0;
 }
@@ -195,7 +195,7 @@ static bool guest_set_ha(void)
        uint64_t hadbs, tcr;
 
        /* Skip if HA is not supported. */
-       hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
+       hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
        if (hadbs == 0)
                return false;
 
@@ -594,8 +594,8 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
         */
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 
-       mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val);
-       mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+       mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
+       mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
        if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
            mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
                ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
@@ -612,7 +612,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
        }
 
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
-       mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+       mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
        if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
                ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
        else
@@ -774,7 +774,7 @@ int main(void)
 
        /* Check for AARCH64 only system */
        val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-       el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+       el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
        aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
 
        ksft_print_header();
@@ -441,7 +441,7 @@ static void create_vpmu_vm(void *guest_code)
 
        /* Make sure that PMUv3 support is indicated in the ID register */
        dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
-       pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
+       pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0);
        TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
                    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
                    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
@@ -573,15 +573,15 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
        err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
 
-       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
+       gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4, val);
        *ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
                                       ID_AA64MMFR0_EL1_TGRAN4_52_BIT);
 
-       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
+       gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN64, val);
        *ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
                                        ID_AA64MMFR0_EL1_TGRAN64_IMP);
 
-       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
+       gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN16, val);
        *ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
                                        ID_AA64MMFR0_EL1_TGRAN16_52_BIT);
 
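For reference, a bare mask is all that FIELD_GET()/FIELD_PREP() need, since both derive the shift from the mask's lowest set bit; that is why the converted code can pass the field definitions straight through. A rough user-space approximation of their semantics (the real <linux/bitfield.h> versions are compile-time-checked macros, not functions) might look like this:

#include <stdint.h>

/* Shift implied by a mask: position of its lowest set bit (mask != 0). */
static inline unsigned int bf_shift(uint64_t mask)
{
        return (unsigned int)__builtin_ctzll(mask);
}

/* Approximation of FIELD_GET(): extract a field from a register value. */
static inline uint64_t field_get(uint64_t mask, uint64_t reg)
{
        return (reg & mask) >> bf_shift(mask);
}

/* Approximation of FIELD_PREP(): position a value within a field. */
static inline uint64_t field_prep(uint64_t mask, uint64_t val)
{
        return (val << bf_shift(mask)) & mask;
}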