diff --git a/qemu/target/arm/cpu.h b/qemu/target/arm/cpu.h
index 2485b001..25b4eb97 100644
--- a/qemu/target/arm/cpu.h
+++ b/qemu/target/arm/cpu.h
@@ -3326,6 +3326,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
 }
 
+static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+}
+
 /*
  * Forward to the above feature tests given an ARMCPU pointer.
  */
diff --git a/qemu/target/arm/cpu64.c b/qemu/target/arm/cpu64.c
index 156a38ca..a35847a1 100644
--- a/qemu/target/arm/cpu64.c
+++ b/qemu/target/arm/cpu64.c
@@ -271,6 +271,7 @@ static void aarch64_max_initfn(struct uc_struct *uc, Object *obj, void *opaque)
 
         t = cpu->isar.id_aa64mmfr1;
         t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
+        t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
         cpu->isar.id_aa64mmfr1 = t;
 
         /* Replicate the same data to the 32-bit id registers.  */
diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
index 91b6936c..1cd16ce2 100644
--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -1145,6 +1145,7 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 {
     /* Begin with base v8.0 state.  */
     uint32_t valid_mask = 0x3fff;
+    ARMCPU *cpu = arm_env_get_cpu(env);
 
     if (arm_el_is_aa64(env, 3)) {
         value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
@@ -1167,6 +1168,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
             valid_mask &= ~SCR_SMD;
         }
     }
+    if (cpu_isar_feature(aa64_lor, cpu)) {
+        valid_mask |= SCR_TLOR;
+    }
 
     /* Clear all-context RES0 bits.  */
     value &= valid_mask;
@@ -3517,6 +3521,9 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
          */
         valid_mask &= ~HCR_TSC;
     }
+    if (cpu_isar_feature(aa64_lor, cpu)) {
+        valid_mask |= HCR_TLOR;
+    }
 
     /* Clear RES0 bits.  */
     value &= valid_mask;
@@ -4425,6 +4432,42 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
     return pfr0;
 }
 
+/* Shared logic between LORID and the rest of the LOR* registers.
+ * Secure state has already been dealt with.
+ */
+static CPAccessResult access_lor_ns(CPUARMState *env)
+{
+    int el = arm_current_el(env);
+
+    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
+        return CP_ACCESS_TRAP_EL3;
+    }
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
+                                   bool isread)
+{
+    if (arm_is_secure_below_el3(env)) {
+        /* Access ok in secure mode.  */
+        return CP_ACCESS_OK;
+    }
+    return access_lor_ns(env);
+}
+
+static CPAccessResult access_lor_other(CPUARMState *env,
+                                       const ARMCPRegInfo *ri, bool isread)
+{
+    if (arm_is_secure_below_el3(env)) {
+        /* Access denied in secure mode.  */
+        return CP_ACCESS_TRAP;
+    }
+    return access_lor_ns(env);
+}
+
 void register_cp_regs_for_features(ARMCPU *cpu)
 {
     /* Register all the coprocessor registers based on feature bits */
@@ -4981,6 +5024,38 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         define_one_arm_cp_reg(cpu, &sctlr);
     }
 
+    if (cpu_isar_feature(aa64_lor, cpu)) {
+        /*
+         * A trivial implementation of ARMv8.1-LOR leaves all of these
+         * registers fixed at 0, which indicates that there are zero
+         * supported Limited Ordering regions.
+         */
+        static const ARMCPRegInfo lor_reginfo[] = {
+            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
+              .access = PL1_RW, .accessfn = access_lor_other,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
+            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
+              .access = PL1_RW, .accessfn = access_lor_other,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
+            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
+              .access = PL1_RW, .accessfn = access_lor_other,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
+            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
+              .access = PL1_RW, .accessfn = access_lor_other,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
+            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
+              .access = PL1_R, .accessfn = access_lorid,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
+            REGINFO_SENTINEL
+        };
+        define_arm_cp_regs(cpu, lor_reginfo);
+    }
+
     if (cpu_isar_feature(aa64_sve, cpu)) {
         define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
         if (arm_feature(env, ARM_FEATURE_EL2)) {
diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c
index 00bcb79c..1abb8819 100644
--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -2351,6 +2351,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         return;
 
+    case 0x8: /* STLLR */
+        if (!dc_isar_feature(aa64_lor, s)) {
+            break;
+        }
+        /* StoreLORelease is the same as Store-Release for QEMU.  */
+        /* fall through */
     case 0x9: /* STLR */
         /* Generate ISS for non-exclusive accesses including LASR.  */
         if (rn == 31) {
@@ -2362,6 +2368,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         return;
 
+    case 0xc: /* LDLAR */
+        if (!dc_isar_feature(aa64_lor, s)) {
+            break;
+        }
+        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
+        /* fall through */
     case 0xd: /* LDAR */
         /* Generate ISS for non-exclusive accesses including LASR.  */
         if (rn == 31) {
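
For reference, the feature test and the cpu64.c hunk both revolve around the ID_AA64MMFR1.LO field, the 4-bit field at bits [19:16] that QEMU describes with FIELD(ID_AA64MMFR1, LO, 16, 4). Below is a minimal standalone sketch (plain C, hypothetical helper names, not the QEMU macros themselves) of the deposit/extract that FIELD_DP64 and FIELD_EX64 perform for this field; it only illustrates the bit manipulation, not QEMU's register plumbing.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: ID_AA64MMFR1.LO occupies bits [19:16] (4 bits wide). */
#define AA64MMFR1_LO_SHIFT  16
#define AA64MMFR1_LO_LENGTH 4

/* Hypothetical equivalent of FIELD_DP64(reg, ID_AA64MMFR1, LO, val). */
static uint64_t mmfr1_set_lo(uint64_t reg, uint64_t val)
{
    uint64_t mask = ((1ULL << AA64MMFR1_LO_LENGTH) - 1) << AA64MMFR1_LO_SHIFT;
    return (reg & ~mask) | ((val << AA64MMFR1_LO_SHIFT) & mask);
}

/* Hypothetical equivalent of FIELD_EX64(reg, ID_AA64MMFR1, LO). */
static uint64_t mmfr1_get_lo(uint64_t reg)
{
    return (reg >> AA64MMFR1_LO_SHIFT) & ((1ULL << AA64MMFR1_LO_LENGTH) - 1);
}

int main(void)
{
    uint64_t t = 0;

    /* Mirrors the cpu64.c hunk: the "max" CPU advertises LOR by setting LO to 1. */
    t = mmfr1_set_lo(t, 1);

    /* Mirrors isar_feature_aa64_lor(): a nonzero LO field means LOR is present. */
    printf("aa64_lor present: %d\n", mmfr1_get_lo(t) != 0);
    return 0;
}

Once the field is advertised this way, the LOR* register definitions and the LDLAR/STLLR decode paths in the diff become reachable through cpu_isar_feature(aa64_lor, cpu) and dc_isar_feature(aa64_lor, s) respectively.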