target/arm: Update arm_sctlr for VHE

Use the correct sctlr for EL2&0 regime. Due to header ordering,
and where arm_mmu_idx_el is declared, we need to move the function
out of line. Use the function in many more places in order to
select the correct control.

Backports commit aaec143212bb70ac9549cf73203d13100bd5c7c2 from qemu
This commit is contained in:
Richard Henderson 2020-03-21 15:27:27 -04:00 committed by Lioncash
parent 6073542afc
commit 3a5135473f
9 changed files with 25 additions and 25 deletions

View File

@ -3410,6 +3410,7 @@
#define arm_register_pre_el_change_hook arm_register_pre_el_change_hook_aarch64
#define arm_register_el_change_hook arm_register_el_change_hook_aarch64
#define arm_reset_cpu arm_reset_cpu_aarch64
#define arm_sctlr arm_sctlr_aarch64
#define arm_set_cpu_off arm_set_cpu_off_aarch64
#define arm_set_cpu_on arm_set_cpu_on_aarch64
#define arm_stage1_mmu_idx arm_stage1_mmu_idx_aarch64

View File

@ -3410,6 +3410,7 @@
#define arm_register_pre_el_change_hook arm_register_pre_el_change_hook_aarch64eb
#define arm_register_el_change_hook arm_register_el_change_hook_aarch64eb
#define arm_reset_cpu arm_reset_cpu_aarch64eb
#define arm_sctlr arm_sctlr_aarch64eb
#define arm_set_cpu_off arm_set_cpu_off_aarch64eb
#define arm_set_cpu_on arm_set_cpu_on_aarch64eb
#define arm_stage1_mmu_idx arm_stage1_mmu_idx_aarch64eb

View File

@ -3402,6 +3402,7 @@
#define arm_register_pre_el_change_hook arm_register_pre_el_change_hook_arm
#define arm_register_el_change_hook arm_register_el_change_hook_arm
#define arm_reset_cpu arm_reset_cpu_arm
#define arm_sctlr arm_sctlr_arm
#define arm_set_cpu_off arm_set_cpu_off_arm
#define arm_set_cpu_on arm_set_cpu_on_arm
#define arm_stage1_mmu_idx arm_stage1_mmu_idx_arm

View File

@ -3402,6 +3402,7 @@
#define arm_register_pre_el_change_hook arm_register_pre_el_change_hook_armeb
#define arm_register_el_change_hook arm_register_el_change_hook_armeb
#define arm_reset_cpu arm_reset_cpu_armeb
#define arm_sctlr arm_sctlr_armeb
#define arm_set_cpu_off arm_set_cpu_off_armeb
#define arm_set_cpu_on arm_set_cpu_on_armeb
#define arm_stage1_mmu_idx arm_stage1_mmu_idx_armeb

View File

@ -3411,6 +3411,7 @@ arm_symbols = (
'arm_register_pre_el_change_hook',
'arm_register_el_change_hook',
'arm_reset_cpu',
'arm_sctlr',
'arm_set_cpu_off',
'arm_set_cpu_on',
'arm_stage1_mmu_idx',
@ -3469,6 +3470,7 @@ aarch64_symbols = (
'arm_register_pre_el_change_hook',
'arm_register_el_change_hook',
'arm_reset_cpu',
'arm_sctlr',
'arm_set_cpu_off',
'arm_set_cpu_on',
'arm_stage1_mmu_idx',

View File

@ -3036,16 +3036,7 @@ static inline bool arm_sctlr_b(CPUARMState *env)
(env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}
static inline uint64_t arm_sctlr(CPUARMState *env, int el)
{
if (el == 0) {
/* FIXME: ARMv8.1-VHE S2 translation regime. */
return env->cp15.sctlr_el[1];
} else {
return env->cp15.sctlr_el[el];
}
}
uint64_t arm_sctlr(CPUARMState *env, int el);
/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)

View File

@ -69,7 +69,7 @@ static void daif_check(CPUARMState *env, uint32_t op,
uint32_t imm, uintptr_t ra)
{
/* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
raise_exception_ra(env, EXCP_UDEF,
syn_aa64_sysregtrap(0, extract32(op, 0, 3),
extract32(op, 3, 3), 4,

View File

@ -3689,7 +3689,7 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@ -3708,7 +3708,7 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
/* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
* SCTLR_EL1.UCI is set.
*/
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
@ -8579,14 +8579,24 @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
}
}
#ifndef CONFIG_USER_ONLY
/*
 * Return the SCTLR value governing execution at the given EL.
 *
 * For el != 0 this is simply SCTLR_ELx.  For el == 0 the controlling
 * register depends on which translation regime EL0 is running under:
 * in the EL2&0 (VHE) regime it is SCTLR_EL2, otherwise SCTLR_EL1.
 * We resolve this by asking for the current EL0 mmu index and checking
 * whether it is ARMMMUIdx_E20_0.
 */
uint64_t arm_sctlr(CPUARMState *env, int el)
{
/* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
/* E20_0 means EL0 under the EL2&0 regime -> SCTLR_EL2 applies. */
el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
}
return env->cp15.sctlr_el[el];
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
#ifndef CONFIG_USER_ONLY
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
ARMMMUIdx mmu_idx)
@ -11251,9 +11261,9 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
*pc = env->pc;
flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
{
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
int tbii, tbid;
@ -11288,7 +11298,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
}
sctlr = arm_sctlr(env, current_el);
sctlr = regime_sctlr(env, stage1);
if (cpu_isar_feature(aa64_pauth, cpu)) {
/*

View File

@ -386,14 +386,7 @@ static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit)
{
uint32_t sctlr;
if (el == 0) {
/* FIXME: ARMv8.1-VHE S2 translation regime. */
sctlr = env->cp15.sctlr_el[1];
} else {
sctlr = env->cp15.sctlr_el[el];
}
return (sctlr & bit) != 0;
return (arm_sctlr(env, el) & bit) != 0;
}
uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y)