From bf0313353ec29c40799abbde3841c419c281aa44 Mon Sep 17 00:00:00 2001
From: "Edgar E. Iglesias"
Date: Sat, 17 Feb 2018 13:23:53 -0500
Subject: [PATCH] target-arm: Add computation of starting level for S2 PTW

The starting level for S2 pagetable walks is computed differently from
the S1 starting level. Implement the S2 variant.

Backports commit 1853d5a9dcac910322c6cc5b2fddec45fd052d25 from qemu
---
 qemu/target-arm/helper.c    | 114 ++++++++++++++++++++++++++++++++----
 qemu/target-arm/internals.h |  25 ++++++++
 2 files changed, 126 insertions(+), 13 deletions(-)

diff --git a/qemu/target-arm/helper.c b/qemu/target-arm/helper.c
index b09ecbe1..89a0f2ce 100644
--- a/qemu/target-arm/helper.c
+++ b/qemu/target-arm/helper.c
@@ -5821,12 +5821,72 @@ typedef enum {
     permission_fault = 3,
 } MMUFaultType;
 
+/*
+ * check_s2_startlevel
+ * @cpu: ARMCPU
+ * @is_aa64: True if the translation regime is in AArch64 state
+ * @startlevel: Suggested starting level
+ * @inputsize: Bitsize of IPAs
+ * @stride: Page-table stride (See the ARM ARM)
+ *
+ * Returns true if the suggested starting level is OK and false otherwise.
+ */
+static bool check_s2_startlevel(ARMCPU *cpu, bool is_aa64, int level,
+                                int inputsize, int stride)
+{
+    /* Negative levels are never allowed. */
+    if (level < 0) {
+        return false;
+    }
+
+    if (is_aa64) {
+        unsigned int pamax = arm_pamax(cpu);
+
+        switch (stride) {
+        case 13: /* 64KB Pages. */
+            if (level == 0 || (level == 1 && pamax <= 42)) {
+                return false;
+            }
+            break;
+        case 11: /* 16KB Pages. */
+            if (level == 0 || (level == 1 && pamax <= 40)) {
+                return false;
+            }
+            break;
+        case 9: /* 4KB Pages. */
+            if (level == 0 && pamax <= 42) {
+                return false;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    } else {
+        const int grainsize = stride + 3;
+        int startsizecheck;
+
+        /* AArch32 only supports 4KB pages. Assert on that. */
+        assert(stride == 9);
+
+        if (level == 0) {
+            return false;
+        }
+
+        startsizecheck = inputsize - ((3 - level) * stride + grainsize);
+        if (startsizecheck < 1 || startsizecheck > stride + 4) {
+            return false;
+        }
+    }
+    return true;
+}
+
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                int access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr, uint32_t *fsr)
 {
-    CPUState *cs = CPU(arm_env_get_cpu(env));
+    ARMCPU *cpu = arm_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
     /* Read an LPAE long-descriptor translation table. */
     MMUFaultType fault_type = translation_fault;
     uint32_t level = 1;
@@ -5981,18 +6041,46 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
         goto do_fault;
     }
 
-    /* The starting level depends on the virtual address size (which can be
-     * up to 48 bits) and the translation granule size. It indicates the number
-     * of strides (stride bits at a time) needed to consume the bits
-     * of the input address. In the pseudocode this is:
-     *  level = 4 - RoundUp((inputsize - grainsize) / stride)
-     * where their 'inputsize' is our 'inputsize', 'grainsize' is
-     * our 'stride + 3' and 'stride' is our 'stride'.
-     * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
-     * = 4 - (inputsize - stride - 3 + stride - 1) / stride
-     * = 4 - (inputsize - 4) / stride;
-     */
-    level = 4 - (inputsize - 4) / stride;
+    if (mmu_idx != ARMMMUIdx_S2NS) {
+        /* The starting level depends on the virtual address size (which can
+         * be up to 48 bits) and the translation granule size. It indicates
+         * the number of strides (stride bits at a time) needed to
+         * consume the bits of the input address. In the pseudocode this is:
+         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
+         * where their 'inputsize' is our 'inputsize', 'grainsize' is
+         * our 'stride + 3' and 'stride' is our 'stride'.
+         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
+         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
+         * = 4 - (inputsize - 4) / stride;
+         */
+        level = 4 - (inputsize - 4) / stride;
+    } else {
+        /* For stage 2 translations the starting level is specified by the
+         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
+         */
+        int startlevel = extract32(tcr->raw_tcr, 6, 2);
+        bool ok;
+
+        if (va_size == 32 || stride == 9) {
+            /* AArch32 or 4KB pages */
+            level = 2 - startlevel;
+        } else {
+            /* 16KB or 64KB pages */
+            level = 3 - startlevel;
+        }
+
+        /* Check that the starting level is valid. */
+        ok = check_s2_startlevel(cpu, va_size == 64, level,
+                                 inputsize, stride);
+        if (!ok) {
+            /* AArch64 reports these as level 0 faults.
+             * AArch32 reports these as level 1 faults.
+             */
+            level = va_size == 64 ? 0 : 1;
+            fault_type = translation_fault;
+            goto do_fault;
+        }
+    }
 
     /* Clear the vaddr bits which aren't part of the within-region address,
      * so that we don't have to special case things when calculating the
diff --git a/qemu/target-arm/internals.h b/qemu/target-arm/internals.h
index 67a4e096..361bc717 100644
--- a/qemu/target-arm/internals.h
+++ b/qemu/target-arm/internals.h
@@ -154,6 +154,31 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
     aarch64_restore_sp(env, cur_el);
 }
 
+/*
+ * arm_pamax
+ * @cpu: ARMCPU
+ *
+ * Returns the implementation defined bit-width of physical addresses.
+ * The ARMv8 reference manuals refer to this as PAMax().
+ */
+static inline unsigned int arm_pamax(ARMCPU *cpu)
+{
+    static const unsigned int pamax_map[] = {
+        32,
+        36,
+        40,
+        42,
+        44,
+        48,
+    };
+    unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);
+
+    /* id_aa64mmfr0 is a read-only register so values outside of the
+     * supported mappings can be considered an implementation error. */
+    assert(parange < ARRAY_SIZE(pamax_map));
+    return pamax_map[parange];
+}
+
 /* Return true if extended addresses are enabled.
  * This is always the case if our translation regime is 64 bit,
  * but depends on TTBCR.EAE for 32 bit.
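
Note (not part of the patch): the starting-level arithmetic introduced above can
be exercised in isolation. The following is a minimal, hypothetical sketch; the
helper names s1_start_level() and s2_start_level() do not exist in QEMU and
simply mirror the formulas quoted in the patch (stride 9/11/13 for 4KB/16KB/64KB
granules, VTCR_EL2.SL0 selecting the stage 2 starting level).

#include <stdio.h>

/* Stage 1: level = 4 - RoundUp((inputsize - grainsize) / stride), which the
 * patch comment simplifies to 4 - (inputsize - 4) / stride. */
static int s1_start_level(int inputsize, int stride)
{
    return 4 - (inputsize - 4) / stride;
}

/* Stage 2: the level is read from VTCR_EL2.SL0 rather than computed; the
 * mapping depends on the granule and on AArch32 vs AArch64, as in the new
 * else-branch of get_phys_addr_lpae(). */
static int s2_start_level(int sl0, int stride, int va_size)
{
    if (va_size == 32 || stride == 9) {
        return 2 - sl0;     /* AArch32 or 4KB pages */
    }
    return 3 - sl0;         /* 16KB or 64KB pages */
}

int main(void)
{
    /* A 40-bit input address with a 4KB granule needs a level 0 entry
     * (bit 39 is indexed at level 0); a 32-bit one starts at level 1. */
    printf("S1, 40-bit, 4KB: level %d\n", s1_start_level(40, 9));     /* 0 */
    printf("S1, 32-bit, 4KB: level %d\n", s1_start_level(32, 9));     /* 1 */

    /* SL0 = 1 with 4KB pages starts the stage 2 walk at level 1;
     * SL0 = 1 with 64KB pages starts it at level 2. */
    printf("S2, SL0=1, 4KB:  level %d\n", s2_start_level(1, 9, 64));  /* 1 */
    printf("S2, SL0=1, 64KB: level %d\n", s2_start_level(1, 13, 64)); /* 2 */
    return 0;
}

In the patch itself, check_s2_startlevel() then rejects combinations that the
chosen granule and the implementation's PAMax cannot support, for example a
level 0 start with 64KB pages.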