commit fa7dd27bc3
Merge tag 'pull-target-arm-20230623' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * Add (experimental) support for FEAT_RME
 * host-utils: Avoid using __builtin_subcll on buggy versions of Apple Clang
 * target/arm: Restructure has_vfp_d32 test
 * hw/arm/sbsa-ref: add ITS support in SBSA GIC
 * target/arm: Fix sve predicate store, 8 <= VQ <= 15
 * pc-bios/keymaps: Use the official xkb name for Arabic layout, not the legacy synonym

# gpg: Signature made Fri 23 Jun 2023 02:30:31 PM CEST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]

* tag 'pull-target-arm-20230623' of https://git.linaro.org/people/pmaydell/qemu-arm: (26 commits)
  pc-bios/keymaps: Use the official xkb name for Arabic layout, not the legacy synonym
  target/arm: Fix sve predicate store, 8 <= VQ <= 15
  hw/arm/sbsa-ref: add ITS support in SBSA GIC
  target/arm: Restructure has_vfp_d32 test
  host-utils: Avoid using __builtin_subcll on buggy versions of Apple Clang
  docs/system/arm: Document FEAT_RME
  target/arm: Add cpu properties for enabling FEAT_RME
  target/arm: Implement the granule protection check
  target/arm: Implement GPC exceptions
  target/arm: Add GPC syndrome
  target/arm: Use get_phys_addr_with_struct for stage2
  target/arm: Move s1_is_el0 into S1Translate
  target/arm: Use get_phys_addr_with_struct in S1_ptw_translate
  target/arm: Handle no-execute for Realm and Root regimes
  target/arm: Handle Block and Page bits for security space
  target/arm: NSTable is RES0 for the RME EL3 regime
  target/arm: Pipe ARMSecuritySpace through ptw.c
  target/arm: Remove __attribute__((nonnull)) from ptw.c
  target/arm: Introduce ARMMMUIdx_Phys_{Realm,Root}
  target/arm: Adjust the order of Phys and Stage2 ARMMMUIdx
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
@@ -435,3 +435,26 @@ As with ``sve-default-vector-length``, if the default length is larger
 than the maximum vector length enabled, the actual vector length will
 be reduced. If this property is set to ``-1`` then the default vector
 length is set to the maximum possible length.
+
+RME CPU Properties
+==================
+
+The status of RME support with QEMU is experimental. At this time we
+only support RME within the CPU proper, not within the SMMU or GIC.
+The feature is enabled by the CPU property ``x-rme``, with the ``x-``
+prefix present as a reminder of the experimental status, and defaults off.
+
+The method for enabling RME will change in some future QEMU release
+without notice or backward compatibility.
+
+RME Level 0 GPT Size Property
+-----------------------------
+
+To aid firmware developers in testing different possible CPU
+configurations, ``x-l0gptsz=S`` may be used to specify the value
+to encode into ``GPCCR_EL3.L0GPTSZ``, a read-only field that
+specifies the size of the Level 0 Granule Protection Table.
+Legal values for ``S`` are 30, 34, 36, and 39; the default is 30.
+
+As with ``x-rme``, the ``x-l0gptsz`` property may be renamed or
+removed in some future QEMU release.
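
As an illustration (not part of the commit): an invocation exercising these properties might look like the line below. The ``virt,secure=on`` machine and ``max`` CPU model are assumptions for the example, and, as the documentation above warns, the ``x-`` property syntax is subject to change.

    qemu-system-aarch64 -M virt,secure=on \
        -cpu max,x-rme=on,x-l0gptsz=34 ...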
@@ -66,6 +66,7 @@ the following architecture extensions:
 - FEAT_RAS (Reliability, availability, and serviceability)
 - FEAT_RASv1p1 (RAS Extension v1.1)
 - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
+- FEAT_RME (Realm Management Extension) (NB: support status in QEMU is experimental)
 - FEAT_RNG (Random number generator)
 - FEAT_S2FWB (Stage 2 forced Write-Back)
 - FEAT_SB (Speculation Barrier)
@@ -46,6 +46,9 @@ to be a complete compliant DT. It currently reports:
 - platform version
 - GIC addresses
 
+Platform version
+''''''''''''''''
+
 The platform version is only for informing platform firmware about
 what kind of ``sbsa-ref`` board it is running on. It is neither
 a QEMU versioned machine type nor a reflection of the level of the
@@ -54,3 +57,14 @@ SBSA/SystemReady SR support provided.
 The ``machine-version-major`` value is updated when changes breaking
 fw compatibility are introduced. The ``machine-version-minor`` value
 is updated when features are added that don't break fw compatibility.
+
+Platform version changes:
+
+0.0
+  Devicetree holds information about CPUs, memory and platform version.
+
+0.1
+  GIC information is present in devicetree.
+
+0.2
+  GIC ITS information is present in devicetree.
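
As an illustration (not part of the commit): with the ``machine-version-major``/``machine-version-minor`` properties set in the hw/arm/sbsa-ref.c hunk later in this diff, the root node of the generated devicetree ends up looking roughly like this sketch:

    / {
        machine-version-major = <0>;
        machine-version-minor = <2>;
        /* ... CPU, memory, and GIC/ITS nodes ... */
    };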
@@ -65,6 +65,7 @@ enum {
     SBSA_CPUPERIPHS,
     SBSA_GIC_DIST,
     SBSA_GIC_REDIST,
+    SBSA_GIC_ITS,
     SBSA_SECURE_EC,
     SBSA_GWDT_WS0,
     SBSA_GWDT_REFRESH,
@@ -108,6 +109,7 @@ static const MemMapEntry sbsa_ref_memmap[] = {
     [SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 },
     [SBSA_GIC_DIST] = { 0x40060000, 0x00010000 },
     [SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 },
+    [SBSA_GIC_ITS] = { 0x44081000, 0x00020000 },
     [SBSA_SECURE_EC] = { 0x50000000, 0x00001000 },
     [SBSA_GWDT_REFRESH] = { 0x50010000, 0x00001000 },
     [SBSA_GWDT_CONTROL] = { 0x50011000, 0x00001000 },
@@ -181,8 +183,15 @@ static void sbsa_fdt_add_gic_node(SBSAMachineState *sms)
                                  2, sbsa_ref_memmap[SBSA_GIC_REDIST].base,
                                  2, sbsa_ref_memmap[SBSA_GIC_REDIST].size);
+
+    nodename = g_strdup_printf("/intc/its");
+    qemu_fdt_add_subnode(sms->fdt, nodename);
+    qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg",
+                                 2, sbsa_ref_memmap[SBSA_GIC_ITS].base,
+                                 2, sbsa_ref_memmap[SBSA_GIC_ITS].size);
+
     g_free(nodename);
 }
 
 /*
  * Firmware on this machine only uses ACPI table to load OS, these limited
  * device tree nodes are just to let firmware know the info which varies from
@@ -219,7 +228,7 @@ static void create_fdt(SBSAMachineState *sms)
      * fw compatibility.
      */
     qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0);
-    qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 1);
+    qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 2);
 
     if (ms->numa_state->have_numa_distance) {
         int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
@@ -409,7 +418,20 @@ static void create_secure_ram(SBSAMachineState *sms,
     memory_region_add_subregion(secure_sysmem, base, secram);
 }
 
-static void create_gic(SBSAMachineState *sms)
+static void create_its(SBSAMachineState *sms)
+{
+    const char *itsclass = its_class_name();
+    DeviceState *dev;
+
+    dev = qdev_new(itsclass);
+
+    object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(sms->gic),
+                             &error_abort);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, sbsa_ref_memmap[SBSA_GIC_ITS].base);
+}
+
+static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
 {
     unsigned int smp_cpus = MACHINE(sms)->smp.cpus;
     SysBusDevice *gicbusdev;
@@ -436,6 +458,10 @@ static void create_gic(SBSAMachineState *sms)
     qdev_prop_set_uint32(sms->gic, "len-redist-region-count", 1);
     qdev_prop_set_uint32(sms->gic, "redist-region-count[0]", redist0_count);
 
+    object_property_set_link(OBJECT(sms->gic), "sysmem",
+                             OBJECT(mem), &error_fatal);
+    qdev_prop_set_bit(sms->gic, "has-lpi", true);
+
     gicbusdev = SYS_BUS_DEVICE(sms->gic);
     sysbus_realize_and_unref(gicbusdev, &error_fatal);
     sysbus_mmio_map(gicbusdev, 0, sbsa_ref_memmap[SBSA_GIC_DIST].base);
@@ -482,6 +508,7 @@ static void create_gic(SBSAMachineState *sms)
         sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
                            qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
     }
+    create_its(sms);
 }
 
 static void create_uart(const SBSAMachineState *sms, int uart,
@@ -788,7 +815,7 @@ static void sbsa_ref_init(MachineState *machine)
 
     create_secure_ram(sms, secure_sysmem);
 
-    create_gic(sms);
+    create_gic(sms, sysmem);
 
     create_uart(sms, SBSA_UART, sysmem, serial_hd(0));
     create_uart(sms, SBSA_SECURE_UART, secure_sysmem, serial_hd(1));
@@ -29,10 +29,17 @@ typedef struct MemTxAttrs {
      * "didn't specify" if necessary.
      */
     unsigned int unspecified:1;
-    /* ARM/AMBA: TrustZone Secure access
+    /*
+     * ARM/AMBA: TrustZone Secure access
      * x86: System Management Mode access
      */
     unsigned int secure:1;
+    /*
+     * ARM: ArmSecuritySpace. This partially overlaps secure, but it is
+     * easier to have both fields to assist code that does not understand
+     * ARMv9 RME, or no specific knowledge of ARM at all (e.g. pflash).
+     */
+    unsigned int space:2;
     /* Memory access is usermode (unprivileged) */
     unsigned int user:1;
     /*
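
As an illustration (not part of the commit): code filling in MemTxAttrs is expected to keep the two fields consistent, which is what the ptw.c changes later in this diff do. A minimal sketch, where `space` is assumed to be an ARMSecuritySpace value in scope:

    /* 'secure' is the pre-v9 shadow of the v9 security 'space'. */
    MemTxAttrs attrs = {
        .secure = arm_space_is_secure(space),
        .space = space,
    };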
@@ -184,4 +184,17 @@
 #define QEMU_DISABLE_CFI
 #endif
 
+/*
+ * Apple clang version 14 has a bug in its __builtin_subcll(); define
+ * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it.
+ * When a version of Apple clang which has this bug fixed is released
+ * we can add an upper bound to this check.
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1631
+ * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details.
+ * The bug never made it into any upstream LLVM releases, only Apple ones.
+ */
+#if defined(__apple_build_version__) && __clang_major__ >= 14
+#define BUILTIN_SUBCLL_BROKEN
+#endif
+
 #endif /* COMPILER_H */
@@ -649,7 +649,7 @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
  */
 static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
 {
-#if __has_builtin(__builtin_subcll)
+#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
     unsigned long long b = *pborrow;
     x = __builtin_subcll(x, y, b, &b);
     *pborrow = b & 1;
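
For context (not part of the commit): __builtin_subcll() returns x - y - borrow-in and reports the borrow-out. The portable #else branch of usub64_borrow() is not shown in this hunk; a self-contained sketch of the same semantics, for reference only:

    #include <stdbool.h>
    #include <stdint.h>

    /* r = x - y - *pborrow; *pborrow is updated to the borrow-out. */
    static inline uint64_t usub64_borrow_sketch(uint64_t x, uint64_t y,
                                                bool *pborrow)
    {
        uint64_t t = x - y;
        bool b = x < y;                 /* borrow from x - y */
        uint64_t r = t - *pborrow;
        b |= t < (uint64_t)*pborrow;    /* borrow from subtracting borrow-in */
        *pborrow = b;
        return r;
    }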
@@ -1,5 +1,5 @@
 keymaps = {
-  'ar': '-l ar',
+  'ar': '-l ara',
   'bepo': '-l fr -v dvorak',
   'cz': '-l cz',
   'da': '-l dk',
@@ -1402,25 +1402,27 @@ void arm_cpu_post_init(Object *obj)
      * KVM does not currently allow us to lie to the guest about its
      * ID/feature registers, so the guest always sees what the host has.
      */
-    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
-        ? cpu_isar_feature(aa64_fp_simd, cpu)
-        : cpu_isar_feature(aa32_vfp, cpu)) {
-        cpu->has_vfp = true;
-        if (!kvm_enabled()) {
-            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
+    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+        if (cpu_isar_feature(aa64_fp_simd, cpu)) {
+            cpu->has_vfp = true;
+            cpu->has_vfp_d32 = true;
+            if (tcg_enabled() || qtest_enabled()) {
+                qdev_property_add_static(DEVICE(obj),
+                                         &arm_cpu_has_vfp_property);
+            }
         }
-    }
-
-    if (cpu->has_vfp && cpu_isar_feature(aa32_simd_r32, cpu)) {
-        cpu->has_vfp_d32 = true;
-        if (!kvm_enabled()) {
+    } else if (cpu_isar_feature(aa32_vfp, cpu)) {
+        cpu->has_vfp = true;
+        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
+            cpu->has_vfp_d32 = true;
             /*
              * The permitted values of the SIMDReg bits [3:0] on
              * Armv8-A are either 0b0000 and 0b0010. On such CPUs,
              * make sure that has_vfp_d32 can not be set to false.
              */
-            if (!(arm_feature(&cpu->env, ARM_FEATURE_V8) &&
-                  !arm_feature(&cpu->env, ARM_FEATURE_M))) {
+            if ((tcg_enabled() || qtest_enabled())
+                && !(arm_feature(&cpu->env, ARM_FEATURE_V8)
+                     && !arm_feature(&cpu->env, ARM_FEATURE_M))) {
                 qdev_property_add_static(DEVICE(obj),
                                          &arm_cpu_has_vfp_d32_property);
             }
@@ -1989,6 +1991,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
         cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
                                            ID_AA64PFR0, EL3, 0);
+
+        /* Disable the realm management extension, which requires EL3. */
+        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
+                                           ID_AA64PFR0, RME, 0);
     }
 
     if (!cpu->has_el2) {
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
@@ -57,6 +57,7 @@
 #define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
 #define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
 #define EXCP_VSERR 24
+#define EXCP_GPC 25 /* v9 Granule Protection Check Fault */
 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET 1
@@ -541,6 +542,11 @@ typedef struct CPUArchState {
         uint64_t fgt_read[2]; /* HFGRTR, HDFGRTR */
         uint64_t fgt_write[2]; /* HFGWTR, HDFGWTR */
         uint64_t fgt_exec[1]; /* HFGITR */
+
+        /* RME registers */
+        uint64_t gpccr_el3;
+        uint64_t gptbr_el3;
+        uint64_t mfar_el3;
     } cp15;
 
     struct {
@@ -1055,6 +1061,7 @@ struct ArchCPU {
     uint64_t reset_cbar;
     uint32_t reset_auxcr;
     bool reset_hivecs;
+    uint8_t reset_l0gptsz;
 
     /*
      * Intermediate values used during property parsing.
@@ -1655,7 +1662,7 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 #define HCR_TERR (1ULL << 36)
 #define HCR_TEA (1ULL << 37)
 #define HCR_MIOCNCE (1ULL << 38)
-/* RES0 bit 39 */
+#define HCR_TME (1ULL << 39)
 #define HCR_APK (1ULL << 40)
 #define HCR_API (1ULL << 41)
 #define HCR_NV (1ULL << 42)
@@ -1664,7 +1671,7 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 #define HCR_NV2 (1ULL << 45)
 #define HCR_FWB (1ULL << 46)
 #define HCR_FIEN (1ULL << 47)
-/* RES0 bit 48 */
+#define HCR_GPF (1ULL << 48)
 #define HCR_TID4 (1ULL << 49)
 #define HCR_TICAB (1ULL << 50)
 #define HCR_AMVOFFEN (1ULL << 51)
@@ -1729,6 +1736,7 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 #define SCR_TRNDR (1ULL << 40)
 #define SCR_ENTP2 (1ULL << 41)
 #define SCR_GPF (1ULL << 48)
+#define SCR_NSE (1ULL << 62)
 
 #define HSTR_TTEE (1 << 16)
 #define HSTR_TJDBX (1 << 17)
@@ -2195,6 +2203,7 @@ FIELD(ID_AA64PFR0, SEL2, 36, 4)
 FIELD(ID_AA64PFR0, MPAM, 40, 4)
 FIELD(ID_AA64PFR0, AMU, 44, 4)
 FIELD(ID_AA64PFR0, DIT, 48, 4)
+FIELD(ID_AA64PFR0, RME, 52, 4)
 FIELD(ID_AA64PFR0, CSV2, 56, 4)
 FIELD(ID_AA64PFR0, CSV3, 60, 4)
 
@@ -2339,6 +2348,19 @@ FIELD(MVFR1, SIMDFMAC, 28, 4)
 FIELD(MVFR2, SIMDMISC, 0, 4)
 FIELD(MVFR2, FPMISC, 4, 4)
 
+FIELD(GPCCR, PPS, 0, 3)
+FIELD(GPCCR, IRGN, 8, 2)
+FIELD(GPCCR, ORGN, 10, 2)
+FIELD(GPCCR, SH, 12, 2)
+FIELD(GPCCR, PGS, 14, 2)
+FIELD(GPCCR, GPC, 16, 1)
+FIELD(GPCCR, GPCP, 17, 1)
+FIELD(GPCCR, L0GPTSZ, 20, 4)
+
+FIELD(MFAR, FPA, 12, 40)
+FIELD(MFAR, NSE, 62, 1)
+FIELD(MFAR, NS, 63, 1)
+
 QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
 
 /* If adding a feature bit which corresponds to a Linux ELF
@@ -2393,25 +2415,53 @@ static inline int arm_feature(CPUARMState *env, int feature)
 
 void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
 
-#if !defined(CONFIG_USER_ONLY)
+/*
+ * ARM v9 security states.
+ * The ordering of the enumeration corresponds to the low 2 bits
+ * of the GPI value, and (except for Root) the concat of NSE:NS.
+ */
+
+typedef enum ARMSecuritySpace {
+    ARMSS_Secure = 0,
+    ARMSS_NonSecure = 1,
+    ARMSS_Root = 2,
+    ARMSS_Realm = 3,
+} ARMSecuritySpace;
+
+/* Return true if @space is secure, in the pre-v9 sense. */
+static inline bool arm_space_is_secure(ARMSecuritySpace space)
+{
+    return space == ARMSS_Secure || space == ARMSS_Root;
+}
+
+/* Return the ARMSecuritySpace for @secure, assuming !RME or EL[0-2]. */
+static inline ARMSecuritySpace arm_secure_to_space(bool secure)
+{
+    return secure ? ARMSS_Secure : ARMSS_NonSecure;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/**
+ * arm_security_space_below_el3:
+ * @env: cpu context
+ *
+ * Return the security space of exception levels below EL3, following
+ * an exception return to those levels. Unlike arm_security_space,
+ * this doesn't care about the current EL.
+ */
+ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env);
+
 /**
  * arm_is_secure_below_el3:
  * @env: cpu context
  *
  * Return true if exception levels below EL3 are in secure state,
- * or would be following an exception return to that level.
- * Unlike arm_is_secure() (which is always a question about the
- * _current_ state of the CPU) this doesn't care about the current
- * EL or mode.
+ * or would be following an exception return to those levels.
  */
 static inline bool arm_is_secure_below_el3(CPUARMState *env)
 {
-    assert(!arm_feature(env, ARM_FEATURE_M));
-    if (arm_feature(env, ARM_FEATURE_EL3)) {
-        return !(env->cp15.scr_el3 & SCR_NS);
-    } else {
-        /* If EL3 is not supported then the secure state is implementation
-         * defined, in which case QEMU defaults to non-secure.
-         */
-        return false;
-    }
+    ARMSecuritySpace ss = arm_security_space_below_el3(env);
+    return ss == ARMSS_Secure;
 }
 
 /* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
@@ -2431,16 +2481,23 @@ static inline bool arm_is_el3_or_mon(CPUARMState *env)
     return false;
 }
 
-/* Return true if the processor is in secure state */
+/**
+ * arm_security_space:
+ * @env: cpu context
+ *
+ * Return the current security space of the cpu.
+ */
+ARMSecuritySpace arm_security_space(CPUARMState *env);
+
+/**
+ * arm_is_secure:
+ * @env: cpu context
+ *
+ * Return true if the processor is in secure state.
+ */
 static inline bool arm_is_secure(CPUARMState *env)
 {
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        return env->v7m.secure;
-    }
-    if (arm_is_el3_or_mon(env)) {
-        return true;
-    }
-    return arm_is_secure_below_el3(env);
+    return arm_space_is_secure(arm_security_space(env));
 }
 
 /*
@@ -2459,11 +2516,21 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
 }
 
 #else
+static inline ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
+{
+    return ARMSS_NonSecure;
+}
+
 static inline bool arm_is_secure_below_el3(CPUARMState *env)
 {
     return false;
 }
 
+static inline ARMSecuritySpace arm_security_space(CPUARMState *env)
+{
+    return ARMSS_NonSecure;
+}
+
 static inline bool arm_is_secure(CPUARMState *env)
 {
     return false;
@@ -2794,18 +2861,20 @@ typedef enum ARMMMUIdx {
     ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
     ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
 
-    /* TLBs with 1-1 mapping to the physical address spaces. */
-    ARMMMUIdx_Phys_NS = 8 | ARM_MMU_IDX_A,
-    ARMMMUIdx_Phys_S = 9 | ARM_MMU_IDX_A,
-
     /*
      * Used for second stage of an S12 page table walk, or for descriptor
      * loads during first stage of an S1 page table walk. Note that both
      * are in use simultaneously for SecureEL2: the security state for
      * the S2 ptw is selected by the NS bit from the S1 ptw.
      */
-    ARMMMUIdx_Stage2 = 10 | ARM_MMU_IDX_A,
-    ARMMMUIdx_Stage2_S = 11 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A,
+
+    /* TLBs with 1-1 mapping to the physical address spaces. */
+    ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A,
 
     /*
      * These are not allocated TLBs and are used only for AT system
@@ -2869,6 +2938,23 @@ typedef enum ARMASIdx {
     ARMASIdx_TagS = 3,
 } ARMASIdx;
 
+static inline ARMMMUIdx arm_space_to_phys(ARMSecuritySpace space)
+{
+    /* Assert the relative order of the physical mmu indexes. */
+    QEMU_BUILD_BUG_ON(ARMSS_Secure != 0);
+    QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS != ARMMMUIdx_Phys_S + ARMSS_NonSecure);
+    QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Root != ARMMMUIdx_Phys_S + ARMSS_Root);
+    QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Realm != ARMMMUIdx_Phys_S + ARMSS_Realm);
+
+    return ARMMMUIdx_Phys_S + space;
+}
+
+static inline ARMSecuritySpace arm_phys_to_space(ARMMMUIdx idx)
+{
+    assert(idx >= ARMMMUIdx_Phys_S && idx <= ARMMMUIdx_Phys_Realm);
+    return idx - ARMMMUIdx_Phys_S;
+}
+
 static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
 {
     /* If all the CLIDR.Ctypem bits are 0 there are no caches, and
@@ -3814,6 +3900,11 @@ static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
 }
 
+static inline bool isar_feature_aa64_rme(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0;
+}
+
 static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
@@ -1855,6 +1855,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         }
         if (cpu_isar_feature(aa64_sel2, cpu)) {
             valid_mask |= SCR_EEL2;
+        } else if (cpu_isar_feature(aa64_rme, cpu)) {
+            /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
+            value |= SCR_NS;
         }
         if (cpu_isar_feature(aa64_mte, cpu)) {
             valid_mask |= SCR_ATA;
@@ -1874,6 +1877,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         if (cpu_isar_feature(aa64_fgt, cpu)) {
             valid_mask |= SCR_FGTEN;
         }
+        if (cpu_isar_feature(aa64_rme, cpu)) {
+            valid_mask |= SCR_NSE | SCR_GPF;
+        }
     } else {
         valid_mask &= ~(SCR_RW | SCR_ST);
         if (cpu_isar_feature(aa32_ras, cpu)) {
@@ -1903,10 +1909,10 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     env->cp15.scr_el3 = value;
 
     /*
-     * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
+     * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
      * we must invalidate all TLBs below EL3.
      */
-    if (changed & SCR_NS) {
+    if (changed & (SCR_NS | SCR_NSE)) {
         tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
                                            ARMMMUIdxBit_E20_0 |
                                            ARMMMUIdxBit_E10_1 |
@@ -5654,6 +5660,9 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
         if (cpu_isar_feature(aa64_fwb, cpu)) {
             valid_mask |= HCR_FWB;
         }
+        if (cpu_isar_feature(aa64_rme, cpu)) {
+            valid_mask |= HCR_GPF;
+        }
     }
 
     if (cpu_isar_feature(any_evt, cpu)) {
@@ -6901,6 +6910,83 @@ static const ARMCPRegInfo sme_reginfo[] = {
       .access = PL2_RW, .accessfn = access_esm,
       .type = ARM_CP_CONST, .resetvalue = 0 },
 };
+
+static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                  uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+
+    tlb_flush(cs);
+}
+
+static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                        uint64_t value)
+{
+    /* L0GPTSZ is RO; other bits not mentioned are RES0. */
+    uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
+        R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
+        R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
+
+    env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
+}
+
+static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
+                                     env_archcpu(env)->reset_l0gptsz);
+}
+
+static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    CPUState *cs = env_cpu(env);
+
+    tlb_flush_all_cpus_synced(cs);
+}
+
+static const ARMCPRegInfo rme_reginfo[] = {
+    { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
+      .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
+      .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
+    { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
+      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
+    { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
+      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
+    { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_paall_write },
+    { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_paallos_write },
+    /*
+     * QEMU does not have a way to invalidate by physical address, thus
+     * invalidating a range of physical addresses is accomplished by
+     * flushing all tlb entries in the outer sharable domain,
+     * just like PAALLOS.
+     */
+    { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_paallos_write },
+    { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
+      .access = PL3_W, .type = ARM_CP_NO_RAW,
+      .writefn = tlbi_aa64_paallos_write },
+    { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
+      .access = PL3_W, .type = ARM_CP_NOP },
+};
+
+static const ARMCPRegInfo rme_mte_reginfo[] = {
+    { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
+      .access = PL3_W, .type = ARM_CP_NOP },
+};
 #endif /* TARGET_AARCH64 */
 
 static void define_pmu_regs(ARMCPU *cpu)
@@ -9121,6 +9207,13 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_fgt, cpu)) {
         define_arm_cp_regs(cpu, fgt_reginfo);
     }
+
+    if (cpu_isar_feature(aa64_rme, cpu)) {
+        define_arm_cp_regs(cpu, rme_reginfo);
+        if (cpu_isar_feature(aa64_mte, cpu)) {
+            define_arm_cp_regs(cpu, rme_mte_reginfo);
+        }
+    }
 #endif
 
     if (cpu_isar_feature(any_predinv, cpu)) {
@@ -10091,6 +10184,7 @@ void arm_log_exception(CPUState *cs)
             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
             [EXCP_VSERR] = "Virtual SERR",
+            [EXCP_GPC] = "Granule Protection Check",
         };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -10822,6 +10916,10 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     }
 
     switch (cs->exception_index) {
+    case EXCP_GPC:
+        qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
+                      env->cp15.mfar_el3);
+        /* fall through */
     case EXCP_PREFETCH_ABORT:
     case EXCP_DATA_ABORT:
         /*
@@ -12043,3 +12141,63 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
     }
 }
 #endif
+
+#ifndef CONFIG_USER_ONLY
+ARMSecuritySpace arm_security_space(CPUARMState *env)
+{
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return arm_secure_to_space(env->v7m.secure);
+    }
+
+    /*
+     * If EL3 is not supported then the secure state is implementation
+     * defined, in which case QEMU defaults to non-secure.
+     */
+    if (!arm_feature(env, ARM_FEATURE_EL3)) {
+        return ARMSS_NonSecure;
+    }
+
+    /* Check for AArch64 EL3 or AArch32 Mon. */
+    if (is_a64(env)) {
+        if (extract32(env->pstate, 2, 2) == 3) {
+            if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
+                return ARMSS_Root;
+            } else {
+                return ARMSS_Secure;
+            }
+        }
+    } else {
+        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+            return ARMSS_Secure;
+        }
+    }
+
+    return arm_security_space_below_el3(env);
+}
+
+ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
+{
+    assert(!arm_feature(env, ARM_FEATURE_M));
+
+    /*
+     * If EL3 is not supported then the secure state is implementation
+     * defined, in which case QEMU defaults to non-secure.
+     */
+    if (!arm_feature(env, ARM_FEATURE_EL3)) {
+        return ARMSS_NonSecure;
+    }
+
+    /*
+     * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
+     * Ignoring NSE when !NS retains consistency without having to
+     * modify other predicates.
+     */
+    if (!(env->cp15.scr_el3 & SCR_NS)) {
+        return ARMSS_Secure;
+    } else if (env->cp15.scr_el3 & SCR_NSE) {
+        return ARMSS_Realm;
+    } else {
+        return ARMSS_NonSecure;
+    }
+}
+#endif /* !CONFIG_USER_ONLY */
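
For reference (a summary of the code above, not part of the commit): the SCR_EL3 decode implemented by arm_security_space_below_el3() is, in table form, with the reserved NSE=1/NS=0 combination folded into Secure as the comment explains:

    SCR_EL3.NSE  SCR_EL3.NS   space below EL3
         0            0       Secure
         0            1       NonSecure
         1            0       (reserved; NSE ignored, reads as Secure)
         1            1       Realm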
@@ -358,14 +358,27 @@ typedef enum ARMFaultType {
     ARMFault_ICacheMaint,
     ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
     ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+    ARMFault_GPCFOnWalk,
+    ARMFault_GPCFOnOutput,
 } ARMFaultType;
 
+typedef enum ARMGPCF {
+    GPCF_None,
+    GPCF_AddressSize,
+    GPCF_Walk,
+    GPCF_EABT,
+    GPCF_Fail,
+} ARMGPCF;
+
 /**
  * ARMMMUFaultInfo: Information describing an ARM MMU Fault
  * @type: Type of fault
+ * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
  * @level: Table walk level (for translation, access flag and permission faults)
  * @domain: Domain of the fault address (for non-LPAE CPUs only)
  * @s2addr: Address that caused a fault at stage 2
+ * @paddr: physical address that caused a fault for gpc
+ * @paddr_space: physical address space that caused a fault for gpc
  * @stage2: True if we faulted at stage 2
  * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
  * @s1ns: True if we faulted on a non-secure IPA while in secure state
@@ -374,7 +387,10 @@ typedef enum ARMFaultType {
 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
 struct ARMMMUFaultInfo {
     ARMFaultType type;
+    ARMGPCF gpcf;
     target_ulong s2addr;
+    target_ulong paddr;
+    ARMSecuritySpace paddr_space;
     int level;
     int domain;
     bool stage2;
@@ -548,6 +564,17 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
     case ARMFault_Exclusive:
         fsc = 0x35;
         break;
+    case ARMFault_GPCFOnWalk:
+        assert(fi->level >= -1 && fi->level <= 3);
+        if (fi->level < 0) {
+            fsc = 0b100011;
+        } else {
+            fsc = 0b100100 | fi->level;
+        }
+        break;
+    case ARMFault_GPCFOnOutput:
+        fsc = 0b101000;
+        break;
     default:
         /* Other faults can't occur in a context that requires a
          * long-format status code.
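
Worked example (a reading of the encoding above, not new behavior): a GPC fault taken on the GPT walk at level -1 yields FSC 0b100011 (0x23); at levels 0..3 it yields 0b100100 | level, i.e. 0x24..0x27; and a GPC fault on the final output address yields 0b101000 (0x28).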
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
@@ -21,28 +21,35 @@
 typedef struct S1Translate {
     ARMMMUIdx in_mmu_idx;
     ARMMMUIdx in_ptw_idx;
+    ARMSecuritySpace in_space;
     bool in_secure;
     bool in_debug;
+    /*
+     * If this is stage 2 of a stage 1+2 page table walk, then this must
+     * be true if stage 1 is an EL0 access; otherwise this is ignored.
+     * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
+     */
+    bool in_s1_is_el0;
     bool out_secure;
     bool out_rw;
     bool out_be;
+    ARMSecuritySpace out_space;
     hwaddr out_virt;
     hwaddr out_phys;
     void *out_host;
 } S1Translate;
 
-static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
-                               uint64_t address,
-                               MMUAccessType access_type, bool s1_is_el0,
-                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
-    __attribute__((nonnull));
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
+                                target_ulong address,
+                                MMUAccessType access_type,
+                                GetPhysAddrResult *result,
+                                ARMMMUFaultInfo *fi);
 
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
-                                      target_ulong address,
-                                      MMUAccessType access_type,
-                                      GetPhysAddrResult *result,
-                                      ARMMMUFaultInfo *fi)
-    __attribute__((nonnull));
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
+                              target_ulong address,
+                              MMUAccessType access_type,
+                              GetPhysAddrResult *result,
+                              ARMMMUFaultInfo *fi);
 
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
 static const uint8_t pamax_map[] = {
@@ -215,8 +222,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     case ARMMMUIdx_E3:
         break;
 
-    case ARMMMUIdx_Phys_NS:
     case ARMMMUIdx_Phys_S:
+    case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_Root:
+    case ARMMMUIdx_Phys_Realm:
         /* No translation for physical address spaces. */
         return true;
 
@@ -227,6 +236,197 @@
     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 }
 
+static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
+                                     ARMSecuritySpace pspace,
+                                     ARMMMUFaultInfo *fi)
+{
+    MemTxAttrs attrs = {
+        .secure = true,
+        .space = ARMSS_Root,
+    };
+    ARMCPU *cpu = env_archcpu(env);
+    uint64_t gpccr = env->cp15.gpccr_el3;
+    unsigned pps, pgs, l0gptsz, level = 0;
+    uint64_t tableaddr, pps_mask, align, entry, index;
+    AddressSpace *as;
+    MemTxResult result;
+    int gpi;
+
+    if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
+        return true;
+    }
+
+    /*
+     * GPC Priority 1 (R_GMGRR):
+     * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
+     * the access fails as GPT walk fault at level 0.
+     */
+
+    /*
+     * Configuration of PPS to a value exceeding the implemented
+     * physical address size is invalid.
+     */
+    pps = FIELD_EX64(gpccr, GPCCR, PPS);
+    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+        goto fault_walk;
+    }
+    pps = pamax_map[pps];
+    pps_mask = MAKE_64BIT_MASK(0, pps);
+
+    switch (FIELD_EX64(gpccr, GPCCR, SH)) {
+    case 0b10: /* outer shareable */
+        break;
+    case 0b00: /* non-shareable */
+    case 0b11: /* inner shareable */
+        /* Inner and Outer non-cacheable requires Outer shareable. */
+        if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
+            FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
+            goto fault_walk;
+        }
+        break;
+    default: /* reserved */
+        goto fault_walk;
+    }
+
+    switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
+    case 0b00: /* 4KB */
+        pgs = 12;
+        break;
+    case 0b01: /* 64KB */
+        pgs = 16;
+        break;
+    case 0b10: /* 16KB */
+        pgs = 14;
+        break;
+    default: /* reserved */
+        goto fault_walk;
+    }
+
+    /* Note this field is read-only and fixed at reset. */
+    l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
+
+    /*
+     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
+     * R_CPDSB: A NonSecure physical address input exceeding PPS
+     * does not experience any fault.
+     */
+    if (paddress & ~pps_mask) {
+        if (pspace == ARMSS_NonSecure) {
+            return true;
+        }
+        goto fault_size;
+    }
+
+    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
+    tableaddr = env->cp15.gptbr_el3 << 12;
+    if (tableaddr & ~pps_mask) {
+        goto fault_size;
+    }
+
+    /*
+     * BADDR is aligned per a function of PPS and L0GPTSZ.
+     * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
+     * unlike the RES0 bits of the GPT entries (R_XNKFZ).
+     */
+    align = MAX(pps - l0gptsz + 3, 12);
+    align = MAKE_64BIT_MASK(0, align);
+    tableaddr &= ~align;
+
+    as = arm_addressspace(env_cpu(env), attrs);
+
+    /* Level 0 lookup. */
+    index = extract64(paddress, l0gptsz, pps - l0gptsz);
+    tableaddr += index * 8;
+    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
+    if (result != MEMTX_OK) {
+        goto fault_eabt;
+    }
+
+    switch (extract32(entry, 0, 4)) {
+    case 1: /* block descriptor */
+        if (entry >> 8) {
+            goto fault_walk; /* RES0 bits not 0 */
+        }
+        gpi = extract32(entry, 4, 4);
+        goto found;
+    case 3: /* table descriptor */
+        tableaddr = entry & ~0xf;
+        align = MAX(l0gptsz - pgs - 1, 12);
+        align = MAKE_64BIT_MASK(0, align);
+        if (tableaddr & (~pps_mask | align)) {
+            goto fault_walk; /* RES0 bits not 0 */
+        }
+        break;
+    default: /* invalid */
+        goto fault_walk;
+    }
+
+    /* Level 1 lookup */
+    level = 1;
+    index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
+    tableaddr += index * 8;
+    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
+    if (result != MEMTX_OK) {
+        goto fault_eabt;
+    }
+
+    switch (extract32(entry, 0, 4)) {
+    case 1: /* contiguous descriptor */
+        if (entry >> 10) {
+            goto fault_walk; /* RES0 bits not 0 */
+        }
+        /*
+         * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
+         * and because we cannot invalidate by pa, and thus will always
+         * flush entire tlbs, we don't actually care about the range here
+         * and can simply extract the GPI as the result.
+         */
+        if (extract32(entry, 8, 2) == 0) {
+            goto fault_walk; /* reserved contig */
+        }
+        gpi = extract32(entry, 4, 4);
+        break;
+    default:
+        index = extract64(paddress, pgs, 4);
+        gpi = extract64(entry, index * 4, 4);
+        break;
+    }
+
+ found:
+    switch (gpi) {
+    case 0b0000: /* no access */
+        break;
+    case 0b1111: /* all access */
+        return true;
+    case 0b1000:
+    case 0b1001:
+    case 0b1010:
+    case 0b1011:
+        if (pspace == (gpi & 3)) {
+            return true;
+        }
+        break;
+    default:
+        goto fault_walk; /* reserved */
+    }
+
+    fi->gpcf = GPCF_Fail;
+    goto fault_common;
+ fault_eabt:
+    fi->gpcf = GPCF_EABT;
+    goto fault_common;
+ fault_size:
+    fi->gpcf = GPCF_AddressSize;
+    goto fault_common;
+ fault_walk:
+    fi->gpcf = GPCF_Walk;
+ fault_common:
+    fi->level = level;
+    fi->paddr = paddress;
+    fi->paddr_space = pspace;
+    return false;
+}
+
 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 {
     /*
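
Worked example (illustrative numbers, not part of the commit): assume PPS decodes to 48 bits, L0GPTSZ = 30 and PGS = 4KB. The level 0 table then has 2^(48-30) = 2^18 eight-byte entries (2MB), each covering a 1GB region; a level 1 table describes one 1GB region at 4 bits of GPI per 4KB granule, i.e. 2^18 * 4 bits = 128KB, fetched above as 2^14 64-bit entries of 16 GPIs each (index width l0gptsz - pgs - 4 = 14 bits).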
@@ -249,6 +449,7 @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
+    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
@@ -261,30 +462,27 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
-        if (regime_is_stage2(s2_mmu_idx)) {
-            S1Translate s2ptw = {
-                .in_mmu_idx = s2_mmu_idx,
-                .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-                .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-                .in_debug = true,
-            };
-            GetPhysAddrResult s2 = { };
+        S1Translate s2ptw = {
+            .in_mmu_idx = s2_mmu_idx,
+            .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
+            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
+            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
+                         : space == ARMSS_Realm ? ARMSS_Realm
+                         : ARMSS_NonSecure),
+            .in_debug = true,
+        };
+        GetPhysAddrResult s2 = { };
 
-            if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
-                                   false, &s2, fi)) {
-                goto fail;
-            }
-            ptw->out_phys = s2.f.phys_addr;
-            pte_attrs = s2.cacheattrs.attrs;
-            ptw->out_secure = s2.f.attrs.secure;
-        } else {
-            /* Regime is physical. */
-            ptw->out_phys = addr;
-            pte_attrs = 0;
-            ptw->out_secure = s2_mmu_idx == ARMMMUIdx_Phys_S;
-        }
+        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+            goto fail;
+        }
+
+        ptw->out_phys = s2.f.phys_addr;
+        pte_attrs = s2.cacheattrs.attrs;
+        ptw->out_host = NULL;
+        ptw->out_rw = false;
+        ptw->out_secure = s2.f.attrs.secure;
+        ptw->out_space = s2.f.attrs.space;
     } else {
 #ifdef CONFIG_TCG
         CPUTLBEntryFull *full;
@@ -303,6 +501,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         ptw->out_rw = full->prot & PAGE_WRITE;
         pte_attrs = full->pte_attrs;
         ptw->out_secure = full->attrs.secure;
+        ptw->out_space = full->attrs.space;
 #else
         g_assert_not_reached();
 #endif
@@ -330,6 +529,9 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
 
  fail:
     assert(fi->type != ARMFault_None);
+    if (fi->type == ARMFault_GPCFOnOutput) {
+        fi->type = ARMFault_GPCFOnWalk;
+    }
     fi->s2addr = addr;
     fi->stage2 = true;
     fi->s1ptw = true;
@@ -355,7 +557,10 @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
         }
     } else {
         /* Page tables are in MMIO. */
-        MemTxAttrs attrs = { .secure = ptw->out_secure };
+        MemTxAttrs attrs = {
+            .secure = ptw->out_secure,
+            .space = ptw->out_space,
+        };
         AddressSpace *as = arm_addressspace(cs, attrs);
         MemTxResult result = MEMTX_OK;
 
@@ -398,7 +603,10 @@ static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
 #endif
     } else {
         /* Page tables are in MMIO. */
-        MemTxAttrs attrs = { .secure = ptw->out_secure };
+        MemTxAttrs attrs = {
+            .secure = ptw->out_secure,
+            .space = ptw->out_space,
+        };
         AddressSpace *as = arm_addressspace(cs, attrs);
         MemTxResult result = MEMTX_OK;
 
@@ -909,6 +1117,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
          * regime, because the attribute will already be non-secure.
          */
         result->f.attrs.secure = false;
+        result->f.attrs.space = ARMSS_NonSecure;
     }
     result->f.phys_addr = phys_addr;
     return false;
@@ -925,7 +1134,7 @@ do_fault:
  * @xn: XN (execute-never) bits
  * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
  */
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+static int get_S2prot_noexecute(int s2ap)
 {
     int prot = 0;
 
@@ -935,6 +1144,12 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
     if (s2ap & 2) {
         prot |= PAGE_WRITE;
     }
+    return prot;
+}
+
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+{
+    int prot = get_S2prot_noexecute(s2ap);
 
     if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
         switch (xn) {
@@ -972,12 +1187,14 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
  * @mmu_idx: MMU index indicating required translation regime
  * @is_aa64: TRUE if AArch64
 * @ap: The 2-bit simple AP (AP[2:1])
- * @ns: NS (non-secure) bit
  * @xn: XN (execute-never) bit
  * @pxn: PXN (privileged execute-never) bit
+ * @in_pa: The original input pa space
+ * @out_pa: The output pa space, modified by NSTable, NS, and NSE
  */
 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
-                      int ap, int ns, int xn, int pxn)
+                      int ap, int xn, int pxn,
+                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
 {
     ARMCPU *cpu = env_archcpu(env);
     bool is_user = regime_is_user(env, mmu_idx);
@@ -1010,8 +1227,39 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
         }
     }
 
-    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
-        return prot_rw;
+    if (in_pa != out_pa) {
+        switch (in_pa) {
+        case ARMSS_Root:
+            /*
+             * R_ZWRVD: permission fault for insn fetched from non-Root,
+             * I_WWBFB: SIF has no effect in EL3.
+             */
+            return prot_rw;
+        case ARMSS_Realm:
+            /*
+             * R_PKTDS: permission fault for insn fetched from non-Realm,
+             * for Realm EL2 or EL2&0.  The corresponding fault for EL1&0
+             * happens during any stage2 translation.
+             */
+            switch (mmu_idx) {
+            case ARMMMUIdx_E2:
+            case ARMMMUIdx_E20_0:
+            case ARMMMUIdx_E20_2:
+            case ARMMMUIdx_E20_2_PAN:
+                return prot_rw;
+            default:
+                break;
+            }
+            break;
+        case ARMSS_Secure:
+            if (env->cp15.scr_el3 & SCR_SIF) {
+                return prot_rw;
+            }
+            break;
+        default:
+            /* Input NonSecure must have output NonSecure. */
+            g_assert_not_reached();
+        }
     }
 
     /* TODO have_wxn should be replaced with
@@ -1242,22 +1490,16 @@
  * @ptw: Current and next stage parameters for the walk.
  * @address: virtual address to get physical address for
  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
- * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
- *             (so this is a stage 2 page table walk),
- *             must be true if this is stage 2 of a stage 1+2
- *             walk for an EL0 access. If @mmu_idx is anything else,
- *             @s1_is_el0 is ignored.
  * @result: set on translation success,
  * @fi: set to fault info if the translation fails
  */
 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                                uint64_t address,
-                               MMUAccessType access_type, bool s1_is_el0,
+                               MMUAccessType access_type,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
-    bool is_secure = ptw->in_secure;
     int32_t level;
     ARMVAParameters param;
     uint64_t ttbr;
@@ -1268,12 +1510,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     int32_t stride;
     int addrsize, inputsize, outputsize;
     uint64_t tcr = regime_tcr(env, mmu_idx);
-    int ap, ns, xn, pxn;
+    int ap, xn, pxn;
     uint32_t el = regime_el(env, mmu_idx);
     uint64_t descaddrmask;
     bool aarch64 = arm_el_is_aa64(env, el);
     uint64_t descriptor, new_descriptor;
-    bool nstable;
+    ARMSecuritySpace out_space;
 
     /* TODO: This code does not support shareability levels. */
     if (aarch64) {
@@ -1435,32 +1677,32 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         descaddrmask = MAKE_64BIT_MASK(0, 40);
     }
     descaddrmask &= ~indexmask_grainsize;
-
-    /*
-     * Secure stage 1 accesses start with the page table in secure memory and
-     * can be downgraded to non-secure at any step. Non-secure accesses
-     * remain non-secure. We implement this by just ORing in the NSTable/NS
-     * bits at each step.
-     * Stage 2 never gets this kind of downgrade.
-     */
-    tableattrs = is_secure ? 0 : (1 << 4);
+    tableattrs = 0;
 
  next_level:
     descaddr |= (address >> (stride * (4 - level))) & indexmask;
     descaddr &= ~7ULL;
-    nstable = !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1);
-    if (nstable) {
+
+    /*
+     * Process the NSTable bit from the previous level.  This changes
+     * the table address space and the output space from Secure to
+     * NonSecure.  With RME, the EL3 translation regime does not change
+     * from Root to NonSecure.
+     */
+    if (ptw->in_space == ARMSS_Secure
+        && !regime_is_stage2(mmu_idx)
+        && extract32(tableattrs, 4, 1)) {
         /*
          * Stage2_S -> Stage2 or Phys_S -> Phys_NS
-         * Assert that the non-secure idx are even, and relative order.
+         * Assert the relative order of the secure/non-secure indexes.
          */
-        QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
-        QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
-        QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
-        QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
-        ptw->in_ptw_idx &= ~1;
+        QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
+        QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
+        ptw->in_ptw_idx += 1;
         ptw->in_secure = false;
+        ptw->in_space = ARMSS_NonSecure;
     }
 
     if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
         goto do_fault;
     }
@@ -1563,7 +1805,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
      */
     attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
     if (!regime_is_stage2(mmu_idx)) {
-        attrs |= nstable << 5; /* NS */
+        attrs |= !ptw->in_secure << 5; /* NS */
         if (!param.hpd) {
             attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
             /*
@@ -1576,15 +1818,79 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     }
 
     ap = extract32(attrs, 6, 2);
+    out_space = ptw->in_space;
     if (regime_is_stage2(mmu_idx)) {
-        ns = mmu_idx == ARMMMUIdx_Stage2;
-        xn = extract64(attrs, 53, 2);
-        result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
+        /*
+         * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
+         * The bit remains ignored for other security states.
+         * R_YMCSL: Executing an insn fetched from non-Realm causes
+         * a stage2 permission fault.
+         */
+        if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
+            out_space = ARMSS_NonSecure;
+            result->f.prot = get_S2prot_noexecute(ap);
+        } else {
+            xn = extract64(attrs, 53, 2);
+            result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
+        }
     } else {
-        ns = extract32(attrs, 5, 1);
+        int nse, ns = extract32(attrs, 5, 1);
+        switch (out_space) {
+        case ARMSS_Root:
+            /*
+             * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
+             * R_XTYPW: NSE and NS together select the output pa space.
+             */
+            nse = extract32(attrs, 11, 1);
+            out_space = (nse << 1) | ns;
+            if (out_space == ARMSS_Secure &&
+                !cpu_isar_feature(aa64_sel2, cpu)) {
+                out_space = ARMSS_NonSecure;
+            }
+            break;
+        case ARMSS_Secure:
+            if (ns) {
+                out_space = ARMSS_NonSecure;
+            }
+            break;
+        case ARMSS_Realm:
+            switch (mmu_idx) {
+            case ARMMMUIdx_Stage1_E0:
+            case ARMMMUIdx_Stage1_E1:
+            case ARMMMUIdx_Stage1_E1_PAN:
+                /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
+                break;
+            case ARMMMUIdx_E2:
+            case ARMMMUIdx_E20_0:
+            case ARMMMUIdx_E20_2:
+            case ARMMMUIdx_E20_2_PAN:
+                /*
+                 * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
+                 * NS changes the output to non-secure space.
+                 */
+                if (ns) {
+                    out_space = ARMSS_NonSecure;
+                }
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            break;
+        case ARMSS_NonSecure:
+            /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
+            break;
+        default:
+            g_assert_not_reached();
+        }
         xn = extract64(attrs, 54, 1);
         pxn = extract64(attrs, 53, 1);
-        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+
+        /*
+         * Note that we modified ptw->in_space earlier for NSTable, but
+         * result->f.attrs retains a copy of the original security space.
+         */
+        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
+                                    result->f.attrs.space, out_space);
     }
 
     if (!(result->f.prot & (1 << access_type))) {
@ -1611,14 +1917,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
|
||||
}
|
||||
}
|
||||
|
||||
if (ns) {
|
||||
/*
|
||||
* The NS bit will (as required by the architecture) have no effect if
|
||||
* the CPU doesn't support TZ or this is a non-secure translation
|
||||
* regime, because the attribute will already be non-secure.
|
||||
*/
|
||||
result->f.attrs.secure = false;
|
||||
}
|
||||
result->f.attrs.space = out_space;
|
||||
result->f.attrs.secure = arm_space_is_secure(out_space);
|
||||
|
||||
if (regime_is_stage2(mmu_idx)) {
|
||||
result->cacheattrs.is_s2_format = true;
|
||||
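A note on the Root case above: out_space = (nse << 1) | ns only works because the ARMSecuritySpace values are arranged to mirror the architectural NSE:NS encoding. As a sketch for orientation (the enum itself is introduced elsewhere in this series, not in this hunk):

    /* Assumed layout, matching the descriptor/SCR_EL3 {NSE,NS} bit pair. */
    typedef enum ARMSecuritySpace {
        ARMSS_Secure    = 0,    /* NSE=0 NS=0 */
        ARMSS_NonSecure = 1,    /* NSE=0 NS=1 */
        ARMSS_Root      = 2,    /* NSE=1 NS=0 */
        ARMSS_Realm     = 3,    /* NSE=1 NS=1 */
    } ARMSecuritySpace;

The fallback to ARMSS_NonSecure when Secure is selected without FEAT_SEL2 keeps the result within the set of spaces the CPU actually implements.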
@@ -2402,6 +2702,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
          */
         if (sattrs.ns) {
             result->f.attrs.secure = false;
+            result->f.attrs.space = ARMSS_NonSecure;
         } else if (!secure) {
             /*
              * NS access to S memory must fault.
@@ -2668,14 +2969,16 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
                                    ARMMMUFaultInfo *fi)
 {
     uint8_t memattr = 0x00;    /* Device nGnRnE */
-    uint8_t shareability = 0;  /* non-sharable */
+    uint8_t shareability = 0;  /* non-shareable */
     int r_el;
 
     switch (mmu_idx) {
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_Stage2_S:
-    case ARMMMUIdx_Phys_NS:
     case ARMMMUIdx_Phys_S:
+    case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_Root:
+    case ARMMMUIdx_Phys_Realm:
         break;
 
     default:
@@ -2725,7 +3028,7 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
             } else {
                 memattr = 0x44;  /* Normal, NC, No Allocate */
             }
-            shareability = 2; /* outer sharable */
+            shareability = 2; /* outer shareable */
         }
         result->cacheattrs.is_s2_format = false;
         break;
@@ -2750,10 +3053,10 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     bool is_secure = ptw->in_secure;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
-    bool is_el0;
+    ARMSecuritySpace ipa_space;
     uint64_t hcr;
 
-    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
 
     /* If S1 fails, return early. */
     if (ret) {
@@ -2762,10 +3065,12 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
 
     ipa = result->f.phys_addr;
     ipa_secure = result->f.attrs.secure;
+    ipa_space = result->f.attrs.space;
 
-    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
+    ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
     ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
     ptw->in_secure = ipa_secure;
+    ptw->in_space = ipa_space;
     ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
 
     /*
@@ -2777,13 +3082,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     cacheattrs1 = result->cacheattrs;
     memset(result, 0, sizeof(*result));
 
-    if (arm_feature(env, ARM_FEATURE_PMSA)) {
-        ret = get_phys_addr_pmsav8(env, ipa, access_type,
-                                   ptw->in_mmu_idx, is_secure, result, fi);
-    } else {
-        ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
-                                 is_el0, result, fi);
-    }
+    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
     fi->s2addr = ipa;
 
     /* Combine the S1 and S2 perms. */
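The stage2 rework above replaces the local is_el0 flag with state carried in the walk descriptor. Roughly, the input half of S1Translate now looks like this (a sketch assembled from the uses in this diff; the real struct in ptw.c also carries out_* results of the walk):

    typedef struct S1Translate {
        ARMMMUIdx in_mmu_idx;        /* translation regime being resolved */
        ARMMMUIdx in_ptw_idx;        /* regime used for the walk's own loads */
        ARMSecuritySpace in_space;   /* security space of the input address */
        bool in_secure;              /* two-state shorthand for in_space */
        bool in_debug;               /* debugger access: no TLB side effects */
        bool in_s1_is_el0;           /* for stage2: stage1 was an EL0 access */
        /* ... out_* fields elided ... */
    } S1Translate;

Threading in_s1_is_el0 through the struct is what lets a single recursive get_phys_addr_nogpc call replace the old explicit pmsav8/lpae dispatch.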
@@ -2843,7 +3142,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     return false;
 }
 
-static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
+static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                       target_ulong address,
                                       MMUAccessType access_type,
                                       GetPhysAddrResult *result,
@@ -2854,15 +3153,18 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
     ARMMMUIdx s1_mmu_idx;
 
     /*
-     * The page table entries may downgrade secure to non-secure, but
-     * cannot upgrade an non-secure translation regime's attributes
-     * to secure.
+     * The page table entries may downgrade Secure to NonSecure, but
+     * cannot upgrade a NonSecure translation regime's attributes
+     * to Secure or Realm.
      */
     result->f.attrs.secure = is_secure;
+    result->f.attrs.space = ptw->in_space;
 
     switch (mmu_idx) {
     case ARMMMUIdx_Phys_S:
     case ARMMMUIdx_Phys_NS:
+    case ARMMMUIdx_Phys_Root:
+    case ARMMMUIdx_Phys_Realm:
         /* Checking Phys early avoids special casing later vs regime_el. */
         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
@@ -2908,7 +3210,7 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
 
     default:
         /* Single stage uses physical for ptw. */
-        ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+        ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
         break;
     }
 
@@ -2965,8 +3267,7 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, ptw, address, access_type, false,
-                                  result, fi);
+        return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
     } else if (arm_feature(env, ARM_FEATURE_V7) ||
                regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
@@ -2975,6 +3276,23 @@ static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
     }
 }
 
+static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
+                              target_ulong address,
+                              MMUAccessType access_type,
+                              GetPhysAddrResult *result,
+                              ARMMMUFaultInfo *fi)
+{
+    if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+        return true;
+    }
+    if (!granule_protection_check(env, result->f.phys_addr,
+                                  result->f.attrs.space, fi)) {
+        fi->type = ARMFault_GPCFOnOutput;
+        return true;
+    }
+    return false;
+}
+
 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                bool is_secure, GetPhysAddrResult *result,
@@ -2983,16 +3301,19 @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
     S1Translate ptw = {
         .in_mmu_idx = mmu_idx,
         .in_secure = is_secure,
+        .in_space = arm_secure_to_space(is_secure),
     };
-    return get_phys_addr_with_struct(env, &ptw, address, access_type,
-                                     result, fi);
+    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
 }
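The nogpc/gpc split is deliberate: get_phys_addr_nogpc is the translation proper, while get_phys_addr_gpc wraps it with the granule protection check on the final output address; the walk's own descriptor loads are checked separately inside S1_ptw_translate, so no access is checked twice. The small space helpers used in this area behave approximately as follows (a sketch consistent with their uses in this diff, not quoted from the headers):

    static inline ARMSecuritySpace arm_secure_to_space(bool secure)
    {
        return secure ? ARMSS_Secure : ARMSS_NonSecure;
    }

    static inline bool arm_space_is_secure(ARMSecuritySpace space)
    {
        /* Root counts as the secure side of the world. */
        return space == ARMSS_Secure || space == ARMSS_Root;
    }

    static inline ARMMMUIdx arm_space_to_phys(ARMSecuritySpace space)
    {
        /* Assumes Phys_S/Phys_NS/Phys_Root/Phys_Realm are contiguous
         * and ordered to match ARMSecuritySpace. */
        return ARMMMUIdx_Phys_S + space;
    }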
 
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
-    bool is_secure;
     S1Translate ptw = {
         .in_mmu_idx = mmu_idx,
     };
+    ARMSecuritySpace ss;
 
     switch (mmu_idx) {
     case ARMMMUIdx_E10_0:
@@ -3005,30 +3326,54 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_E2:
-        is_secure = arm_is_secure_below_el3(env);
+        ss = arm_security_space_below_el3(env);
         break;
     case ARMMMUIdx_Stage2:
+        /*
+         * For Secure EL2, we need this index to be NonSecure;
+         * otherwise this will already be NonSecure or Realm.
+         */
+        ss = arm_security_space_below_el3(env);
+        if (ss == ARMSS_Secure) {
+            ss = ARMSS_NonSecure;
+        }
+        break;
     case ARMMMUIdx_Phys_NS:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
     case ARMMMUIdx_MUser:
-        is_secure = false;
+        ss = ARMSS_NonSecure;
         break;
-    case ARMMMUIdx_E3:
     case ARMMMUIdx_Stage2_S:
     case ARMMMUIdx_Phys_S:
     case ARMMMUIdx_MSPrivNegPri:
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPriv:
     case ARMMMUIdx_MSUser:
-        is_secure = true;
+        ss = ARMSS_Secure;
         break;
+    case ARMMMUIdx_E3:
+        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+            cpu_isar_feature(aa64_rme, env_archcpu(env))) {
+            ss = ARMSS_Root;
+        } else {
+            ss = ARMSS_Secure;
+        }
+        break;
+    case ARMMMUIdx_Phys_Root:
+        ss = ARMSS_Root;
+        break;
+    case ARMMMUIdx_Phys_Realm:
+        ss = ARMSS_Realm;
+        break;
     default:
         g_assert_not_reached();
     }
-    return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
-                                     is_secure, result, fi);
+
+    ptw.in_space = ss;
+    ptw.in_secure = arm_space_is_secure(ss);
+    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
 }
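Only the E3 case can produce Root: with FEAT_RME, EL3 always executes in the Root space, while a pre-RME EL3 stays Secure. Below EL3 the space is chosen by SCR_EL3, where the new NSE bit pairs with NS. A hypothetical simplification of arm_security_space_below_el3 (SCR_NSE is assumed to be bit 62, added by this series; the reserved NSE=1/NS=0 combination is left as Secure purely for illustration):

    static ARMSecuritySpace space_below_el3_sketch(uint64_t scr_el3)
    {
        bool ns = scr_el3 & SCR_NS;

        if (scr_el3 & SCR_NSE) {
            return ns ? ARMSS_Realm : ARMSS_Secure; /* NSE=1,NS=0 reserved */
        }
        return ns ? ARMSS_NonSecure : ARMSS_Secure;
    }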
 
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
@@ -3036,16 +3381,19 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    ARMSecuritySpace ss = arm_security_space(env);
     S1Translate ptw = {
-        .in_mmu_idx = arm_mmu_idx(env),
-        .in_secure = arm_is_secure(env),
+        .in_mmu_idx = mmu_idx,
+        .in_space = ss,
+        .in_secure = arm_space_is_secure(ss),
         .in_debug = true,
     };
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo fi = {};
     bool ret;
 
-    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
     *attrs = res.f.attrs;
 
     if (ret) {
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -50,6 +50,7 @@ enum arm_exception_class {
     EC_SVEACCESSTRAP          = 0x19,
     EC_ERETTRAP               = 0x1a,
     EC_SMETRAP                = 0x1d,
+    EC_GPC                    = 0x1e,
     EC_INSNABORT              = 0x20,
     EC_INSNABORT_SAME_EL      = 0x21,
     EC_PCALIGNMENT            = 0x22,
@@ -247,6 +248,15 @@ static inline uint32_t syn_bxjtrap(int cv, int cond, int rm)
         (cv << 24) | (cond << 20) | rm;
 }
 
+static inline uint32_t syn_gpc(int s2ptw, int ind, int gpcsc,
+                               int cm, int s1ptw, int wnr, int fsc)
+{
+    /* TODO: FEAT_NV2 adds VNCR */
+    return (EC_GPC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (s2ptw << 21)
+        | (ind << 20) | (gpcsc << 14) | (cm << 8) | (s1ptw << 7)
+        | (wnr << 6) | fsc;
+}
+
 static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
 {
     return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
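For orientation, the packing performed by syn_gpc corresponds to the following syndrome layout; this is read off the shifts above, not quoted from the architecture manual:

    /*
     * EC    [31:26] = 0x1e (EC_GPC)
     * IL    [25]    = 1 (32-bit instruction)
     * S2PTW [21]      fault on a stage2 table-walk access
     * InD   [20]      instruction (1) vs data (0) access
     * GPCSC [19:14]   granule protection check status code
     * CM    [8]       cache maintenance (always 0 here)
     * S1PTW [7]       fault on a stage1 table-walk access
     * WnR   [6]       write (1) vs read (0)
     * xFSC  [5:0]     fault status code
     */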
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -142,6 +142,56 @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
     cpu->sve_max_vq = max_vq;
 }
 
+static bool cpu_arm_get_rme(Object *obj, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    return cpu_isar_feature(aa64_rme, cpu);
+}
+
+static void cpu_arm_set_rme(Object *obj, bool value, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    uint64_t t;
+
+    t = cpu->isar.id_aa64pfr0;
+    t = FIELD_DP64(t, ID_AA64PFR0, RME, value);
+    cpu->isar.id_aa64pfr0 = t;
+}
+
+static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name,
+                                void *opaque, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    uint32_t value;
+
+    if (!visit_type_uint32(v, name, &value, errp)) {
+        return;
+    }
+
+    /* Encode the value for the GPCCR_EL3 field. */
+    switch (value) {
+    case 30:
+    case 34:
+    case 36:
+    case 39:
+        cpu->reset_l0gptsz = value - 30;
+        break;
+    default:
+        error_setg(errp, "invalid value for l0gptsz");
+        error_append_hint(errp, "valid values are 30, 34, 36, 39\n");
+        break;
+    }
+}
+
+static void cpu_max_get_l0gptsz(Object *obj, Visitor *v, const char *name,
+                                void *opaque, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    uint32_t value = cpu->reset_l0gptsz + 30;
+
+    visit_type_uint32(v, name, &value, errp);
+}
+
 static Property arm_cpu_lpa2_property =
     DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true);
 
@@ -700,6 +750,9 @@ void aarch64_max_tcg_initfn(Object *obj)
     aarch64_add_sme_properties(obj);
     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                         cpu_max_set_sve_max_vq, NULL, NULL);
+    object_property_add_bool(obj, "x-rme", cpu_arm_get_rme, cpu_arm_set_rme);
+    object_property_add(obj, "x-l0gptsz", "uint32", cpu_max_get_l0gptsz,
+                        cpu_max_set_l0gptsz, NULL, NULL);
     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
 }
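With the two properties registered, RME can be switched on for the max CPU with something along the lines of -cpu max,x-rme=on, optionally adding x-l0gptsz=N to choose the level-0 GPT input size; the x- prefix marks both as experimental. The setter's encoding is a plain offset:

    /*
     * User-visible l0gptsz -> GPCCR_EL3.L0GPTSZ reset value:
     *   30 -> 0, 34 -> 4, 36 -> 6, 39 -> 9   (i.e. value - 30)
     * Anything else is rejected with a hint listing the valid sizes.
     */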
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -107,17 +107,106 @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
     return fsr;
 }
 
+static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
+                                    ARMMMUFaultInfo *fi)
+{
+    bool ret;
+
+    switch (fi->gpcf) {
+    case GPCF_None:
+        return false;
+    case GPCF_AddressSize:
+    case GPCF_Walk:
+    case GPCF_EABT:
+        /* R_PYTGX: GPT faults are reported as GPC. */
+        ret = true;
+        break;
+    case GPCF_Fail:
+        /*
+         * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
+         * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
+         * if SCR_EL3.GPF is set, otherwise an insn or data abort.
+         */
+        ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    assert(cpu_isar_feature(aa64_rme, cpu));
+    assert(fi->type == ARMFault_GPCFOnWalk ||
+           fi->type == ARMFault_GPCFOnOutput);
+    if (fi->gpcf == GPCF_AddressSize) {
+        assert(fi->level == 0);
+    } else {
+        assert(fi->level >= 0 && fi->level <= 1);
+    }
+
+    return ret;
+}
+
+static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
+{
+    static uint8_t const gpcsc[] = {
+        [GPCF_AddressSize] = 0b000000,
+        [GPCF_Walk]        = 0b000100,
+        [GPCF_Fail]        = 0b001100,
+        [GPCF_EABT]        = 0b010100,
+    };
+
+    /* Note that we've validated fi->gpcf and fi->level above. */
+    return gpcsc[fi->gpcf] | fi->level;
+}
+
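encode_gpcsc composes a base code per fault kind with the GPT walk level, which the asserts in report_as_gpc_exception have already constrained. A worked example from the table above:

    /*
     * A GPT walk fault at level 1:
     *   gpcsc[GPCF_Walk] | fi->level == 0b000100 | 1 == 0b000101
     * GPCF_AddressSize is only legal at level 0, so it always
     * encodes as 0b000000.
     */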
 static G_NORETURN
 void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                        MMUAccessType access_type,
                        int mmu_idx, ARMMMUFaultInfo *fi)
 {
     CPUARMState *env = &cpu->env;
-    int target_el;
+    int target_el = exception_target_el(env);
+    int current_el = arm_current_el(env);
     bool same_el;
     uint32_t syn, exc, fsr, fsc;
 
-    target_el = exception_target_el(env);
+    if (report_as_gpc_exception(cpu, current_el, fi)) {
+        target_el = 3;
+
+        fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);
+
+        syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
+                      access_type == MMU_INST_FETCH,
+                      encode_gpcsc(fi), 0, fi->s1ptw,
+                      access_type == MMU_DATA_STORE, fsc);
+
+        env->cp15.mfar_el3 = fi->paddr;
+        switch (fi->paddr_space) {
+        case ARMSS_Secure:
+            break;
+        case ARMSS_NonSecure:
+            env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
+            break;
+        case ARMSS_Root:
+            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
+            break;
+        case ARMSS_Realm:
+            env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+
+        exc = EXCP_GPC;
+        goto do_raise;
+    }
+
+    /* If SCR_EL3.GPF is unset, GPF may still be routed to EL2. */
+    if (fi->gpcf == GPCF_Fail && target_el < 2) {
+        if (arm_hcr_el2_eff(env) & HCR_GPF) {
+            target_el = 2;
+        }
+    }
+
     if (fi->stage2) {
         target_el = 2;
         env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
@@ -125,8 +214,8 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
             env->cp15.hpfar_el2 |= HPFAR_NS;
         }
     }
-    same_el = (arm_current_el(env) == target_el);
 
+    same_el = current_el == target_el;
     fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);
 
     if (access_type == MMU_INST_FETCH) {
@@ -143,6 +232,7 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
         exc = EXCP_DATA_ABORT;
     }
 
+ do_raise:
     env->exception.vaddress = addr;
     env->exception.fsr = fsr;
     raise_exception(env, exc, syn, target_el);
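The resulting routing for a granule protection fault, summarizing the code above:

    /*
     * report_as_gpc_exception()         -> EXCP_GPC taken to EL3,
     *                                      MFAR_EL3 = faulting PA + space
     * else GPCF_Fail with HCR_EL2.GPF   -> normal abort, routed to EL2
     * else                              -> normal insn/data abort routing
     */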
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -4329,7 +4329,7 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
     /* Predicate register stores can be any multiple of 2. */
     if (len_remain >= 8) {
         t0 = tcg_temp_new_i64();
-        tcg_gen_st_i64(t0, base, vofs + len_align);
+        tcg_gen_ld_i64(t0, base, vofs + len_align);
         tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE);
         len_remain -= 8;
         len_align += 8;
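This one-liner is the predicate store fix named in the cover letter: the tail of gen_sve_str used tcg_gen_st_i64, which writes the freshly allocated (and uninitialized) temporary back into CPU state, where a load of the predicate bytes was intended. The corrected direction of data flow:

    /*
     * tcg_gen_ld_i64(t0, base, off)       t0 <- predicate bytes in env
     * tcg_gen_qemu_st_i64(t0, addr, ...)  guest memory <- t0
     * The buggy tcg_gen_st_i64 instead clobbered env with the
     * uninitialized t0, so the value stored to memory was garbage.
     */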