From 3b16766b5ae7f61079e3ae674e140219c4cdbe85 Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Thu, 18 Aug 2022 17:20:15 -0700 Subject: [PATCH 01/20] target/arm: Add cortex-a35 Add cortex A35 core and enable it for virt board. Signed-off-by: Hao Wu Reviewed-by: Joe Komlodi Reviewed-by: Peter Maydell Message-Id: <20220819002015.1663247-1-wuhaotsh@google.com> Signed-off-by: Richard Henderson --- docs/system/arm/virt.rst | 1 + hw/arm/virt.c | 1 + target/arm/cpu64.c | 80 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst index 3b6ba69a9a..20442ea2c1 100644 --- a/docs/system/arm/virt.rst +++ b/docs/system/arm/virt.rst @@ -52,6 +52,7 @@ Supported guest CPU types: - ``cortex-a7`` (32-bit) - ``cortex-a15`` (32-bit; the default) +- ``cortex-a35`` (64-bit) - ``cortex-a53`` (64-bit) - ``cortex-a57`` (64-bit) - ``cortex-a72`` (64-bit) diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 1a6480fd2a..0961e053e5 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -199,6 +199,7 @@ static const int a15irqmap[] = { static const char *valid_cpus[] = { ARM_CPU_TYPE_NAME("cortex-a7"), ARM_CPU_TYPE_NAME("cortex-a15"), + ARM_CPU_TYPE_NAME("cortex-a35"), ARM_CPU_TYPE_NAME("cortex-a53"), ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 78e27f778a..9d1ea32057 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -36,6 +36,85 @@ #include "hw/qdev-properties.h" #include "internals.h" +static void aarch64_a35_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cpu->dtb_compatible = "arm,cortex-a35"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* From B2.2 AArch64 identification registers. 
*/ + cpu->midr = 0x411fd040; + cpu->revidr = 0; + cpu->ctr = 0x84448004; + cpu->isar.id_pfr0 = 0x00000131; + cpu->isar.id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64pfr1 = 0; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64dfr1 = 0; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64isar1 = 0; + cpu->isar.id_aa64mmfr0 = 0x00101122; + cpu->isar.id_aa64mmfr1 = 0; + cpu->clidr = 0x0a200023; + cpu->dcz_blocksize = 4; + + /* From B2.4 AArch64 Virtual Memory control registers */ + cpu->reset_sctlr = 0x00c50838; + + /* From B2.10 AArch64 performance monitor registers */ + cpu->isar.reset_pmcr_el0 = 0x410a3000; + + /* From B2.29 Cache ID registers */ + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */ + + /* From B3.5 VGIC Type register */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + cpu->gic_pribits = 5; + + /* From C6.4 Debug ID Register */ + cpu->isar.dbgdidr = 0x3516d000; + /* From C6.5 Debug Device ID Register */ + cpu->isar.dbgdevid = 0x00110f13; + /* From C6.6 Debug Device ID Register 1 */ + cpu->isar.dbgdevid1 = 0x2; + + /* From Cortex-A35 SIMD and Floating-point Support r1p0 */ + /* From 3.2 AArch32 register summary */ + cpu->reset_fpsid = 0x41034043; + + /* From 2.2 AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + + /* These values are the same with A53/A57/A72. */ + define_cortex_a72_a57_a53_cp_reginfo(cpu); +} static void aarch64_a57_initfn(Object *obj) { @@ -1158,6 +1237,7 @@ static void aarch64_a64fx_initfn(Object *obj) } static const ARMCPUInfo aarch64_cpus[] = { + { .name = "cortex-a35", .initfn = aarch64_a35_initfn }, { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, From 3b9a030e846dd42a53f9f5bdcdd0228bb898e01b Mon Sep 17 00:00:00 2001 From: Enrik Berkhan Date: Fri, 12 Aug 2022 16:35:19 +0200 Subject: [PATCH 02/20] hw/arm/bcm2835_property: Add support for RPI_FIRMWARE_FRAMEBUFFER_GET_NUM_DISPLAYS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In more recent Raspbian OS Linux kernels, the fb driver gives up immediately if RPI_FIRMWARE_FRAMEBUFFER_GET_NUM_DISPLAYS fails or no displays are reported. This change simply always reports one display. It makes bcm2835_fb work again with these more recent kernels. 
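For reference, the response is written straight into the tag's value buffer in guest memory. A rough sketch of how one property tag is laid out (the struct and field names below are illustrative only, not taken from QEMU or the firmware headers); the handler writes the display count at byte offset 12 from the start of the tag, i.e. the first word of the value buffer, and reports a 4-byte response length:

    #include <stdint.h>

    /* Illustrative layout of a single mailbox property tag. */
    struct rpi_property_tag {
        uint32_t tag;        /* 0x00040013: FRAMEBUFFER_GET_NUM_DISPLAYS */
        uint32_t buf_size;   /* size of the value buffer in bytes */
        uint32_t req_resp;   /* request/response code */
        uint32_t value[];    /* value[0] = number of displays (always 1 here) */
    };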
Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Enrik Berkhan Message-Id: <20220812143519.59134-1-Enrik.Berkhan@inka.de> Signed-off-by: Richard Henderson --- hw/misc/bcm2835_property.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index e94e951057..890ae7bae5 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c @@ -270,6 +270,10 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) stl_le_phys(&s->dma_as, value + 12, 0); resplen = 4; break; + case 0x00040013: /* Get number of displays */ + stl_le_phys(&s->dma_as, value + 12, 1); + resplen = 4; + break; case 0x00060001: /* Get DMA channels */ /* channels 2-5 */ From dde4d028dc829cb70d285bb943ebdc049692fa27 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 19 Aug 2022 12:00:47 +0100 Subject: [PATCH 03/20] target/arm: Make cpregs 0, c0, c{3-15}, {0-7} correctly RAZ in v8 In the AArch32 ID register scheme, coprocessor registers with encoding cp15, 0, c0, c{0-7}, {0-7} are all in the space covered by what in v6 and v7 was called the "CPUID scheme", and are supposed to RAZ if they're not allocated to a specific ID register. For our pre-v8 CPUs we get this right, because the regdefs in id_pre_v8_midr_cp_reginfo[] cover these RAZ requirements. However for v8 we failed to put in the necessary patterns to cover this, so we end up UNDEFing on everything we didn't have an ID register for. This is a problem because in Armv8 some encodings in 0, c0, c3, {0-7} are now being used for new ID registers, and guests might thus start trying to read them. (We already have one of these: ID_PFR2.) For v8 CPUs, we already have regdefs for 0, c0, c{0-2}, {0-7} (that is, the space is completely allocated with no reserved spaces). Add entries to v8_idregs[] covering 0, c0, c3, {0-7}: * c3, {0-2} is the reserved AArch32 space corresponding to the AArch64 MVFR[012]_EL1 * c3, {3,5,6,7} are reserved RAZ for both AArch32 and AArch64 (in fact some of these are given defined meanings in Armv8.6, but we don't implement them yet) * c3, 4 is ID_PFR2 (already defined) We then programmatically add RAZ patterns for AArch32 for 0, c0, c{4..15}, {0-7}: * c4-c7 are unused, and not shared with AArch64 (these are the encodings corresponding to where the AArch64 specific ID registers live in the system register space) * c8-c15 weren't required to RAZ in v6/v7, but v8 extends the AArch32 reserved-should-RAZ space to cover these; the equivalent area of the AArch64 sysreg space is not defined as must-RAZ Note that the architecture allows some registers in this space to return an UNKNOWN value; we always return 0. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220819110052.2942289-2-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/helper.c | 65 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index d7bc467a2a..c171770b03 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -7345,11 +7345,16 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, not_v7_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V8)) { - /* AArch64 ID registers, which all have impdef reset values. + /* + * v8 ID registers, which all have impdef reset values. * Note that within the ID register ranges the unused slots * must all RAZ, not UNDEF; future architecture versions may * define new registers here. 
+ * ID registers which are AArch64 views of the AArch32 ID registers + * which already existed in v6 and v7 are handled elsewhere, + * in v6_idregs[]. */ + int i; ARMCPRegInfo v8_idregs[] = { /* * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system @@ -7539,7 +7544,34 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.mvfr2 }, - { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + /* + * "0, c0, c3, {0,1,2}" are the encodings corresponding to + * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding + * as RAZ, since it is in the "reserved for future ID + * registers, RAZ" part of the AArch32 encoding space. + */ + { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + /* + * Other encodings in "0, c0, c3, ..." are STATE_BOTH because + * they're also RAZ for AArch64, and in v8 are gradually + * being filled with AArch64-view-of-AArch32-ID-register + * for new ID registers. + */ + { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, @@ -7549,17 +7581,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_pfr2 }, - { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + { .name = "RES_0_C0_C3_5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, - { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + { .name = "RES_0_C0_C3_6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, - { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, @@ -7625,6 +7657,29 @@ void register_cp_regs_for_features(ARMCPU *cpu) } define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); + + for (i = 4; i < 16; i++) { + /* + * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. + * For pre-v8 cores there are RAZ patterns for these in + * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here. + * v8 extends the "must RAZ" part of the ID register space + * to also cover c0, 0, c{8-15}, {0-7}. + * These are STATE_AA32 because in the AArch64 sysreg space + * c4-c7 is where the AArch64 ID registers live (and we've + * already defined those in v8_idregs[]), and c8-c15 are not + * "must RAZ" for AArch64. 
+ */ + g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i); + ARMCPRegInfo v8_aa32_raz_idregs = { + .name = name, + .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }; + define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs); + } } /* From 62b6f5e2f0a029810791dde6e02f86ad8575a02c Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 19 Aug 2022 12:00:48 +0100 Subject: [PATCH 04/20] target/arm: Sort KVM reads of AArch32 ID registers into encoding order The code that reads the AArch32 ID registers from KVM in kvm_arm_get_host_cpu_features() does so almost but not quite in encoding order. Move the read of ID_PFR2 down so it's really in encoding order. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220819110052.2942289-3-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/kvm64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 9b9dd46d78..84c4c85f40 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -608,8 +608,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 1, 0)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, - ARM64_SYS_REG(3, 0, 0, 3, 4)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, ARM64_SYS_REG(3, 0, 0, 1, 2)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, @@ -643,6 +641,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 3, 1)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, ARM64_SYS_REG(3, 0, 0, 3, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + ARM64_SYS_REG(3, 0, 0, 3, 4)); /* * DBGDIDR is a bit complicated because the kernel doesn't From 32957aad8ce3cd4f923b464efb80645963479ff8 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 19 Aug 2022 12:00:49 +0100 Subject: [PATCH 05/20] target/arm: Implement ID_MMFR5 In Armv8.6 a new AArch32 ID register ID_MMFR5 is defined. Implement this; we want to be able to use it to report to the guest that we implement FEAT_ETS. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220819110052.2942289-4-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/cpu.h | 1 + target/arm/helper.c | 4 ++-- target/arm/kvm64.c | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 5168e3d837..fcc5927587 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -975,6 +975,7 @@ struct ArchCPU { uint32_t id_mmfr2; uint32_t id_mmfr3; uint32_t id_mmfr4; + uint32_t id_mmfr5; uint32_t id_pfr0; uint32_t id_pfr1; uint32_t id_pfr2; diff --git a/target/arm/helper.c b/target/arm/helper.c index c171770b03..0737851925 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -7586,11 +7586,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, - { .name = "RES_0_C0_C3_6", .state = ARM_CP_STATE_BOTH, + { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = 0 }, + .resetvalue = cpu->isar.id_mmfr5 }, { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 84c4c85f40..2d737c443e 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -643,6 +643,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 3, 2)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, + ARM64_SYS_REG(3, 0, 0, 3, 6)); /* * DBGDIDR is a bit complicated because the kernel doesn't From d22c564958ffa6bd40be34c6ea3333ee7ef73b68 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 19 Aug 2022 12:00:50 +0100 Subject: [PATCH 06/20] target/arm: Implement ID_DFR1 In Armv8.6, a new AArch32 ID register ID_DFR1 is defined; implement it. We don't have any CPUs with features that they need to advertise here yet, but plumbing in the ID register gives it the right name when debugging and will help in future when we do add a CPU that has non-zero ID_DFR1 fields. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220819110052.2942289-5-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/cpu.h | 1 + target/arm/helper.c | 4 ++-- target/arm/kvm64.c | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index fcc5927587..fa24ce9f96 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -983,6 +983,7 @@ struct ArchCPU { uint32_t mvfr1; uint32_t mvfr2; uint32_t id_dfr0; + uint32_t id_dfr1; uint32_t dbgdidr; uint32_t dbgdevid; uint32_t dbgdevid1; diff --git a/target/arm/helper.c b/target/arm/helper.c index 0737851925..7ff03f1a4b 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -7581,11 +7581,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = cpu->isar.id_pfr2 }, - { .name = "RES_0_C0_C3_5", .state = ARM_CP_STATE_BOTH, + { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = 0 }, + .resetvalue = cpu->isar.id_dfr1 }, { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 2d737c443e..1197253d12 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -643,6 +643,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 3, 2)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, + ARM64_SYS_REG(3, 0, 0, 3, 5)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, ARM64_SYS_REG(3, 0, 0, 3, 6)); From 3fe72e213eae33daaea88685878a571bd5ef3e08 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 19 Aug 2022 12:00:51 +0100 Subject: [PATCH 07/20] target/arm: Advertise FEAT_ETS for '-cpu max' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The architectural feature FEAT_ETS (Enhanced Translation Synchronization) is a set of tightened guarantees about memory ordering involving translation table walks: * if memory access RW1 is ordered-before memory access RW2 then it is also ordered-before any translation table walk generated by RW2 that generates a translation fault, address size fault or access fault * TLB maintenance on non-exec-permission translations is guaranteed complete after a DSB (ie it does not need the context synchronization event that you have to have if you don’t have FEAT_ETS) For QEMU’s implementation we don’t reorder translation table walk accesses, and we guarantee to finish the TLB maintenance as soon as the TLB op is done (the tlb_flush functions will complete at the end of the TLB, and TLB ops always end the TB because they’re sysreg writes). So we’re already compliant and all we need to do is say so in the ID registers for the 'max' CPU. 
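As a sanity check, a guest can observe the feature once this is in place. A hypothetical probe (not part of this patch), assuming the usual ID_AA64MMFR1_EL1.ETS field position of bits [39:36]; it must run at EL1, or at EL0 on kernels that trap and emulate ID register reads:

    #include <stdint.h>
    #include <stdio.h>

    /* Read ID_AA64MMFR1_EL1 and extract the ETS field (bits [39:36]).
     * A nonzero value means FEAT_ETS is implemented. AArch64 only. */
    static unsigned int ets_field(void)
    {
        uint64_t mmfr1;
        __asm__("mrs %0, id_aa64mmfr1_el1" : "=r"(mmfr1));
        return (mmfr1 >> 36) & 0xf;
    }

    int main(void)
    {
        printf("FEAT_ETS: %s\n", ets_field() ? "yes" : "no");
        return 0;
    }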
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220819110052.2942289-6-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- docs/system/arm/emulation.rst | 1 + target/arm/cpu64.c | 1 + target/arm/cpu_tcg.c | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 8e494c8bea..811358fd0a 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -24,6 +24,7 @@ the following architecture extensions: - FEAT_Debugv8p4 (Debug changes for v8.4) - FEAT_DotProd (Advanced SIMD dot product instructions) - FEAT_DoubleFault (Double Fault Extension) +- FEAT_ETS (Enhanced Translation Synchronization) - FEAT_FCMA (Floating-point complex number instructions) - FEAT_FHM (Floating-point half-precision multiplication instructions) - FEAT_FP16 (Half-precision floating-point data processing) diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 9d1ea32057..3ac5e197a7 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -1122,6 +1122,7 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */ t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ + t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */ t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ cpu->isar.id_aa64mmfr1 = t; diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index 3099b38e32..f63f8cdd95 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -67,6 +67,10 @@ void aa32_max_features(ARMCPU *cpu) t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX*/ cpu->isar.id_mmfr4 = t; + t = cpu->isar.id_mmfr5; + t = FIELD_DP32(t, ID_MMFR5, ETS, 1); /* FEAT_ETS */ + cpu->isar.id_mmfr5 = t; + t = cpu->isar.id_pfr0; t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CVS2 */ t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */ From bb7d902154f7f17c9127631c42a21fbbc805cb40 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Fri, 19 Aug 2022 12:00:52 +0100 Subject: [PATCH 08/20] target/arm: Add missing space in comment Fix a missing space before a comment terminator. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220819110052.2942289-7-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/cpu_tcg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index f63f8cdd95..b714c61d94 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -64,7 +64,7 @@ void aa32_max_features(ARMCPU *cpu) t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */ - t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX*/ + t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */ cpu->isar.id_mmfr4 = t; t = cpu->isar.id_mmfr5; From 76e25d41d44c49eb0fe399064a719702a3023102 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:49 +0100 Subject: [PATCH 09/20] target/arm: Don't corrupt high half of PMOVSR when cycle counter overflows When the cycle counter overflows, we are intended to set bit 31 in PMOVSR to indicate this. However a missing ULL suffix means that we end up setting all of bits 63-31. Fix the bug. 
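For illustration only (not part of the patch), a standalone snippet showing why the suffix matters when the destination field is 64 bits wide:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pmovsr = 0;

        /*
         * (1 << 31) is a signed int; strictly this shift overflows signed
         * int, but real-world compilers produce INT_MIN here, which then
         * sign-extends when converted to uint64_t for the OR.
         */
        pmovsr |= (1 << 31);
        printf("without ULL: 0x%016" PRIx64 "\n", pmovsr); /* 0xffffffff80000000 */

        pmovsr = 0;
        pmovsr |= (1ULL << 31);
        printf("with ULL:    0x%016" PRIx64 "\n", pmovsr); /* 0x0000000080000000 */
        return 0;
    }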
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-2-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index 7ff03f1a4b..e4824e01b8 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1186,7 +1186,7 @@ static void pmccntr_op_start(CPUARMState *env) uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ 1ull << 63 : 1ull << 31; if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { - env->cp15.c9_pmovsr |= (1 << 31); + env->cp15.c9_pmovsr |= (1ULL << 31); pmu_update_irq(env); } From c117c0649ce4022f518a7f0bc14bf7b036c89de3 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:50 +0100 Subject: [PATCH 10/20] target/arm: Correct value returned by pmu_counter_mask() pmu_counter_mask() accidentally returns a value with bits [63:32] set, because the expression it returns is evaluated as a signed value that gets sign-extended to 64 bits. Force the whole expression to be evaluated with 64-bit arithmetic with ULL suffixes. The main effect of this bug was that a guest could write to the bits in the high half of registers like PMCNTENSET_EL0 that are supposed to be RES0. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-3-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/internals.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/arm/internals.h b/target/arm/internals.h index b8fefdff67..83526166de 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1296,7 +1296,7 @@ static inline uint32_t pmu_num_counters(CPUARMState *env) /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ static inline uint64_t pmu_counter_mask(CPUARMState *env) { - return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); + return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1); } #ifdef TARGET_AARCH64 From 01765386a88868ae993bcbb39f111501fe690035 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:51 +0100 Subject: [PATCH 11/20] target/arm: Don't mishandle count when enabling or disabling PMU counters The PMU cycle and event counter infrastructure design requires that operations on the PMU register fields are wrapped in pmu_op_start() and pmu_op_finish() calls (or their more specific pmmcntr and pmevcntr equivalents). This includes any changes to registers which affect whether the counter should be enabled or disabled, but we forgot to do this. The effect of this bug is that in sequences like: * disable the cycle counter (PMCCNTR) using the PMCNTEN register * write a value such as 0xfffff000 to the PMCCNTR * restart the counter by writing to PMCNTEN the value written to the cycle counter is corrupted, and it starts counting from the wrong place. (Essentially, we fail to record that the QEMU_CLOCK_VIRTUAL timestamp when the counter should be considered to have started counting is the point when PMCNTEN is written to enable the counter.) Add the necessary bracketing calls, so that updates to the various registers which affect whether the PMU is counting are handled correctly. 
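To see why the bracketing matters, here is a deliberately simplified model (not QEMU's actual code; all names here are invented) of a counter that is materialised lazily from a free-running clock. Flipping the enable bit outside the start/finish brackets leaves the stored delta stale, which is exactly the corruption described above:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_clock;   /* stand-in for QEMU_CLOCK_VIRTUAL */

    struct toy_counter {
        bool enabled;
        uint64_t val;    /* guest-visible count, valid inside the brackets */
        uint64_t delta;  /* fake_clock - val, valid outside the brackets */
    };

    static void op_start(struct toy_counter *c)
    {
        if (c->enabled) {
            c->val = fake_clock - c->delta;   /* materialise the count */
        }
    }

    static void op_finish(struct toy_counter *c)
    {
        if (c->enabled) {
            c->delta = fake_clock - c->val;   /* re-arm the lazy form */
        }
    }

    /* The fix in this patch, in miniature: bracket the enable change. */
    static void write_enable(struct toy_counter *c, bool enable, bool bracket)
    {
        if (bracket) {
            op_start(c);
        }
        c->enabled = enable;
        if (bracket) {
            op_finish(c);
        }
    }

    int main(void)
    {
        /* Counter disabled, guest wrote 0xfffff000, then time passes... */
        struct toy_counter c = { false, 0xfffff000, 0 };

        fake_clock = 1000;
        write_enable(&c, true, false);   /* re-enable without brackets */
        op_start(&c);
        /* prints 0x3e8, not 0xfffff000: counting started from the wrong place */
        printf("0x%" PRIx64 "\n", c.val);
        return 0;
    }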
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-4-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/helper.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/target/arm/helper.c b/target/arm/helper.c index e4824e01b8..a348c7407d 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1079,6 +1079,14 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env, return pmreg_access(env, ri, isread); } +/* + * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at. + * We use these to decide whether we need to wrap a write to MDCR_EL2 + * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. + */ +#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN) +#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME) + /* Returns true if the counter (pass 31 for PMCCNTR) should count events using * the current EL, security state, and register configuration. */ @@ -1432,15 +1440,19 @@ static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + pmu_op_start(env); value &= pmu_counter_mask(env); env->cp15.c9_pmcnten |= value; + pmu_op_finish(env); } static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + pmu_op_start(env); value &= pmu_counter_mask(env); env->cp15.c9_pmcnten &= ~value; + pmu_op_finish(env); } static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4681,7 +4693,39 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + /* + * Some MDCR_EL3 bits affect whether PMU counters are running: + * if we are trying to change any of those then we must + * bracket this update with PMU start/finish calls. + */ + bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; + + if (pmu_op) { + pmu_op_start(env); + } env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; + if (pmu_op) { + pmu_op_finish(env); + } +} + +static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Some MDCR_EL2 bits affect whether PMU counters are running: + * if we are trying to change any of those then we must + * bracket this update with PMU start/finish calls. + */ + bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; + + if (pmu_op) { + pmu_op_start(env); + } + env->cp15.mdcr_el2 = value; + if (pmu_op) { + pmu_op_finish(env); + } } static const ARMCPRegInfo v8_cp_reginfo[] = { @@ -7724,6 +7768,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) ARMCPRegInfo mdcr_el2 = { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, + .writefn = mdcr_el2_write, .access = PL2_RW, .resetvalue = pmu_num_counters(env), .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }; From b57aa7bdc3b4f16b216bd854e86431146729a640 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:52 +0100 Subject: [PATCH 12/20] target/arm: Ignore PMCR.D when PMCR.LC is set The architecture requires that if PMCR.LC is set (for a 64-bit cycle counter) then PMCR.D (which enables the clock divider so the counter ticks every 64 cycles rather than every cycle) should be ignored. We were always honouring PMCR.D; fix the bug so we correctly ignore it in this situation. 
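A quick standalone check of the idiom used in pmccntr_clockdiv_enabled() (bit positions assumed to be the architectural ones, D at bit 3 and LC at bit 6); the divider is applied only in the D=1, LC=0 case:

    #include <stdio.h>

    #define PMCRD  (1u << 3)
    #define PMCRLC (1u << 6)

    int main(void)
    {
        for (unsigned lc = 0; lc <= 1; lc++) {
            for (unsigned d = 0; d <= 1; d++) {
                unsigned pmcr = (d ? PMCRD : 0) | (lc ? PMCRLC : 0);
                int divide = (pmcr & (PMCRD | PMCRLC)) == PMCRD;
                printf("D=%u LC=%u -> divide by 64: %s\n",
                       d, lc, divide ? "yes" : "no");
            }
        }
        return 0;
    }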
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-5-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/helper.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index a348c7407d..f1b20de16d 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1172,6 +1172,17 @@ static void pmu_update_irq(CPUARMState *env) (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); } +static bool pmccntr_clockdiv_enabled(CPUARMState *env) +{ + /* + * Return true if the clock divider is enabled and the cycle counter + * is supposed to tick only once every 64 clock cycles. This is + * controlled by PMCR.D, but if PMCR.LC is set to enable the long + * (64-bit) cycle counter PMCR.D has no effect. + */ + return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; +} + /* * Ensure c15_ccnt is the guest-visible count so that operations such as * enabling/disabling the counter or filtering, modifying the count itself, @@ -1184,8 +1195,7 @@ static void pmccntr_op_start(CPUARMState *env) if (pmu_counter_enabled(env, 31)) { uint64_t eff_cycles = cycles; - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ + if (pmccntr_clockdiv_enabled(env)) { eff_cycles /= 64; } @@ -1228,8 +1238,7 @@ static void pmccntr_op_finish(CPUARMState *env) #endif uint64_t prev_cycles = env->cp15.c15_ccnt_delta; - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ + if (pmccntr_clockdiv_enabled(env)) { prev_cycles /= 64; } env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; From 872d20343de4f1af45a31270f47cc54cc474bf55 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:53 +0100 Subject: [PATCH 13/20] target/arm: Honour MDCR_EL2.HPMD in Secure EL2 The logic in pmu_counter_enabled() for handling the 'prohibit event counting' bits MDCR_EL2.HPMD and MDCR_EL3.SPME is written in a way that assumes that EL2 is never Secure. This used to be true, but the architecture now permits Secure EL2, and QEMU can emulate this. Refactor the prohibit logic so that we effectively OR together the various prohibit bits when they apply, rather than trying to construct an if-else ladder where any particular state of the CPU ends up in exactly one branch of the ladder. This fixes the Secure EL2 case and also is a better structure for adding the PMUv8.5 bits MDCR_EL2.HCCD and MDCR_EL3.SCCD. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-6-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/helper.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index f1b20de16d..b792694df0 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1094,7 +1094,7 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) { uint64_t filter; bool e, p, u, nsk, nsu, nsh, m; - bool enabled, prohibited, filtered; + bool enabled, prohibited = false, filtered; bool secure = arm_is_secure(env); int el = arm_current_el(env); uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); @@ -1112,15 +1112,12 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) } enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); - if (!secure) { - if (el == 2 && (counter < hpmn || counter == 31)) { - prohibited = mdcr_el2 & MDCR_HPMD; - } else { - prohibited = false; - } - } else { - prohibited = arm_feature(env, ARM_FEATURE_EL3) && - !(env->cp15.mdcr_el3 & MDCR_SPME); + /* Is event counting prohibited? */ + if (el == 2 && (counter < hpmn || counter == 31)) { + prohibited = mdcr_el2 & MDCR_HPMD; + } + if (secure) { + prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); } if (prohibited && counter == 31) { From f1dd2506ee75b21a604e5a8b6e1c0fd00a435f7e Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:54 +0100 Subject: [PATCH 14/20] target/arm: Detect overflow when calculating next PMU interrupt In pmccntr_op_finish() and pmevcntr_op_finish() we calculate the next point at which we will get an overflow and need to fire the PMU interrupt or set the overflow flag. We do this by calculating the number of nanoseconds to the overflow event and then adding it to qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL). However, we don't check whether that signed addition overflows, which can happen if the next PMU interrupt would happen massively far in the future (250 years or more). Since QEMU assumes that "when the QEMU_CLOCK_VIRTUAL rolls over" is "never", the sensible behaviour in this situation is simply to not try to set the timer if it would be beyond that point. Detect the overflow, and skip setting the timer in that case. 
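The pattern in isolation: QEMU's sadd64_overflow() (from include/qemu/host-utils.h) reports whether the signed 64-bit addition wrapped, and if it did we simply leave the timer unset. A minimal standalone equivalent using the compiler builtin (the function name here is illustrative):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool checked_add_ns(int64_t now, int64_t delta_ns, int64_t *deadline)
    {
        /* true means the addition overflowed and *deadline is not usable */
        return __builtin_add_overflow(now, delta_ns, deadline);
    }

    int main(void)
    {
        int64_t deadline;

        if (!checked_add_ns(INT64_MAX - 10, 100, &deadline)) {
            printf("arm timer at %" PRId64 "\n", deadline);
        } else {
            printf("overflow: leave the timer unset\n");
        }
        return 0;
    }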
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-7-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/helper.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/target/arm/helper.c b/target/arm/helper.c index b792694df0..11a7a57710 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1227,10 +1227,13 @@ static void pmccntr_op_finish(CPUARMState *env) int64_t overflow_in = cycles_ns_per(remaining_cycles); if (overflow_in > 0) { - int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - overflow_in; - ARMCPU *cpu = env_archcpu(env); - timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + int64_t overflow_at; + + if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + overflow_in, &overflow_at)) { + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } } #endif @@ -1275,10 +1278,13 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); if (overflow_in > 0) { - int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - overflow_in; - ARMCPU *cpu = env_archcpu(env); - timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + int64_t overflow_at; + + if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + overflow_in, &overflow_at)) { + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } } #endif From a793bcd0272d9d25c942184ee18c7ed4dc75dc0d Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:55 +0100 Subject: [PATCH 15/20] target/arm: Rename pmu_8_n feature test functions Our feature test functions that check the PMU version are named isar_feature_{aa32,aa64,any}_pmu_8_{1,4}. This doesn't match the current Arm ARM official feature names, which are FEAT_PMUv3p1 and FEAT_PMUv3p4. Rename these functions to _pmuv3p1 and _pmuv3p4. 
This commit was created with: sed -i -e 's/pmu_8_/pmuv3p/g' target/arm/*.[ch] Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-8-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/cpu.h | 16 ++++++++-------- target/arm/helper.c | 18 +++++++++--------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index fa24ce9f96..d86e51992a 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3712,14 +3712,14 @@ static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; } -static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) +static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; } -static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id) +static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && @@ -4038,13 +4038,13 @@ static inline bool isar_feature_aa64_sme(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0; } -static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) +static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; } -static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id) +static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; @@ -4213,14 +4213,14 @@ static inline bool isar_feature_any_predinv(const ARMISARegisters *id) return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id); } -static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id) +static inline bool isar_feature_any_pmuv3p1(const ARMISARegisters *id) { - return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id); + return isar_feature_aa64_pmuv3p1(id) || isar_feature_aa32_pmuv3p1(id); } -static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id) +static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id) { - return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id); + return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id); } static inline bool isar_feature_any_ccidx(const ARMISARegisters *id) diff --git a/target/arm/helper.c b/target/arm/helper.c index 11a7a57710..987ac19fe8 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -879,16 +879,16 @@ static int64_t instructions_ns_per(uint64_t icount) } #endif -static bool pmu_8_1_events_supported(CPUARMState *env) +static bool pmuv3p1_events_supported(CPUARMState *env) { /* For events which are supported in any v8.1 PMU */ - return cpu_isar_feature(any_pmu_8_1, env_archcpu(env)); + return cpu_isar_feature(any_pmuv3p1, env_archcpu(env)); } -static bool pmu_8_4_events_supported(CPUARMState *env) +static bool pmuv3p4_events_supported(CPUARMState *env) { /* For events which are supported in any v8.1 PMU */ - return cpu_isar_feature(any_pmu_8_4, env_archcpu(env)); + return cpu_isar_feature(any_pmuv3p4, env_archcpu(env)); } static uint64_t 
zero_event_get_count(CPUARMState *env) @@ -922,17 +922,17 @@ static const pm_event pm_events[] = { }, #endif { .number = 0x023, /* STALL_FRONTEND */ - .supported = pmu_8_1_events_supported, + .supported = pmuv3p1_events_supported, .get_count = zero_event_get_count, .ns_per_count = zero_event_ns_per, }, { .number = 0x024, /* STALL_BACKEND */ - .supported = pmu_8_1_events_supported, + .supported = pmuv3p1_events_supported, .get_count = zero_event_get_count, .ns_per_count = zero_event_ns_per, }, { .number = 0x03c, /* STALL */ - .supported = pmu_8_4_events_supported, + .supported = pmuv3p4_events_supported, .get_count = zero_event_get_count, .ns_per_count = zero_event_ns_per, }, @@ -6400,7 +6400,7 @@ static void define_pmu_regs(ARMCPU *cpu) g_free(pmevtyper_name); g_free(pmevtyper_el0_name); } - if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { + if (cpu_isar_feature(aa32_pmuv3p1, cpu)) { ARMCPRegInfo v81_pmu_regs[] = { { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, @@ -6413,7 +6413,7 @@ static void define_pmu_regs(ARMCPU *cpu) }; define_arm_cp_regs(cpu, v81_pmu_regs); } - if (cpu_isar_feature(any_pmu_8_4, cpu)) { + if (cpu_isar_feature(any_pmuv3p4, cpu)) { static const ARMCPRegInfo v84_pmmir = { .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, From 0b42f4fab9d3e994efa44e17cb76c15b269bcbda Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:56 +0100 Subject: [PATCH 16/20] target/arm: Implement FEAT_PMUv3p5 cycle counter disable bits FEAT_PMUv3p5 introduces new bits which disable the cycle counter from counting: * MDCR_EL2.HCCD disables the counter when in EL2 * MDCR_EL3.SCCD disables the counter when Secure Add the code to support these bits. (Note that there is a third documented counter-disable bit, MDCR_EL3.MCCD, which disables the counter when in EL3. This is not present until FEAT_PMUv3p7, so is out of scope for now.) 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-9-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/cpu.h | 20 ++++++++++++++++++++ target/arm/helper.c | 21 +++++++++++++++++---- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index d86e51992a..41e74df104 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1334,6 +1334,8 @@ FIELD(CPTR_EL3, TTA, 20, 1) FIELD(CPTR_EL3, TAM, 30, 1) FIELD(CPTR_EL3, TCPAC, 31, 1) +#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */ +#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */ #define MDCR_EPMAD (1U << 21) #define MDCR_EDAD (1U << 20) #define MDCR_SPME (1U << 17) /* MDCR_EL3 */ @@ -3726,6 +3728,13 @@ static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id) FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; } +static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) { return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; @@ -4050,6 +4059,12 @@ static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id) FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; } +static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; @@ -4223,6 +4238,11 @@ static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id) return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id); } +static inline bool isar_feature_any_pmuv3p5(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmuv3p5(id) || isar_feature_aa32_pmuv3p5(id); +} + static inline bool isar_feature_any_ccidx(const ARMISARegisters *id) { return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id); diff --git a/target/arm/helper.c b/target/arm/helper.c index 987ac19fe8..0d1f23de09 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1084,8 +1084,8 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env, * We use these to decide whether we need to wrap a write to MDCR_EL2 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. */ -#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN) -#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME) +#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD) +#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) /* Returns true if the counter (pass 31 for PMCCNTR) should count events using * the current EL, security state, and register configuration. @@ -1120,8 +1120,21 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); } - if (prohibited && counter == 31) { - prohibited = env->cp15.c9_pmcr & PMCRDP; + if (counter == 31) { + /* + * The cycle counter defaults to running. PMCR.DP says "disable + * the cycle counter when event counting is prohibited". + * Some MDCR bits disable the cycle counter specifically. 
+ */ + prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; + if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + if (secure) { + prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); + } + if (el == 2) { + prohibited = prohibited || (mdcr_el2 & MDCR_HCCD); + } + } } if (counter == 31) { From 47b385dae8e95e833114285625f4db796c52e8a3 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:57 +0100 Subject: [PATCH 17/20] target/arm: Support 64-bit event counters for FEAT_PMUv3p5 With FEAT_PMUv3p5, the event counters are now 64 bit, rather than 32 bit. (Previously, only the cycle counter could be 64 bit, and other event counters were always 32 bits). For any given event counter, whether the overflow event is noted for overflow from bit 31 or from bit 63 is controlled by a combination of PMCR.LP, MDCR_EL2.HLP and MDCR_EL2.HPMN. Implement the 64-bit event counter handling. We choose to make our counters always 64 bits, and mask out the top 32 bits on read or write of PMXEVCNTR for CPUs which don't have FEAT_PMUv3p5. (Note that the changes to pmenvcntr_op_start() and pmenvcntr_op_finish() bring their logic closer into line with that of pmccntr_op_start() and pmccntr_op_finish(), which already had to cope with the overflow being either at 32 or 64 bits.) Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-10-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- target/arm/cpu.h | 1 + target/arm/helper.c | 62 ++++++++++++++++++++++++++++++++++++------ target/arm/internals.h | 3 +- 3 files changed, 57 insertions(+), 9 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 41e74df104..33cdbc0143 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1334,6 +1334,7 @@ FIELD(CPTR_EL3, TTA, 20, 1) FIELD(CPTR_EL3, TAM, 30, 1) FIELD(CPTR_EL3, TCPAC, 31, 1) +#define MDCR_HLP (1U << 26) /* MDCR_EL2 */ #define MDCR_SCCD (1U << 23) /* MDCR_EL3 */ #define MDCR_HCCD (1U << 23) /* MDCR_EL2 */ #define MDCR_EPMAD (1U << 21) diff --git a/target/arm/helper.c b/target/arm/helper.c index 0d1f23de09..1a57d2e1d6 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -1084,7 +1084,8 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env, * We use these to decide whether we need to wrap a write to MDCR_EL2 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. */ -#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD) +#define MDCR_EL2_PMU_ENABLE_BITS \ + (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) /* Returns true if the counter (pass 31 for PMCCNTR) should count events using @@ -1193,6 +1194,32 @@ static bool pmccntr_clockdiv_enabled(CPUARMState *env) return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; } +static bool pmevcntr_is_64_bit(CPUARMState *env, int counter) +{ + /* Return true if the specified event counter is configured to be 64 bit */ + + /* This isn't intended to be used with the cycle counter */ + assert(counter < 31); + + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + return false; + } + + if (arm_feature(env, ARM_FEATURE_EL2)) { + /* + * MDCR_EL2.HLP still applies even when EL2 is disabled in the + * current security state, so we don't use arm_mdcr_el2_eff() here. 
+ */ + bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; + int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; + + if (hpmn != 0 && counter >= hpmn) { + return hlp; + } + } + return env->cp15.c9_pmcr & PMCRLP; +} + /* * Ensure c15_ccnt is the guest-visible count so that operations such as * enabling/disabling the counter or filtering, modifying the count itself, @@ -1269,9 +1296,11 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) } if (pmu_counter_enabled(env, counter)) { - uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; + uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; + uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ? + 1ULL << 63 : 1ULL << 31; - if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { + if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { env->cp15.c9_pmovsr |= (1 << counter); pmu_update_irq(env); } @@ -1286,9 +1315,13 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) #ifndef CONFIG_USER_ONLY uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; uint16_t event_idx = supported_event_map[event]; - uint64_t delta = UINT32_MAX - - (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; - int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); + uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); + int64_t overflow_in; + + if (!pmevcntr_is_64_bit(env, counter)) { + delta = (uint32_t)delta; + } + overflow_in = pm_events[event_idx].ns_per_count(delta); if (overflow_in > 0) { int64_t overflow_at; @@ -1375,6 +1408,8 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { unsigned int i; + uint64_t overflow_mask, new_pmswinc; + for (i = 0; i < pmu_num_counters(env); i++) { /* Increment a counter's count iff: */ if ((value & (1 << i)) && /* counter's bit is set */ @@ -1388,9 +1423,12 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, * Detect if this write causes an overflow since we can't predict * PMSWINC overflows like we can for other events */ - uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; + new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; - if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { + overflow_mask = pmevcntr_is_64_bit(env, i) ? 
+ 1ULL << 63 : 1ULL << 31; + + if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { env->cp15.c9_pmovsr |= (1 << i); pmu_update_irq(env); } @@ -1597,6 +1635,10 @@ static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value, uint8_t counter) { + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ + value &= MAKE_64BIT_MASK(0, 32); + } if (counter < pmu_num_counters(env)) { pmevcntr_op_start(env, counter); env->cp15.c14_pmevcntr[counter] = value; @@ -1616,6 +1658,10 @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, pmevcntr_op_start(env, counter); ret = env->cp15.c14_pmevcntr[counter]; pmevcntr_op_finish(env, counter); + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ + ret &= MAKE_64BIT_MASK(0, 32); + } return ret; } else { /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR diff --git a/target/arm/internals.h b/target/arm/internals.h index 83526166de..bf60cd5f84 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1256,6 +1256,7 @@ enum MVEECIState { /* Definitions for the PMU registers */ #define PMCRN_MASK 0xf800 #define PMCRN_SHIFT 11 +#define PMCRLP 0x80 #define PMCRLC 0x40 #define PMCRDP 0x20 #define PMCRX 0x10 @@ -1267,7 +1268,7 @@ enum MVEECIState { * Mask of PMCR bits writable by guest (not including WO bits like C, P, * which can be written as 1 to trigger behaviour but which stay RAZ). */ -#define PMCR_WRITABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) +#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) #define PMXEVTYPER_P 0x80000000 #define PMXEVTYPER_U 0x40000000 From e31e0f56616ae88cda22da8d5a257c2fcee4de2a Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 22 Aug 2022 14:23:58 +0100 Subject: [PATCH 18/20] target/arm: Report FEAT_PMUv3p5 for TCG '-cpu max' Update the ID registers for TCG's '-cpu max' to report a FEAT_PMUv3p5 compliant PMU. 
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-Id: <20220822132358.3524971-11-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- docs/system/arm/emulation.rst | 1 + target/arm/cpu64.c | 2 +- target/arm/cpu_tcg.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 811358fd0a..be7bbffe59 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -53,6 +53,7 @@ the following architecture extensions: - FEAT_PMULL (PMULL, PMULL2 instructions) - FEAT_PMUv3p1 (PMU Extensions v3.1) - FEAT_PMUv3p4 (PMU Extensions v3.4) +- FEAT_PMUv3p5 (PMU Extensions v3.5) - FEAT_RAS (Reliability, availability, and serviceability) - FEAT_RASv1p1 (RAS Extension v1.1) - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions) diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 3ac5e197a7..e6314e86d2 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -1152,7 +1152,7 @@ static void aarch64_max_initfn(Object *obj) t = cpu->isar.id_aa64dfr0; t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */ - t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */ + t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */ cpu->isar.id_aa64dfr0 = t; t = cpu->isar.id_aa64smfr0; diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index b714c61d94..98b5ba2160 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -85,7 +85,7 @@ void aa32_max_features(ARMCPU *cpu) t = cpu->isar.id_dfr0; t = FIELD_DP32(t, ID_DFR0, COPDBG, 9); /* FEAT_Debugv8p4 */ t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */ - t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */ + t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */ cpu->isar.id_dfr0 = t; } From f92bd43480bf2da36354e148d139ef2aac12d3f2 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 23 Aug 2022 17:04:16 +0100 Subject: [PATCH 19/20] target/arm: Remove useless TARGET_BIG_ENDIAN check in armv7m_load_kernel() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Arm system emulation targets always have TARGET_BIG_ENDIAN clear, so there is no need to have handling in armv7m_load_kernel() for the case when it is defined. Remove the unnecessary code. 
Side notes: * our M-profile implementation is always little-endian (that is, it makes the IMPDEF choice that the read-only AIRCR.ENDIANNESS is 0) * if we did want to handle big-endian ELF files here we should do it the way that hw/arm/boot.c:arm_load_elf() does, by looking at the ELF header to see what endianness the file itself is Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20220823160417.3858216-2-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- hw/arm/armv7m.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index 990861ee5e..fa4c2c735d 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -572,17 +572,10 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) { ssize_t image_size; uint64_t entry; - int big_endian; AddressSpace *as; int asidx; CPUState *cs = CPU(cpu); -#if TARGET_BIG_ENDIAN - big_endian = 1; -#else - big_endian = 0; -#endif - if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) { asidx = ARMASIdx_S; } else { @@ -593,7 +586,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) if (kernel_filename) { image_size = load_elf_as(kernel_filename, NULL, NULL, NULL, &entry, NULL, NULL, - NULL, big_endian, EM_ARM, 1, 0, as); + NULL, 0, EM_ARM, 1, 0, as); if (image_size < 0) { image_size = load_image_targphys_as(kernel_filename, 0, mem_size, as); From 761c532ab1ebe9d345c9afe4fb9c2c4b26c58582 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Tue, 23 Aug 2022 17:04:17 +0100 Subject: [PATCH 20/20] target/arm: Make boards pass base address to armv7m_load_kernel() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently armv7m_load_kernel() takes the size of the block of memory where it should load the initial guest image, but assumes that it should always load it at address 0. This happens to be true of all our M-profile boards at the moment, but it isn't guaranteed to always be so: M-profile CPUs can be configured (via init-svtor and init-nsvtor, which match equivalent hardware configuration signals) to have the initial vector table at any address, not just zero. (For instance the Teeny board has the boot ROM at address 0x0200_0000.) Add a base address argument to armv7m_load_kernel(), so that callers now pass in both base address and size. All the current callers pass 0, so this is not a behaviour change. 
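As a usage sketch (a hypothetical board, not one of the callers touched below; the EXAMPLE_* names and the board itself are invented): a board whose boot ROM and reset vector table sit at a non-zero address would now pass that address explicitly rather than relying on the old hard-coded 0.

    #include "qemu/osdep.h"
    #include "qemu/units.h"
    #include "hw/arm/boot.h"
    #include "hw/boards.h"

    #define EXAMPLE_ROM_BASE 0x02000000
    #define EXAMPLE_ROM_SIZE (2 * MiB)

    static void example_board_init(MachineState *machine)
    {
        /* ... create the CPU and map the boot ROM at EXAMPLE_ROM_BASE ... */
        armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
                           EXAMPLE_ROM_BASE, EXAMPLE_ROM_SIZE);
    }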
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20220823160417.3858216-3-peter.maydell@linaro.org> Signed-off-by: Richard Henderson --- hw/arm/armv7m.c | 5 +++-- hw/arm/aspeed.c | 1 + hw/arm/microbit.c | 2 +- hw/arm/mps2-tz.c | 2 +- hw/arm/mps2.c | 2 +- hw/arm/msf2-som.c | 2 +- hw/arm/musca.c | 3 ++- hw/arm/netduino2.c | 2 +- hw/arm/netduinoplus2.c | 2 +- hw/arm/stellaris.c | 2 +- hw/arm/stm32vldiscovery.c | 2 +- include/hw/arm/boot.h | 5 ++++- 12 files changed, 18 insertions(+), 12 deletions(-) diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index fa4c2c735d..50a9507c0b 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -568,7 +568,8 @@ static void armv7m_reset(void *opaque) cpu_reset(CPU(cpu)); } -void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) +void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, + hwaddr mem_base, int mem_size) { ssize_t image_size; uint64_t entry; @@ -588,7 +589,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) &entry, NULL, NULL, NULL, 0, EM_ARM, 1, 0, as); if (image_size < 0) { - image_size = load_image_targphys_as(kernel_filename, 0, + image_size = load_image_targphys_as(kernel_filename, mem_base, mem_size, as); } if (image_size < 0) { diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index b3bbe06f8f..bc3ecdb619 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -1430,6 +1430,7 @@ static void aspeed_minibmc_machine_init(MachineState *machine) armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + 0, AST1030_INTERNAL_FLASH_SIZE); } diff --git a/hw/arm/microbit.c b/hw/arm/microbit.c index e9494334ce..50df362088 100644 --- a/hw/arm/microbit.c +++ b/hw/arm/microbit.c @@ -57,7 +57,7 @@ static void microbit_init(MachineState *machine) mr, -1); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - s->nrf51.flash_size); + 0, s->nrf51.flash_size); } static void microbit_machine_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index 4017392bf5..394192b9b2 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -1197,7 +1197,7 @@ static void mps2tz_common_init(MachineState *machine) } armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - boot_ram_size(mms)); + 0, boot_ram_size(mms)); } static void mps2_tz_idau_check(IDAUInterface *ii, uint32_t address, diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index bb76fa6889..a86a994dba 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -450,7 +450,7 @@ static void mps2_common_init(MachineState *machine) mmc->fpga_type == FPGA_AN511 ? 
47 : 13)); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - 0x400000); + 0, 0x400000); } static void mps2_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c index d9f881690e..a6df473ec9 100644 --- a/hw/arm/msf2-som.c +++ b/hw/arm/msf2-som.c @@ -98,7 +98,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - soc->envm_size); + 0, soc->envm_size); } static void emcraft_sf2_machine_init(MachineClass *mc) diff --git a/hw/arm/musca.c b/hw/arm/musca.c index 7a83f7dda7..6eeee57c9d 100644 --- a/hw/arm/musca.c +++ b/hw/arm/musca.c @@ -597,7 +597,8 @@ static void musca_init(MachineState *machine) "cfg_sec_resp", 0)); } - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0x2000000); + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + 0, 0x2000000); } static void musca_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c index 3365da11bf..83753d53a3 100644 --- a/hw/arm/netduino2.c +++ b/hw/arm/netduino2.c @@ -49,7 +49,7 @@ static void netduino2_init(MachineState *machine) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - FLASH_SIZE); + 0, FLASH_SIZE); } static void netduino2_machine_init(MachineClass *mc) diff --git a/hw/arm/netduinoplus2.c b/hw/arm/netduinoplus2.c index 76cea8e489..515c081605 100644 --- a/hw/arm/netduinoplus2.c +++ b/hw/arm/netduinoplus2.c @@ -50,7 +50,7 @@ static void netduinoplus2_init(MachineState *machine) armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - FLASH_SIZE); + 0, FLASH_SIZE); } static void netduinoplus2_machine_init(MachineClass *mc) diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index 12c673c917..a9e96c37f8 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -1302,7 +1302,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) create_unimplemented_device("hibernation", 0x400fc000, 0x1000); create_unimplemented_device("flash-control", 0x400fd000, 0x1000); - armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, flash_size); + armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, 0, flash_size); } /* FIXME: Figure out how to generate these from stellaris_boards. */ diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c index 04036da3ee..67675e952f 100644 --- a/hw/arm/stm32vldiscovery.c +++ b/hw/arm/stm32vldiscovery.c @@ -53,7 +53,7 @@ static void stm32vldiscovery_init(MachineState *machine) armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, - FLASH_SIZE); + 0, FLASH_SIZE); } static void stm32vldiscovery_machine_init(MachineClass *mc) diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h index c7ebae156e..f18cc3064f 100644 --- a/include/hw/arm/boot.h +++ b/include/hw/arm/boot.h @@ -25,13 +25,16 @@ typedef enum { * armv7m_load_kernel: * @cpu: CPU * @kernel_filename: file to load + * @mem_base: base address to load image at (should be where the + * CPU expects to find its vector table on reset) * @mem_size: mem_size: maximum image size to load * * Load the guest image for an ARMv7M system. This must be called by * any ARMv7M board. (This is necessary to ensure that the CPU resets * correctly on system reset, as well as for kernel loading.) 
*/ -void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size); +void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, + hwaddr mem_base, int mem_size); /* arm_boot.c */ struct arm_boot_info {