Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20181024' into staging

target-arm queue:
 * ssi-sd: Make devices picking up backends unavailable with -device
 * Add support for VCPU event states
 * Move towards making ID registers the source of truth for
   whether a guest CPU implements a feature, rather than having
   parallel ID registers and feature bit flags (a sketch of the
   new test pattern follows this list)
 * Implement various HCR hypervisor trap/config bits
 * Get IL bit correct for v7 syndrome values
 * Report correct syndrome for FP/SIMD traps to Hyp mode
 * hw/arm/boot: Increase compliance with kernel arm64 boot protocol
 * Refactor A32 Neon to use generic vector infrastructure
 * Fix a bug in A32 VLD2 "(multiple 2-element structures)" insn
 * net: cadence_gem: Report features correctly in ID register
 * Avoid some unnecessary TLB flushes on TTBR register writes
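
The recurring pattern in this series is the replacement of parallel ARM_FEATURE_* flags with tests derived from the ID registers themselves. A minimal, self-contained sketch of the two styles (FIELD_EX32 here is a simplified stand-in for the macro in hw/registerfields.h; ID_ISAR5.AES at bits [7:4] is the example field):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for FIELD_EX32 from hw/registerfields.h. */
    #define FIELD_EX32(reg, shift, len) (((reg) >> (shift)) & ((1u << (len)) - 1u))

    typedef struct {
        uint32_t id_isar5;      /* the ID register is the source of truth */
    } ARMISARegistersSketch;

    /* Old style: a parallel feature bit, set by hand per CPU model, which
     * can drift out of sync with the ID register value the guest reads. */
    static const int has_aes_feature_bit = 1;

    /* New style: derive the answer from the ID register field itself.
     * ID_ISAR5.AES is bits [7:4]: 1 = AESE/AESD/AESMC/AESIMC, 2 = + PMULL. */
    static int isar_feature_aa32_aes(const ARMISARegistersSketch *id)
    {
        return FIELD_EX32(id->id_isar5, 4, 4) != 0;
    }

    int main(void)
    {
        /* 0x00011121 is the Cortex-A57 ID_ISAR5 value used in this series. */
        ARMISARegistersSketch isar = { .id_isar5 = 0x00011121 };
        printf("feature bit: %d, id-register test: %d\n",
               has_aes_feature_bit, isar_feature_aa32_aes(&isar));
        return 0;
    }

With the ID register as the single source of truth, the guest-visible register value and the translator's feature test can no longer drift apart.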

# gpg: Signature made Wed 24 Oct 2018 10:46:01 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20181024: (44 commits)
  target/arm: Only flush tlb if ASID changes
  target/arm: Remove writefn from TTBR0_EL3
  net: cadence_gem: Announce 64bit addressing support
  net: cadence_gem: Announce availability of priority queues
  target/arm: Reorg NEON VLD/VST single element to one lane
  target/arm: Promote consecutive memory ops for aa32
  target/arm: Reorg NEON VLD/VST all elements
  target/arm: Use gvec for NEON VLD all lanes
  target/arm: Use gvec for NEON_3R_VTST_VCEQ, NEON_3R_VCGT, NEON_3R_VCGE
  target/arm: Use gvec for NEON_3R_VML
  target/arm: Use gvec for VSRI, VSLI
  target/arm: Use gvec for VSRA
  target/arm: Use gvec for VSHR, VSHL
  target/arm: Use gvec for NEON_3R_VMUL
  target/arm: Use gvec for NEON_2RM_VMN, NEON_2RM_VNEG
  target/arm: Use gvec for NEON_3R_VADD_VSUB insns
  target/arm: Use gvec for NEON_3R_LOGIC insns
  target/arm: Use gvec for NEON VMOV, VMVN, VBIC & VORR (immediate)
  target/arm: Use gvec for NEON VDUP
  target/arm: Mark some arrays const
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit e60b38f445
Peter Maydell, 2018-10-24 10:49:14 +01:00
21 changed files with 2013 additions and 1473 deletions

hw/arm/boot.c

@ -24,6 +24,7 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "exec/address-spaces.h"
#include "qemu/units.h"
/* Kernel boot protocol is specified in the kernel docs
* Documentation/arm/Booting and Documentation/arm64/booting.txt
@ -36,6 +37,8 @@
#define ARM64_TEXT_OFFSET_OFFSET 8
#define ARM64_MAGIC_OFFSET 56
#define BOOTLOADER_MAX_SIZE (4 * KiB)
AddressSpace *arm_boot_address_space(ARMCPU *cpu,
const struct arm_boot_info *info)
{
@ -184,6 +187,8 @@ static void write_bootloader(const char *name, hwaddr addr,
code[i] = tswap32(insn);
}
assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE);
rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as);
g_free(code);
@ -919,6 +924,19 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
if (hdrvals[1] != 0) {
kernel_load_offset = le64_to_cpu(hdrvals[0]);
/*
* We write our startup "bootloader" at the very bottom of RAM,
* so that bit can't be used for the image. Luckily the Image
* format specification is that the image requests only an offset
* from a 2MB boundary, not an absolute load address. So if the
* image requests an offset that might mean it overlaps with the
* bootloader, we can just load it starting at 2MB+offset rather
* than 0MB + offset.
*/
if (kernel_load_offset < BOOTLOADER_MAX_SIZE) {
kernel_load_offset += 2 * MiB;
}
}
}
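
To make the adjustment above concrete: BOOTLOADER_MAX_SIZE is 4 KiB and the Image header's text_offset is relative to a 2 MiB boundary, so a requested offset that would land on the bootloader at the bottom of RAM can simply be rebased by one 2 MiB step. A small arithmetic sketch (KiB/MiB as defined in qemu/units.h):

    #include <stdint.h>
    #include <stdio.h>

    #define KiB (1024ull)
    #define MiB (1024ull * KiB)
    #define BOOTLOADER_MAX_SIZE (4 * KiB)

    /* Mirrors the adjustment in load_aarch64_image(): offsets that would
     * overlap the bootloader move up by one 2 MiB alignment step. */
    static uint64_t adjust_load_offset(uint64_t kernel_load_offset)
    {
        if (kernel_load_offset < BOOTLOADER_MAX_SIZE) {
            kernel_load_offset += 2 * MiB;
        }
        return kernel_load_offset;
    }

    int main(void)
    {
        /* A typical Image text_offset of 0x80000 (512 KiB) is untouched... */
        printf("0x80000 -> 0x%llx\n",
               (unsigned long long)adjust_load_offset(0x80000));
        /* ...but 0x1000 would sit on the bootloader, so it becomes
         * 2 MiB + 0x1000. */
        printf("0x1000  -> 0x%llx\n",
               (unsigned long long)adjust_load_offset(0x1000));
        return 0;
    }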

hw/intc/armv7m_nvic.c

@ -1055,17 +1055,17 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
case 0xd5c: /* MMFR3. */
return cpu->id_mmfr3;
case 0xd60: /* ISAR0. */
return cpu->id_isar0;
return cpu->isar.id_isar0;
case 0xd64: /* ISAR1. */
return cpu->id_isar1;
return cpu->isar.id_isar1;
case 0xd68: /* ISAR2. */
return cpu->id_isar2;
return cpu->isar.id_isar2;
case 0xd6c: /* ISAR3. */
return cpu->id_isar3;
return cpu->isar.id_isar3;
case 0xd70: /* ISAR4. */
return cpu->id_isar4;
return cpu->isar.id_isar4;
case 0xd74: /* ISAR5. */
return cpu->id_isar5;
return cpu->isar.id_isar5;
case 0xd78: /* CLIDR */
return cpu->clidr;
case 0xd7c: /* CTR */

hw/net/cadence_gem.c

@ -142,6 +142,7 @@
#define GEM_DESCONF4 (0x0000028C/4)
#define GEM_DESCONF5 (0x00000290/4)
#define GEM_DESCONF6 (0x00000294/4)
#define GEM_DESCONF6_64B_MASK (1U << 23)
#define GEM_DESCONF7 (0x00000298/4)
#define GEM_INT_Q1_STATUS (0x00000400 / 4)
@ -1283,6 +1284,7 @@ static void gem_reset(DeviceState *d)
int i;
CadenceGEMState *s = CADENCE_GEM(d);
const uint8_t *a;
uint32_t queues_mask = 0;
DB_PRINT("\n");
@ -1299,7 +1301,12 @@ static void gem_reset(DeviceState *d)
s->regs[GEM_DESCONF] = 0x02500111;
s->regs[GEM_DESCONF2] = 0x2ab13fff;
s->regs[GEM_DESCONF5] = 0x002f2045;
s->regs[GEM_DESCONF6] = 0x00000200;
s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
if (s->num_priority_queues > 1) {
queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
s->regs[GEM_DESCONF6] |= queues_mask;
}
/* Set MAC address */
a = &s->conf.macaddr.a[0];
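
For reference, MAKE_64BIT_MASK(shift, length) used above builds 'length' consecutive one-bits starting at bit 'shift', so queues 1..N-1 each get an availability bit in DESCONF6 while queue 0 is implicit. A sketch using a simplified equivalent of the qemu/bitops.h macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified equivalent of MAKE_64BIT_MASK from qemu/bitops.h:
     * 'length' one-bits starting at bit 'shift'. */
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        /* With num_priority_queues = 4, queues 1..3 are advertised in
         * DESCONF6 (queue 0 is always present and not flagged here). */
        int num_priority_queues = 4;
        uint64_t queues_mask = MAKE_64BIT_MASK(1, num_priority_queues - 1);
        printf("queues_mask = 0x%llx\n",
               (unsigned long long)queues_mask);   /* prints 0xe */
        return 0;
    }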

hw/sd/ssi-sd.c

@ -284,6 +284,8 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data)
k->cs_polarity = SSI_CS_LOW;
dc->vmsd = &vmstate_ssi_sd;
dc->reset = ssi_sd_reset;
/* Reason: init() method uses drive_get_next() */
dc->user_creatable = false;
}
static const TypeInfo ssi_sd_info = {

linux-user/aarch64/signal.c

@ -314,7 +314,7 @@ static int target_restore_sigframe(CPUARMState *env,
break;
case TARGET_SVE_MAGIC:
if (arm_feature(env, ARM_FEATURE_SVE)) {
if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
if (!sve && size == sve_size) {
@ -433,7 +433,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
/* SVE state needs saving only if it exists. */
if (arm_feature(env, ARM_FEATURE_SVE)) {
if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
sve_ofs = alloc_sigframe_space(sve_size, &layout);

linux-user/elfload.c

@ -458,6 +458,10 @@ static uint32_t get_elf_hwcap(void)
/* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
#define GET_FEATURE_ID(feat, hwcap) \
do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
/* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
@ -467,8 +471,8 @@ static uint32_t get_elf_hwcap(void)
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
* ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
@ -485,15 +489,16 @@ static uint32_t get_elf_hwcap2(void)
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
return hwcaps;
}
#undef GET_FEATURE
#undef GET_FEATURE_ID
#else
/* 64 bit ARM definitions */
@ -568,25 +573,26 @@ static uint32_t get_elf_hwcap(void)
hwcaps |= ARM_HWCAP_A64_ASIMD;
/* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
GET_FEATURE(ARM_FEATURE_V8_FP16,
ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS);
GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP);
GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
#undef GET_FEATURE
#define GET_FEATURE_ID(feat, hwcap) \
do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
#undef GET_FEATURE_ID
return hwcaps;
}

linux-user/syscall.c

@ -9544,7 +9544,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
* even though the current architectural maximum is VQ=16.
*/
ret = -TARGET_EINVAL;
if (arm_feature(cpu_env, ARM_FEATURE_SVE)
if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
CPUARMState *env = cpu_env;
ARMCPU *cpu = arm_env_get_cpu(env);
@ -9563,9 +9563,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
case TARGET_PR_SVE_GET_VL:
ret = -TARGET_EINVAL;
if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
CPUARMState *env = cpu_env;
ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
{
ARMCPU *cpu = arm_env_get_cpu(cpu_env);
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
}
}
return ret;
#endif /* AARCH64 */

target/arm/cpu.c

@ -144,9 +144,9 @@ static void arm_cpu_reset(CPUState *s)
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;
cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
s->halted = cpu->start_powered_off;
@ -814,7 +814,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
/* Some features automatically imply others: */
if (arm_feature(env, ARM_FEATURE_V8)) {
set_feature(env, ARM_FEATURE_V7VE);
if (arm_feature(env, ARM_FEATURE_M)) {
set_feature(env, ARM_FEATURE_V7);
} else {
set_feature(env, ARM_FEATURE_V7VE);
}
}
if (arm_feature(env, ARM_FEATURE_V7VE)) {
/* v7 Virtualization Extensions. In real hardware this implies
@ -825,7 +829,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Presence of EL2 itself is ARM_FEATURE_EL2, and of the
* Security Extensions is ARM_FEATURE_EL3.
*/
set_feature(env, ARM_FEATURE_ARM_DIV);
assert(cpu_isar_feature(arm_div, cpu));
set_feature(env, ARM_FEATURE_LPAE);
set_feature(env, ARM_FEATURE_V7);
}
@ -850,20 +854,14 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (arm_feature(env, ARM_FEATURE_V6)) {
set_feature(env, ARM_FEATURE_V5);
set_feature(env, ARM_FEATURE_JAZELLE);
if (!arm_feature(env, ARM_FEATURE_M)) {
assert(cpu_isar_feature(jazelle, cpu));
set_feature(env, ARM_FEATURE_AUXCR);
}
}
if (arm_feature(env, ARM_FEATURE_V5)) {
set_feature(env, ARM_FEATURE_V4T);
}
if (arm_feature(env, ARM_FEATURE_M)) {
set_feature(env, ARM_FEATURE_THUMB_DIV);
}
if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
set_feature(env, ARM_FEATURE_THUMB_DIV);
}
if (arm_feature(env, ARM_FEATURE_VFP4)) {
set_feature(env, ARM_FEATURE_VFP3);
set_feature(env, ARM_FEATURE_VFP_FP16);
@ -938,7 +936,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
*/
cpu->id_pfr1 &= ~0xf0;
cpu->id_aa64pfr0 &= ~0xf000;
cpu->isar.id_aa64pfr0 &= ~0xf000;
}
if (!cpu->has_el2) {
@ -955,7 +953,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* registers if we don't have EL2. These are id_pfr1[15:12] and
* id_aa64pfr0_el1[11:8].
*/
cpu->id_aa64pfr0 &= ~0xf00;
cpu->isar.id_aa64pfr0 &= ~0xf00;
cpu->id_pfr1 &= ~0xf000;
}
@ -1084,11 +1082,16 @@ static void arm926_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_VFP);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
cpu->midr = 0x41069265;
cpu->reset_fpsid = 0x41011090;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00090078;
/*
* ARMv5 does not have the ID_ISAR registers, but we can still
* set the field to indicate Jazelle support within QEMU.
*/
cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
}
static void arm946_initfn(Object *obj)
@ -1114,12 +1117,18 @@ static void arm1026_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_AUXCR);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
cpu->midr = 0x4106a262;
cpu->reset_fpsid = 0x410110a0;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00090078;
cpu->reset_auxcr = 1;
/*
* ARMv5 does not have the ID_ISAR registers, but we can still
* set the field to indicate Jazelle support within QEMU.
*/
cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
{
/* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
ARMCPRegInfo ifar = {
@ -1151,8 +1160,8 @@ static void arm1136_r2_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
cpu->midr = 0x4107b362;
cpu->reset_fpsid = 0x410120b4;
cpu->mvfr0 = 0x11111111;
cpu->mvfr1 = 0x00000000;
cpu->isar.mvfr0 = 0x11111111;
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
cpu->id_pfr0 = 0x111;
@ -1162,11 +1171,11 @@ static void arm1136_r2_initfn(Object *obj)
cpu->id_mmfr0 = 0x01130003;
cpu->id_mmfr1 = 0x10030302;
cpu->id_mmfr2 = 0x01222110;
cpu->id_isar0 = 0x00140011;
cpu->id_isar1 = 0x12002111;
cpu->id_isar2 = 0x11231111;
cpu->id_isar3 = 0x01102131;
cpu->id_isar4 = 0x141;
cpu->isar.id_isar0 = 0x00140011;
cpu->isar.id_isar1 = 0x12002111;
cpu->isar.id_isar2 = 0x11231111;
cpu->isar.id_isar3 = 0x01102131;
cpu->isar.id_isar4 = 0x141;
cpu->reset_auxcr = 7;
}
@ -1183,8 +1192,8 @@ static void arm1136_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
cpu->midr = 0x4117b363;
cpu->reset_fpsid = 0x410120b4;
cpu->mvfr0 = 0x11111111;
cpu->mvfr1 = 0x00000000;
cpu->isar.mvfr0 = 0x11111111;
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
cpu->id_pfr0 = 0x111;
@ -1194,11 +1203,11 @@ static void arm1136_initfn(Object *obj)
cpu->id_mmfr0 = 0x01130003;
cpu->id_mmfr1 = 0x10030302;
cpu->id_mmfr2 = 0x01222110;
cpu->id_isar0 = 0x00140011;
cpu->id_isar1 = 0x12002111;
cpu->id_isar2 = 0x11231111;
cpu->id_isar3 = 0x01102131;
cpu->id_isar4 = 0x141;
cpu->isar.id_isar0 = 0x00140011;
cpu->isar.id_isar1 = 0x12002111;
cpu->isar.id_isar2 = 0x11231111;
cpu->isar.id_isar3 = 0x01102131;
cpu->isar.id_isar4 = 0x141;
cpu->reset_auxcr = 7;
}
@ -1216,8 +1225,8 @@ static void arm1176_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->midr = 0x410fb767;
cpu->reset_fpsid = 0x410120b5;
cpu->mvfr0 = 0x11111111;
cpu->mvfr1 = 0x00000000;
cpu->isar.mvfr0 = 0x11111111;
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
cpu->id_pfr0 = 0x111;
@ -1227,11 +1236,11 @@ static void arm1176_initfn(Object *obj)
cpu->id_mmfr0 = 0x01130003;
cpu->id_mmfr1 = 0x10030302;
cpu->id_mmfr2 = 0x01222100;
cpu->id_isar0 = 0x0140011;
cpu->id_isar1 = 0x12002111;
cpu->id_isar2 = 0x11231121;
cpu->id_isar3 = 0x01102131;
cpu->id_isar4 = 0x01141;
cpu->isar.id_isar0 = 0x0140011;
cpu->isar.id_isar1 = 0x12002111;
cpu->isar.id_isar2 = 0x11231121;
cpu->isar.id_isar3 = 0x01102131;
cpu->isar.id_isar4 = 0x01141;
cpu->reset_auxcr = 7;
}
@ -1247,8 +1256,8 @@ static void arm11mpcore_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
cpu->midr = 0x410fb022;
cpu->reset_fpsid = 0x410120b4;
cpu->mvfr0 = 0x11111111;
cpu->mvfr1 = 0x00000000;
cpu->isar.mvfr0 = 0x11111111;
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
cpu->id_pfr0 = 0x111;
cpu->id_pfr1 = 0x1;
@ -1257,11 +1266,11 @@ static void arm11mpcore_initfn(Object *obj)
cpu->id_mmfr0 = 0x01100103;
cpu->id_mmfr1 = 0x10020302;
cpu->id_mmfr2 = 0x01222000;
cpu->id_isar0 = 0x00100011;
cpu->id_isar1 = 0x12002111;
cpu->id_isar2 = 0x11221011;
cpu->id_isar3 = 0x01102131;
cpu->id_isar4 = 0x141;
cpu->isar.id_isar0 = 0x00100011;
cpu->isar.id_isar1 = 0x12002111;
cpu->isar.id_isar2 = 0x11221011;
cpu->isar.id_isar3 = 0x01102131;
cpu->isar.id_isar4 = 0x141;
cpu->reset_auxcr = 1;
}
@ -1290,13 +1299,13 @@ static void cortex_m3_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x00000000;
cpu->id_mmfr3 = 0x00000000;
cpu->id_isar0 = 0x01141110;
cpu->id_isar1 = 0x02111000;
cpu->id_isar2 = 0x21112231;
cpu->id_isar3 = 0x01111110;
cpu->id_isar4 = 0x01310102;
cpu->id_isar5 = 0x00000000;
cpu->id_isar6 = 0x00000000;
cpu->isar.id_isar0 = 0x01141110;
cpu->isar.id_isar1 = 0x02111000;
cpu->isar.id_isar2 = 0x21112231;
cpu->isar.id_isar3 = 0x01111110;
cpu->isar.id_isar4 = 0x01310102;
cpu->isar.id_isar5 = 0x00000000;
cpu->isar.id_isar6 = 0x00000000;
}
static void cortex_m4_initfn(Object *obj)
@ -1317,13 +1326,13 @@ static void cortex_m4_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x00000000;
cpu->id_mmfr3 = 0x00000000;
cpu->id_isar0 = 0x01141110;
cpu->id_isar1 = 0x02111000;
cpu->id_isar2 = 0x21112231;
cpu->id_isar3 = 0x01111110;
cpu->id_isar4 = 0x01310102;
cpu->id_isar5 = 0x00000000;
cpu->id_isar6 = 0x00000000;
cpu->isar.id_isar0 = 0x01141110;
cpu->isar.id_isar1 = 0x02111000;
cpu->isar.id_isar2 = 0x21112231;
cpu->isar.id_isar3 = 0x01111110;
cpu->isar.id_isar4 = 0x01310102;
cpu->isar.id_isar5 = 0x00000000;
cpu->isar.id_isar6 = 0x00000000;
}
static void cortex_m33_initfn(Object *obj)
@ -1346,13 +1355,13 @@ static void cortex_m33_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x01000000;
cpu->id_mmfr3 = 0x00000000;
cpu->id_isar0 = 0x01101110;
cpu->id_isar1 = 0x02212000;
cpu->id_isar2 = 0x20232232;
cpu->id_isar3 = 0x01111131;
cpu->id_isar4 = 0x01310132;
cpu->id_isar5 = 0x00000000;
cpu->id_isar6 = 0x00000000;
cpu->isar.id_isar0 = 0x01101110;
cpu->isar.id_isar1 = 0x02212000;
cpu->isar.id_isar2 = 0x20232232;
cpu->isar.id_isar3 = 0x01111131;
cpu->isar.id_isar4 = 0x01310132;
cpu->isar.id_isar5 = 0x00000000;
cpu->isar.id_isar6 = 0x00000000;
cpu->clidr = 0x00000000;
cpu->ctr = 0x8000c000;
}
@ -1384,8 +1393,6 @@ static void cortex_r5_initfn(Object *obj)
ARMCPU *cpu = ARM_CPU(obj);
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
set_feature(&cpu->env, ARM_FEATURE_PMSA);
cpu->midr = 0x411fc153; /* r1p3 */
@ -1397,13 +1404,13 @@ static void cortex_r5_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x01200000;
cpu->id_mmfr3 = 0x0211;
cpu->id_isar0 = 0x02101111;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232141;
cpu->id_isar3 = 0x01112131;
cpu->id_isar4 = 0x0010142;
cpu->id_isar5 = 0x0;
cpu->id_isar6 = 0x0;
cpu->isar.id_isar0 = 0x02101111;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232141;
cpu->isar.id_isar3 = 0x01112131;
cpu->isar.id_isar4 = 0x0010142;
cpu->isar.id_isar5 = 0x0;
cpu->isar.id_isar6 = 0x0;
cpu->mp_is_up = true;
cpu->pmsav7_dregion = 16;
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
@ -1438,8 +1445,8 @@ static void cortex_a8_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->midr = 0x410fc080;
cpu->reset_fpsid = 0x410330c0;
cpu->mvfr0 = 0x11110222;
cpu->mvfr1 = 0x00011111;
cpu->isar.mvfr0 = 0x11110222;
cpu->isar.mvfr1 = 0x00011111;
cpu->ctr = 0x82048004;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x1031;
@ -1450,11 +1457,11 @@ static void cortex_a8_initfn(Object *obj)
cpu->id_mmfr1 = 0x20000000;
cpu->id_mmfr2 = 0x01202000;
cpu->id_mmfr3 = 0x11;
cpu->id_isar0 = 0x00101111;
cpu->id_isar1 = 0x12112111;
cpu->id_isar2 = 0x21232031;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x00111142;
cpu->isar.id_isar0 = 0x00101111;
cpu->isar.id_isar1 = 0x12112111;
cpu->isar.id_isar2 = 0x21232031;
cpu->isar.id_isar3 = 0x11112131;
cpu->isar.id_isar4 = 0x00111142;
cpu->dbgdidr = 0x15141000;
cpu->clidr = (1 << 27) | (2 << 24) | 3;
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
@ -1512,8 +1519,8 @@ static void cortex_a9_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_CBAR);
cpu->midr = 0x410fc090;
cpu->reset_fpsid = 0x41033090;
cpu->mvfr0 = 0x11110222;
cpu->mvfr1 = 0x01111111;
cpu->isar.mvfr0 = 0x11110222;
cpu->isar.mvfr1 = 0x01111111;
cpu->ctr = 0x80038003;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x1031;
@ -1524,11 +1531,11 @@ static void cortex_a9_initfn(Object *obj)
cpu->id_mmfr1 = 0x20000000;
cpu->id_mmfr2 = 0x01230000;
cpu->id_mmfr3 = 0x00002111;
cpu->id_isar0 = 0x00101111;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232041;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x00111142;
cpu->isar.id_isar0 = 0x00101111;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232041;
cpu->isar.id_isar3 = 0x11112131;
cpu->isar.id_isar4 = 0x00111142;
cpu->dbgdidr = 0x35141000;
cpu->clidr = (1 << 27) | (1 << 24) | 3;
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
@ -1573,8 +1580,8 @@ static void cortex_a7_initfn(Object *obj)
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
cpu->midr = 0x410fc075;
cpu->reset_fpsid = 0x41023075;
cpu->mvfr0 = 0x10110222;
cpu->mvfr1 = 0x11111111;
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x84448003;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x00001131;
@ -1590,11 +1597,11 @@ static void cortex_a7_initfn(Object *obj)
/* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
* table 4-41 gives 0x02101110, which includes the arm div insns.
*/
cpu->id_isar0 = 0x02101110;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232041;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x10011142;
cpu->isar.id_isar0 = 0x02101110;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232041;
cpu->isar.id_isar3 = 0x11112131;
cpu->isar.id_isar4 = 0x10011142;
cpu->dbgdidr = 0x3515f005;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
@ -1619,8 +1626,8 @@ static void cortex_a15_initfn(Object *obj)
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
cpu->midr = 0x412fc0f1;
cpu->reset_fpsid = 0x410430f0;
cpu->mvfr0 = 0x10110222;
cpu->mvfr1 = 0x11111111;
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x00001131;
@ -1633,11 +1640,11 @@ static void cortex_a15_initfn(Object *obj)
cpu->id_mmfr1 = 0x20000000;
cpu->id_mmfr2 = 0x01240000;
cpu->id_mmfr3 = 0x02102211;
cpu->id_isar0 = 0x02101110;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232041;
cpu->id_isar3 = 0x11112131;
cpu->id_isar4 = 0x10011142;
cpu->isar.id_isar0 = 0x02101110;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232041;
cpu->isar.id_isar3 = 0x11112131;
cpu->isar.id_isar4 = 0x10011142;
cpu->dbgdidr = 0x3515f021;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
@ -1830,17 +1837,26 @@ static void arm_max_initfn(Object *obj)
cortex_a15_initfn(obj);
#ifdef CONFIG_USER_ONLY
/* We don't set these in system emulation mode for the moment,
* since we don't correctly set the ID registers to advertise them,
* since we don't correctly set (all of) the ID registers to
* advertise them.
*/
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_V8_AES);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
{
uint32_t t;
t = cpu->isar.id_isar5;
t = FIELD_DP32(t, ID_ISAR5, AES, 2);
t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
cpu->isar.id_isar5 = t;
t = cpu->isar.id_isar6;
t = FIELD_DP32(t, ID_ISAR6, DP, 1);
cpu->isar.id_isar6 = t;
}
#endif
}
}
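
The FIELD_DP32() calls that replace the old set_feature() lines deposit a value into a named field of an ID register. A simplified sketch of the deposit/extract pair behind FIELD_DP32/FIELD_EX32 (hw/registerfields.h derives the shift and length from FIELD() definitions; ID_ISAR5.AES at bits [7:4] and SHA1 at [11:8] are the example fields):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified deposit/extract, standing in for FIELD_DP32/FIELD_EX32. */
    static uint32_t dp32(uint32_t reg, int shift, int len, uint32_t val)
    {
        uint32_t mask = ((1u << len) - 1u) << shift;
        return (reg & ~mask) | ((val << shift) & mask);
    }

    static uint32_t ex32(uint32_t reg, int shift, int len)
    {
        return (reg >> shift) & ((1u << len) - 1u);
    }

    int main(void)
    {
        uint32_t t = 0;
        t = dp32(t, 4, 4, 2);   /* ID_ISAR5.AES = 2: AES + PMULL */
        t = dp32(t, 8, 4, 1);   /* ID_ISAR5.SHA1 = 1 */
        printf("id_isar5 = 0x%08x, AES field = %u\n", t, ex32(t, 4, 4));
        return 0;
    }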

target/arm/cpu.h

@ -531,6 +531,13 @@ typedef struct CPUARMState {
*/
} exception;
/* Information associated with an SError */
struct {
uint8_t pending;
uint8_t has_esr;
uint64_t esr;
} serror;
/* Thumb-2 EE state. */
uint32_t teecr;
uint32_t teehbr;
@ -669,6 +676,8 @@ typedef enum ARMPSCIState {
PSCI_ON_PENDING = 2
} ARMPSCIState;
typedef struct ARMISARegisters ARMISARegisters;
/**
* ARMCPU:
* @env: #CPUARMState
@ -788,13 +797,28 @@ struct ARMCPU {
* ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
* is used for reset values of non-constant registers; no reset_
* prefix means a constant register.
* Some of these registers are split out into a substructure that
* is shared with the translators to control the ISA.
*/
struct ARMISARegisters {
uint32_t id_isar0;
uint32_t id_isar1;
uint32_t id_isar2;
uint32_t id_isar3;
uint32_t id_isar4;
uint32_t id_isar5;
uint32_t id_isar6;
uint32_t mvfr0;
uint32_t mvfr1;
uint32_t mvfr2;
uint64_t id_aa64isar0;
uint64_t id_aa64isar1;
uint64_t id_aa64pfr0;
uint64_t id_aa64pfr1;
} isar;
uint32_t midr;
uint32_t revidr;
uint32_t reset_fpsid;
uint32_t mvfr0;
uint32_t mvfr1;
uint32_t mvfr2;
uint32_t ctr;
uint32_t reset_sctlr;
uint32_t id_pfr0;
@ -808,21 +832,10 @@ struct ARMCPU {
uint32_t id_mmfr2;
uint32_t id_mmfr3;
uint32_t id_mmfr4;
uint32_t id_isar0;
uint32_t id_isar1;
uint32_t id_isar2;
uint32_t id_isar3;
uint32_t id_isar4;
uint32_t id_isar5;
uint32_t id_isar6;
uint64_t id_aa64pfr0;
uint64_t id_aa64pfr1;
uint64_t id_aa64dfr0;
uint64_t id_aa64dfr1;
uint64_t id_aa64afr0;
uint64_t id_aa64afr1;
uint64_t id_aa64isar0;
uint64_t id_aa64isar1;
uint64_t id_aa64mmfr0;
uint64_t id_aa64mmfr1;
uint32_t dbgdidr;
@ -1531,6 +1544,16 @@ FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
FIELD(ID_AA64ISAR1, SB, 36, 4)
FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
FIELD(ID_AA64PFR0, EL0, 0, 4)
FIELD(ID_AA64PFR0, EL1, 4, 4)
FIELD(ID_AA64PFR0, EL2, 8, 4)
FIELD(ID_AA64PFR0, EL3, 12, 4)
FIELD(ID_AA64PFR0, FP, 16, 4)
FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
FIELD(ID_AA64PFR0, GIC, 24, 4)
FIELD(ID_AA64PFR0, RAS, 28, 4)
FIELD(ID_AA64PFR0, SVE, 32, 4)
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
/* If adding a feature bit which corresponds to a Linux ELF
@ -1550,7 +1573,6 @@ enum arm_features {
ARM_FEATURE_VFP3,
ARM_FEATURE_VFP_FP16,
ARM_FEATURE_NEON,
ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
ARM_FEATURE_M, /* Microcontroller profile. */
ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
ARM_FEATURE_THUMB2EE,
@ -1560,7 +1582,6 @@ enum arm_features {
ARM_FEATURE_V5,
ARM_FEATURE_STRONGARM,
ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
ARM_FEATURE_GENERIC_TIMER,
ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
@ -1573,30 +1594,15 @@ enum arm_features {
ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
ARM_FEATURE_V8,
ARM_FEATURE_AARCH64, /* supports 64 bit mode */
ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
ARM_FEATURE_CBAR, /* has cp15 CBAR */
ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
ARM_FEATURE_EL2, /* has EL2 Virtualization support */
ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
ARM_FEATURE_PMU, /* has PMU support */
ARM_FEATURE_VBAR, /* has cp15 VBAR */
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
ARM_FEATURE_SVE, /* has Scalable Vector Extension */
ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */
ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
};
@ -3148,4 +3154,157 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
/* Shared between translate-sve.c and sve_helper.c. */
extern const uint64_t pred_esz_masks[4];
/*
* 32-bit feature tests via id registers.
*/
static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
}
static inline bool isar_feature_arm_div(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
}
static inline bool isar_feature_jazelle(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
}
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
}
static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
}
static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
}
static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
}
static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
}
static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
}
static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
}
static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
}
static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
{
/*
* This is a placeholder for use by VCMA until the rest of
* the ARMv8.2-FP16 extension is implemented for aa32 mode,
* at which point we can properly set and check MVFR1.FPHP.
*/
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
}
/*
* 64-bit feature tests via id registers.
*/
static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
}
static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
}
static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
}
static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
}
static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
}
static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
}
static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
}
static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
}
static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
}
static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
}
static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
}
static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
}
static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
}
static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically wrt FP16. */
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
}
static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}
/*
* Forward to the above feature tests given an ARMCPU pointer.
*/
#define cpu_isar_feature(name, cpu) \
({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
#endif
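
A hypothetical call site, showing the pattern the rest of the series converts to: the translator asks cpu_isar_feature() rather than arm_feature(), so the answer always matches what the guest reads from the ID register.

    /* Sketch only, not part of this diff: gate SHA512 decode on the
     * ID register field instead of a parallel ARM_FEATURE_* flag. */
    if (cpu_isar_feature(aa64_sha512, cpu)) {
        /* decode and emulate the SHA512 instructions */
    }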

target/arm/cpu64.c

@ -109,11 +109,6 @@ static void aarch64_a57_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
set_feature(&cpu->env, ARM_FEATURE_V8_AES);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
@ -121,9 +116,9 @@ static void aarch64_a57_initfn(Object *obj)
cpu->midr = 0x411fd070;
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034070;
cpu->mvfr0 = 0x10110222;
cpu->mvfr1 = 0x12111111;
cpu->mvfr2 = 0x00000043;
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x12111111;
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
@ -134,18 +129,18 @@ static void aarch64_a57_initfn(Object *obj)
cpu->id_mmfr1 = 0x40000000;
cpu->id_mmfr2 = 0x01260000;
cpu->id_mmfr3 = 0x02102211;
cpu->id_isar0 = 0x02101110;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232042;
cpu->id_isar3 = 0x01112131;
cpu->id_isar4 = 0x00011142;
cpu->id_isar5 = 0x00011121;
cpu->id_isar6 = 0;
cpu->id_aa64pfr0 = 0x00002222;
cpu->isar.id_isar0 = 0x02101110;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232042;
cpu->isar.id_isar3 = 0x01112131;
cpu->isar.id_isar4 = 0x00011142;
cpu->isar.id_isar5 = 0x00011121;
cpu->isar.id_isar6 = 0;
cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
@ -170,11 +165,6 @@ static void aarch64_a53_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
set_feature(&cpu->env, ARM_FEATURE_V8_AES);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
@ -182,9 +172,9 @@ static void aarch64_a53_initfn(Object *obj)
cpu->midr = 0x410fd034;
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034070;
cpu->mvfr0 = 0x10110222;
cpu->mvfr1 = 0x12111111;
cpu->mvfr2 = 0x00000043;
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x12111111;
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
@ -195,16 +185,16 @@ static void aarch64_a53_initfn(Object *obj)
cpu->id_mmfr1 = 0x40000000;
cpu->id_mmfr2 = 0x01260000;
cpu->id_mmfr3 = 0x02102211;
cpu->id_isar0 = 0x02101110;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232042;
cpu->id_isar3 = 0x01112131;
cpu->id_isar4 = 0x00011142;
cpu->id_isar5 = 0x00011121;
cpu->id_isar6 = 0;
cpu->id_aa64pfr0 = 0x00002222;
cpu->isar.id_isar0 = 0x02101110;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232042;
cpu->isar.id_isar3 = 0x01112131;
cpu->isar.id_isar4 = 0x00011142;
cpu->isar.id_isar5 = 0x00011121;
cpu->isar.id_isar6 = 0;
cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
@ -229,20 +219,15 @@ static void aarch64_a72_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
set_feature(&cpu->env, ARM_FEATURE_V8_AES);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->midr = 0x410fd083;
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034080;
cpu->mvfr0 = 0x10110222;
cpu->mvfr1 = 0x12111111;
cpu->mvfr2 = 0x00000043;
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x12111111;
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
@ -253,17 +238,17 @@ static void aarch64_a72_initfn(Object *obj)
cpu->id_mmfr1 = 0x40000000;
cpu->id_mmfr2 = 0x01260000;
cpu->id_mmfr3 = 0x02102211;
cpu->id_isar0 = 0x02101110;
cpu->id_isar1 = 0x13112111;
cpu->id_isar2 = 0x21232042;
cpu->id_isar3 = 0x01112131;
cpu->id_isar4 = 0x00011142;
cpu->id_isar5 = 0x00011121;
cpu->id_aa64pfr0 = 0x00002222;
cpu->isar.id_isar0 = 0x02101110;
cpu->isar.id_isar1 = 0x13112111;
cpu->isar.id_isar2 = 0x21232042;
cpu->isar.id_isar3 = 0x01112131;
cpu->isar.id_isar4 = 0x00011142;
cpu->isar.id_isar5 = 0x00011121;
cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
@ -312,24 +297,55 @@ static void aarch64_max_initfn(Object *obj)
if (kvm_enabled()) {
kvm_arm_set_cpu_features_from_host(cpu);
} else {
uint64_t t;
uint32_t u;
aarch64_a57_initfn(obj);
#ifdef CONFIG_USER_ONLY
/* We don't set these in system emulation mode for the moment,
* since we don't correctly set the ID registers to advertise them,
* and in some cases they're only available in AArch64 and not AArch32,
* whereas the architecture requires them to be present in both if
* present in either.
t = cpu->isar.id_aa64isar0;
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
cpu->isar.id_aa64isar0 = t;
t = cpu->isar.id_aa64isar1;
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
cpu->isar.id_aa64isar1 = t;
t = cpu->isar.id_aa64pfr0;
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
cpu->isar.id_aa64pfr0 = t;
/* Replicate the same data to the 32-bit id registers. */
u = cpu->isar.id_isar5;
u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
cpu->isar.id_isar5 = u;
u = cpu->isar.id_isar6;
u = FIELD_DP32(u, ID_ISAR6, DP, 1);
cpu->isar.id_isar6 = u;
/*
* FIXME: We do not support ARMv8.2-fp16 for AArch32 yet,
* so do not set MVFR1.FPHP. Strictly speaking this is not legal,
* but it is also not legal to enable SVE without support for FP16,
* and enabling SVE in system mode is more useful in the short term.
*/
set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
set_feature(&cpu->env, ARM_FEATURE_V8_ATOMICS);
set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
set_feature(&cpu->env, ARM_FEATURE_SVE);
#ifdef CONFIG_USER_ONLY
/* For usermode -cpu max we can use a larger and more efficient DCZ
* blocksize since we don't have to follow what the hardware does.
*/

target/arm/helper.c

@ -56,6 +56,8 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
V8M_SAttributes *sattrs);
#endif
static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
int nregs;
@ -552,42 +554,6 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate all (TLBIALL) */
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (TLBIASID) */
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
@ -621,6 +587,73 @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
* Non-IS variants of TLB operations are upgraded to
* IS versions if we are at NS EL1 and HCR_EL2.FB is set to
* force broadcast of these operations.
*/
static bool tlb_force_broadcast(CPUARMState *env)
{
return (env->cp15.hcr_el2 & HCR_FB) &&
arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate all (TLBIALL) */
ARMCPU *cpu = arm_env_get_cpu(env);
if (tlb_force_broadcast(env)) {
tlbiall_is_write(env, NULL, value);
return;
}
tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
ARMCPU *cpu = arm_env_get_cpu(env);
if (tlb_force_broadcast(env)) {
tlbimva_is_write(env, NULL, value);
return;
}
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (TLBIASID) */
ARMCPU *cpu = arm_env_get_cpu(env);
if (tlb_force_broadcast(env)) {
tlbiasid_is_write(env, NULL, value);
return;
}
tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
ARMCPU *cpu = arm_env_get_cpu(env);
if (tlb_force_broadcast(env)) {
tlbimvaa_is_write(env, NULL, value);
return;
}
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -1296,12 +1329,26 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
CPUState *cs = ENV_GET_CPU(env);
uint64_t ret = 0;
if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
ret |= CPSR_I;
if (arm_hcr_el2_imo(env)) {
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
ret |= CPSR_I;
}
} else {
if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
ret |= CPSR_I;
}
}
if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
ret |= CPSR_F;
if (arm_hcr_el2_fmo(env)) {
if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
ret |= CPSR_F;
}
} else {
if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
ret |= CPSR_F;
}
}
/* External aborts are not possible in QEMU so A bit is always clear */
return ret;
}
@ -2270,13 +2317,15 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
* * The Non-secure TTBCR.EAE bit is set to 1
* * The implementation includes EL2, and the value of HCR.VM is 1
*
* (Note that HCR.DC makes HCR.VM behave as if it is 1.)
*
* ATS1Hx always uses the 64bit format (not supported yet).
*/
format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
if (arm_feature(env, ARM_FEATURE_EL2)) {
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
format64 |= env->cp15.hcr_el2 & HCR_VM;
format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
} else {
format64 |= arm_current_el(env) == 2;
}
@ -2709,12 +2758,10 @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* 64 bit accesses to the TTBRs can change the ASID and so we
* must flush the TLB.
*/
if (cpreg_field_is_64bit(ri)) {
/* If the ASID changes (with a 64-bit write), we must flush the TLB. */
if (cpreg_field_is_64bit(ri) &&
extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
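
The check above works because with a 64-bit write the ASID lives in bits [63:48] of TTBR0/TTBR1; XOR-ing the old and new values and extracting that field is a cheap changed-ASID test, so a base-address-only change no longer flushes. A hedged sketch of the test in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified version of the check in vmsa_ttbr_write(): the ASID is
     * TTBRn[63:48], so only a change there requires a TLB flush. */
    static int asid_changed(uint64_t old_ttbr, uint64_t new_ttbr)
    {
        return ((old_ttbr ^ new_ttbr) >> 48) != 0;
    }

    int main(void)
    {
        uint64_t old_ttbr = 0x0001000040001000ULL;  /* ASID 1 */
        /* Same ASID, new table base: no flush required. */
        printf("base change: %d\n",
               asid_changed(old_ttbr, 0x0001000040002000ULL));
        /* ASID changes from 1 to 2: flush. */
        printf("asid change: %d\n",
               asid_changed(old_ttbr, 0x0002000040001000ULL));
        return 0;
    }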
@ -3083,22 +3130,6 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
* Page D4-1736 (DDI0487A.b)
*/
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_S1SE1 |
ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_S12NSE1 |
ARMMMUIdxBit_S12NSE0);
}
}
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -3116,6 +3147,27 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = ENV_GET_CPU(env);
if (tlb_force_broadcast(env)) {
tlbi_aa64_vmalle1is_write(env, NULL, value);
return;
}
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_S1SE1 |
ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx(cs,
ARMMMUIdxBit_S12NSE1 |
ARMMMUIdxBit_S12NSE0);
}
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -3205,29 +3257,6 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by VA, EL1&0 (AArch64 version).
* Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
* since we don't support flush-for-specific-ASID-only or
* flush-last-level-only.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
if (arm_is_secure_below_el3(env)) {
tlb_flush_page_by_mmuidx(cs, pageaddr,
ARMMMUIdxBit_S1SE1 |
ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx(cs, pageaddr,
ARMMMUIdxBit_S12NSE1 |
ARMMMUIdxBit_S12NSE0);
}
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -3275,6 +3304,34 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by VA, EL1&0 (AArch64 version).
* Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
* since we don't support flush-for-specific-ASID-only or
* flush-last-level-only.
*/
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
if (tlb_force_broadcast(env)) {
tlbi_aa64_vae1is_write(env, NULL, value);
return;
}
if (arm_is_secure_below_el3(env)) {
tlb_flush_page_by_mmuidx(cs, pageaddr,
ARMMMUIdxBit_S1SE1 |
ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx(cs, pageaddr,
ARMMMUIdxBit_S12NSE1 |
ARMMMUIdxBit_S12NSE0);
}
}
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -3872,6 +3929,7 @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = ENV_GET_CPU(env);
uint64_t valid_mask = HCR_MASK;
if (arm_feature(env, ARM_FEATURE_EL3)) {
@ -3890,6 +3948,28 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Clear RES0 bits. */
value &= valid_mask;
/*
* VI and VF are kept in cs->interrupt_request. Modifying that
* requires that we have the iothread lock, which is done by
* marking the reginfo structs as ARM_CP_IO.
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
* possible for it to be taken immediately, because VIRQ and
* VFIQ are masked unless running at EL0 or EL1, and HCR
* can only be written at EL2.
*/
g_assert(qemu_mutex_iothread_locked());
if (value & HCR_VI) {
cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
} else {
cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
}
if (value & HCR_VF) {
cs->interrupt_request |= CPU_INTERRUPT_VFIQ;
} else {
cs->interrupt_request &= ~CPU_INTERRUPT_VFIQ;
}
value &= ~(HCR_VI | HCR_VF);
/* These bits change the MMU setup:
* HCR_VM enables stage 2 translation
* HCR_PTW forbids certain page-table setups
@ -3917,16 +3997,32 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
hcr_write(env, NULL, value);
}
static uint64_t hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* The VI and VF bits live in cs->interrupt_request */
uint64_t ret = env->cp15.hcr_el2 & ~(HCR_VI | HCR_VF);
CPUState *cs = ENV_GET_CPU(env);
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
ret |= HCR_VI;
}
if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
ret |= HCR_VF;
}
return ret;
}
static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_IO,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
.writefn = hcr_write },
.writefn = hcr_write, .readfn = hcr_read },
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
.type = ARM_CP_ALIAS,
.type = ARM_CP_ALIAS | ARM_CP_IO,
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
.writefn = hcr_writelow },
.writefn = hcr_writelow, .readfn = hcr_read },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
@ -4163,7 +4259,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
{ .name = "HCR2", .state = ARM_CP_STATE_AA32,
.type = ARM_CP_ALIAS,
.type = ARM_CP_ALIAS | ARM_CP_IO,
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
.access = PL2_RW,
.fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
@ -4214,7 +4310,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
{ .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
.access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
.access = PL3_RW, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
@ -4873,7 +4969,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t pfr0 = cpu->id_aa64pfr0;
uint64_t pfr0 = cpu->isar.id_aa64pfr0;
if (env->gicv3state) {
pfr0 |= 1 << 24;
@ -4940,27 +5036,27 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar0 },
.resetvalue = cpu->isar.id_isar0 },
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar1 },
.resetvalue = cpu->isar.id_isar1 },
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar2 },
.resetvalue = cpu->isar.id_isar2 },
{ .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar3 },
.resetvalue = cpu->isar.id_isar3 },
{ .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar4 },
.resetvalue = cpu->isar.id_isar4 },
{ .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.id_isar5 },
{ .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@ -4968,7 +5064,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.id_isar6 },
REGINFO_SENTINEL
};
define_arm_cp_regs(cpu, v6_idregs);
@ -5039,7 +5135,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.id_aa64pfr1},
{ .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@ -5100,11 +5196,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.id_aa64isar0 },
{ .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.id_aa64isar1 },
{ .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@ -5164,15 +5260,15 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.mvfr0 },
{ .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.mvfr1 },
{ .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->isar.mvfr2 },
{ .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@ -5618,7 +5714,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &sctlr);
}
if (cpu_isar_feature(aa64_sve, cpu)) {
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
if (arm_feature(env, ARM_FEATURE_EL2)) {
define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
@ -6208,7 +6304,17 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
mask |= CPSR_IL;
val |= CPSR_IL;
}
qemu_log_mask(LOG_GUEST_ERROR,
"Illegal AArch32 mode switch attempt from %s to %s\n",
aarch32_mode_name(env->uncached_cpsr),
aarch32_mode_name(val));
} else {
qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
write_type == CPSRWriteExceptionReturn ?
"Exception return from AArch32" :
"AArch32 mode switch from",
aarch32_mode_name(env->uncached_cpsr),
aarch32_mode_name(val), env->regs[15]);
switch_mode(env, val & CPSR_M);
}
}
@ -6306,7 +6412,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
return 0;
}
static void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@ -6328,7 +6434,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
#else
static void switch_mode(CPUARMState *env, int mode)
{
int old_mode;
int i;
@ -8194,6 +8300,19 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
}
if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
if (!arm_feature(env, ARM_FEATURE_V8)) {
/*
* QEMU syndrome values are v8-style. v7 has the IL bit
* UNK/SBZP for "field not valid" cases, where v8 uses RES1.
* If this is a v7 CPU, squash the IL bit in those cases.
*/
if (cs->exception_index == EXCP_PREFETCH_ABORT ||
(cs->exception_index == EXCP_DATA_ABORT &&
!(env->exception.syndrome & ARM_EL_ISV)) ||
syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
env->exception.syndrome &= ~ARM_EL_IL;
}
}
env->cp15.esr_el[2] = env->exception.syndrome;
}
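
To make the squash concrete, a worked example using the syndrome helpers from internals.h (illustrative only, not code from the patch):

    uint32_t syn = syn_uncategorized();  /* v8 form: EC_UNCATEGORIZED with IL set */
    g_assert(syn_get_ec(syn) == EC_UNCATEGORIZED);
    syn &= ~ARM_EL_IL;                   /* v7 form: IL is UNK/SBZP, so squash it */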
@ -8228,7 +8347,7 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
uint32_t moe;
/* If this is a debug exception we must update the DBGDSCR.MOE bits */
switch (syn_get_ec(env->exception.syndrome)) {
case EC_BREAKPOINT:
case EC_BREAKPOINT_SAME_EL:
moe = 1;
@ -8425,6 +8544,15 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
case EXCP_HVC:
case EXCP_HYP_TRAP:
case EXCP_SMC:
if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
/*
* QEMU internal FP/SIMD syndromes from AArch32 include the
* TA and coproc fields which are only exposed if the exception
* is taken to AArch32 Hyp mode. Mask them out to get a valid
* AArch64 format syndrome.
*/
env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
}
env->cp15.esr_el[new_el] = env->exception.syndrome;
break;
case EXCP_IRQ:
@ -8568,7 +8696,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
if (qemu_loglevel_mask(CPU_LOG_INT)
&& !excp_is_internal(cs->exception_index)) {
qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
syn_get_ec(env->exception.syndrome),
env->exception.syndrome);
}
@ -8665,7 +8793,8 @@ static inline bool regime_translation_disabled(CPUARMState *env,
}
if (mmu_idx == ARMMMUIdx_S2NS) {
/* HCR.DC means HCR.VM behaves as 1 */
return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
}
if (env->cp15.hcr_el2 & HCR_TGE) {
@ -8675,6 +8804,12 @@ static inline bool regime_translation_disabled(CPUARMState *env,
}
}
if ((env->cp15.hcr_el2 & HCR_DC) &&
(mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
return true;
}
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
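
Taken together, the two HCR.DC changes above give regime_translation_disabled() the following behaviour (summary comment, not code from the patch):

    /*
     * Effect of HCR.DC on regime_translation_disabled():
     *   mmu_idx          HCR.DC == 0           HCR.DC == 1
     *   S1NSE0/S1NSE1    SCTLR_EL1.M == 0      always true  (stage 1 off)
     *   S2NS             HCR.VM == 0           always false (stage 2 on)
     */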
@ -9026,9 +9161,20 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr s2pa;
int s2prot;
int ret;
ARMCacheAttrs cacheattrs = {};
ARMCacheAttrs *pcacheattrs = NULL;
if (env->cp15.hcr_el2 & HCR_PTW) {
/*
* PTW means we must fault if this S1 walk touches S2 Device
* memory; otherwise we don't care about the attributes and can
* save the S2 translation the effort of computing them.
*/
pcacheattrs = &cacheattrs;
}
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
&txattrs, &s2prot, &s2size, fi, pcacheattrs);
if (ret) {
assert(fi->type != ARMFault_None);
fi->s2addr = addr;
@ -9036,6 +9182,14 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
fi->s1ptw = true;
return ~0;
}
if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
/* Access was to Device memory: generate Permission fault */
fi->type = ARMFault_Permission;
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
return ~0;
}
addr = s2pa;
}
return addr;
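
The Device-memory test above relies on the MAIR-style attribute byte that get_phys_addr_lpae() fills in when pcacheattrs is non-NULL: a zero high nibble encodes Device memory, a non-zero high nibble Normal memory. For example (illustrative attribute values):

    /* MAIR-format attributes as returned in cacheattrs.attrs:
     *   0x04 -> Device-nGnRE: high nibble 0, so a PTW under HCR.PTW faults
     *   0xff -> Normal WB RAWA: high nibble non-zero, so the walk proceeds */
    g_assert((0x04 & 0xf0) == 0);
    g_assert((0xff & 0xf0) != 0);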
@ -10655,6 +10809,16 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/* Combine the S1 and S2 cache attributes, if needed */
if (!ret && cacheattrs != NULL) {
if (env->cp15.hcr_el2 & HCR_DC) {
/*
* HCR.DC forces the first stage attributes to
* Normal Non-Shareable,
* Inner Write-Back Read-Allocate Write-Allocate,
* Outer Write-Back Read-Allocate Write-Allocate.
*/
cacheattrs->attrs = 0xff;
cacheattrs->shareability = 0;
}
*cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
}
@ -11612,7 +11776,7 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
uint32_t changed;
/* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
val &= ~FPCR_FZ16;
}
@ -12671,13 +12835,15 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
uint32_t flags;
if (is_a64(env)) {
ARMCPU *cpu = arm_env_get_cpu(env);
*pc = env->pc;
flags = ARM_TBFLAG_AARCH64_STATE_MASK;
/* Get control bits for tagged addresses */
flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
if (cpu_isar_feature(aa64_sve, cpu)) {
int sve_el = sve_exception_el(env, current_el);
uint32_t zcr_len;
@ -12801,11 +12967,12 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
void aarch64_sve_change_el(CPUARMState *env, int old_el,
int new_el, bool el0_a64)
{
ARMCPU *cpu = arm_env_get_cpu(env);
int old_len, new_len;
bool old_a64, new_a64;
/* Nothing to do if no SVE. */
if (!cpu_isar_feature(aa64_sve, cpu)) {
return;
}

[diff: target/arm/internals.h]

@ -145,7 +145,6 @@ static inline int bank_number(int mode)
g_assert_not_reached();
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
@ -279,14 +278,19 @@ enum arm_exception_class {
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
static inline uint32_t syn_get_ec(uint32_t syn)
{
return syn >> ARM_EL_EC_SHIFT;
}
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
* mode differs slightly, and we fix this up when populating HSR in
* arm_cpu_do_interrupt_aarch32_hyp().
* The exception is FP/SIMD access traps -- these report extra information
* when taking an exception to AArch32. For those we include the extra coproc
* and TA fields, and mask them out when taking the exception to AArch64.
*/
static inline uint32_t syn_uncategorized(void)
{
@ -386,9 +390,18 @@ static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
/* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | 0xa;
}
static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
{
/* AArch32 SIMD trap: TA == 1 coproc == 0 */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (1 << 5);
}
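
A short worked example of the two encodings (illustrative values): an AArch32 SIMD trap carries TA and coproc in the low bits, and taking the same exception to AArch64 masks out the low 20 bits exactly as arm_cpu_do_interrupt_aarch64() now does, while CV and COND remain valid AArch64 syndrome fields:

    uint32_t syn = syn_simd_access_trap(1, 0xe, false); /* cv = 1, cond = always */
    g_assert(syn_get_ec(syn) == EC_ADVSIMDFPACCESSTRAP);
    g_assert(syn & (1 << 5));               /* TA == 1: AArch32-only field */
    syn &= ~MAKE_64BIT_MASK(0, 20);         /* as done on entry to AArch64 */
    g_assert(!(syn & (1 << 5)) && !(syn & 0xf)); /* TA and coproc cleared */
    g_assert(syn & (1 << 24));              /* CV is still a valid field */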
static inline uint32_t syn_sve_access_trap(void)
@ -840,4 +853,22 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
}
}
/**
* aarch32_mode_name(): Return name of the AArch32 CPU mode
* @psr: Program Status Register indicating CPU mode
*
* Returns, for debug logging purposes, a printable representation
* of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
* the low bits of the specified PSR.
*/
static inline const char *aarch32_mode_name(uint32_t psr)
{
static const char cpu_mode_names[16][4] = {
"usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
"???", "???", "hyp", "und", "???", "???", "???", "sys"
};
return cpu_mode_names[psr & 0xf];
}
#endif

[diff: target/arm/kvm.c]

@ -34,6 +34,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
};
static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static ARMHostCPUFeatures arm_host_cpu_features;
@ -48,6 +49,12 @@ int kvm_arm_vcpu_init(CPUState *cs)
return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}
void kvm_arm_init_serror_injection(CPUState *cs)
{
cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
KVM_CAP_ARM_INJECT_SERROR_ESR);
}
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
int *fdarray,
struct kvm_vcpu_init *init)
@ -522,6 +529,59 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
return 0;
}
int kvm_put_vcpu_events(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
struct kvm_vcpu_events events;
int ret;
if (!kvm_has_vcpu_events()) {
return 0;
}
memset(&events, 0, sizeof(events));
events.exception.serror_pending = env->serror.pending;
/* Inject an SError into the guest with the specified syndrome if the
* host kernel supports it; otherwise inject the SError without a syndrome.
*/
if (cap_has_inject_serror_esr) {
events.exception.serror_has_esr = env->serror.has_esr;
events.exception.serror_esr = env->serror.esr;
}
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
if (ret) {
error_report("failed to put vcpu events");
}
return ret;
}
int kvm_get_vcpu_events(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
struct kvm_vcpu_events events;
int ret;
if (!kvm_has_vcpu_events()) {
return 0;
}
memset(&events, 0, sizeof(events));
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
if (ret) {
error_report("failed to get vcpu events");
return ret;
}
env->serror.pending = events.exception.serror_pending;
env->serror.has_esr = events.exception.serror_has_esr;
env->serror.esr = events.exception.serror_esr;
return 0;
}
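
For context, the intended interaction of these two helpers with migration (sketch; the actual call sites appear in the kvm32.c, kvm64.c and machine.c hunks below):

    /* Source VM:      kvm_get_vcpu_events()   KVM state -> env->serror
     * Migration:      vmstate_serror          env->serror on the wire
     * Destination VM: kvm_put_vcpu_events()   env->serror -> KVM state */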
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

[diff: target/arm/kvm32.c]

@ -217,6 +217,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;
/* Check whether userspace can specify guest syndrome value */
kvm_arm_init_serror_injection(cs);
return kvm_arm_init_cpreg_list(cpu);
}
@ -358,6 +361,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
ret = kvm_put_vcpu_events(cpu);
if (ret) {
return ret;
}
/* Note that we do not call write_cpustate_to_list()
* here, so we are only writing the tuple list back to
* KVM. This is safe because nothing can change the
@ -445,6 +453,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpscr(env, fpscr);
ret = kvm_get_vcpu_events(cpu);
if (ret) {
return ret;
}
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}

[diff: target/arm/kvm64.c]

@ -546,6 +546,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
kvm_arm_init_debug(cs);
/* Check whether userspace can specify guest syndrome value */
kvm_arm_init_serror_injection(cs);
return kvm_arm_init_cpreg_list(cpu);
}
@ -727,6 +730,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
ret = kvm_put_vcpu_events(cpu);
if (ret) {
return ret;
}
if (!write_list_to_kvmstate(cpu, level)) {
return EINVAL;
}
@ -863,6 +871,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpcr(env, fpr);
ret = kvm_get_vcpu_events(cpu);
if (ret) {
return ret;
}
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
@ -920,7 +933,7 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
int hsr_ec = syn_get_ec(debug_exit->hsr);
ARMCPU *cpu = ARM_CPU(cs);
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = &cpu->env;

[diff: target/arm/kvm_arm.h]

@ -121,6 +121,30 @@ bool write_kvmstate_to_list(ARMCPU *cpu);
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
/**
* kvm_arm_init_serror_injection:
* @cs: CPUState
*
* Check whether KVM can set guest SError syndrome.
*/
void kvm_arm_init_serror_injection(CPUState *cs);
/**
* kvm_get_vcpu_events:
* @cpu: ARMCPU
*
* Get VCPU-related state from KVM.
*/
int kvm_get_vcpu_events(ARMCPU *cpu);
/**
* kvm_put_vcpu_events:
* @cpu: ARMCPU
*
* Put VCPU-related state to KVM.
*/
int kvm_put_vcpu_events(ARMCPU *cpu);
#ifdef CONFIG_KVM
/**
* kvm_arm_create_scratch_host_vcpu:

[diff: target/arm/machine.c]

@ -131,9 +131,8 @@ static const VMStateDescription vmstate_iwmmxt = {
static bool sve_needed(void *opaque)
{
ARMCPU *cpu = opaque;
return cpu_isar_feature(aa64_sve, cpu);
}
/* The first two words of each Zreg are stored in VFP state. */
@ -172,6 +171,27 @@ static const VMStateDescription vmstate_sve = {
};
#endif /* AARCH64 */
static bool serror_needed(void *opaque)
{
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
return env->serror.pending != 0;
}
static const VMStateDescription vmstate_serror = {
.name = "cpu/serror",
.version_id = 1,
.minimum_version_id = 1,
.needed = serror_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(env.serror.pending, ARMCPU),
VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
VMSTATE_UINT64(env.serror.esr, ARMCPU),
VMSTATE_END_OF_LIST()
}
};
static bool m_needed(void *opaque)
{
ARMCPU *cpu = opaque;
@ -726,6 +746,7 @@ const VMStateDescription vmstate_arm_cpu = {
#ifdef TARGET_AARCH64
&vmstate_sve,
#endif
&vmstate_serror,
NULL
}
};

[diff: target/arm/op_helper.c]

@ -42,7 +42,7 @@ void raise_exception(CPUARMState *env, uint32_t excp,
* (see DDI0478C.a D1.10.4)
*/
target_el = 2;
if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
syndrome = syn_uncategorized();
}
}

[diff: target/arm/translate-a64.c (too large, suppressed)]

[diff: target/arm/translate.c (too large, suppressed)]

[diff: target/arm/translate.h]

@ -7,6 +7,7 @@
/* internal defines */
typedef struct DisasContext {
DisasContextBase base;
const ARMISARegisters *isar;
target_ulong pc;
target_ulong page_start;
@ -190,4 +191,24 @@ static inline TCGv_i32 get_ahp_flag(void)
return ret;
}
/* Vector operations shared between ARM and AArch64. */
extern const GVecGen3 bsl_op;
extern const GVecGen3 bit_op;
extern const GVecGen3 bif_op;
extern const GVecGen3 mla_op[4];
extern const GVecGen3 mls_op[4];
extern const GVecGen3 cmtst_op[4];
extern const GVecGen2i ssra_op[4];
extern const GVecGen2i usra_op[4];
extern const GVecGen2i sri_op[4];
extern const GVecGen2i sli_op[4];
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
/*
* Forward to the isar_feature_* tests given a DisasContext pointer.
*/
#define dc_isar_feature(name, ctx) \
({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
#endif /* TARGET_ARM_TRANSLATE_H */
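
A hedged usage sketch for dc_isar_feature() (the aa64_fp16 predicate is the one used elsewhere in this series; unallocated_encoding() is the existing translate-a64.c helper):

    /* In a decode function with DisasContext *s, gate an FP16 encoding on
     * the ID-register-derived feature rather than an ARM_FEATURE_* flag: */
    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }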