target-arm queue:

* hw/char/stm32f2xx_usart: Do not update data register when device is disabled
  * hw/arm/virt-acpi-build: Set COHACC override flag in IORT SMMUv3 node
  * target/arm: Allow Aarch32 exception return to switch from Mon->Hyp
  * ftgmac100: implement the new MDIO interface on Aspeed SoC
  * implement the ARMv8.3-PAuth extension
  * improve emulation of the ARM PMU
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABCAAGBQJcRaIDAAoJEDwlJe0UNgzePuIP/0WcYrIpoUMXcAFBcxR19qBt
 SI9t10vm+BHqvh1YZ45ZFSLH7YMjX0g9W1DJpIxaAKyhUa9FJy2e1WpMwNAnGtNT
 Iol4ogqapMEE7BtNlE5ou4KKV8Lr+uQjwxS045PAuQQiIaKNMGndPG6Nb7sRFeOm
 x/elxhm+k8ohLA8/6f/uHxlk77qRUcWQUZjvMGtxPSb5TCkBgP2TuSVQouN9xcCz
 Ma9oWZjIoWwpIeOpHMfDq4B173TVumH1Pw0lRdWrrxAgAbcuQugY6j0TYTyAQHug
 hd3UOoT+3OsU5Xfhz18KIG1K5TP709wbWP+8T/uupkcsRbzLzX5cgguNtkED41OK
 tGTY2BR0E4qV8XeajeSSw+GfRcoAHwRbEOnmXNmTMQfkLJ2w543rOBj4vkyhXh6a
 m/r4tK26KdQL1dggxlAreUareaNUzU9bQUD7XH8zxME1r+Ja8QokOiomi2ZwZyXy
 INmhzZgCTIx3x9xCJNs8YwU4ufAVyrg2X52tOranIhqwv6wBHP5oOiNRR1ESiK1k
 oAf12kmyf2R//i8HFH1dm1bhzN7NrufOnYlyCh/rN+B4hbb73ji1D7jzfoiYcuVR
 A4ZIx0Ylt6TXbb/Rxj1vthCSoL5Tx2QShKV/1IJwF5ZjkIuZ/0l4b8Ksv7fo56Ep
 HkkKyevoNEzG7S5C+8po
 =0Xhe
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190121' into staging

target-arm queue:
 * hw/char/stm32f2xx_usart: Do not update data register when device is disabled
 * hw/arm/virt-acpi-build: Set COHACC override flag in IORT SMMUv3 node
 * target/arm: Allow Aarch32 exception return to switch from Mon->Hyp
 * ftgmac100: implement the new MDIO interface on Aspeed SoC
 * implement the ARMv8.3-PAuth extension
 * improve emulation of the ARM PMU

# gpg: Signature made Mon 21 Jan 2019 10:42:11 GMT
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190121: (48 commits)
  target/arm: Implement PMSWINC
  target/arm: PMU: Set PMCR.N to 4
  target/arm: PMU: Add instruction and cycle events
  target/arm: Finish implementation of PM[X]EVCNTR and PM[X]EVTYPER
  target/arm: Add array for supported PMU events, generate PMCEID[01]_EL0
  target/arm: Make PMCEID[01]_EL0 64 bit registers, add PMCEID[23]
  target/arm: Define FIELDs for ID_DFR0
  target/arm: Implement PMOVSSET
  target/arm: Allow AArch32 access for PMCCFILTR
  target/arm: Filter cycle counter based on PMCCFILTR_EL0
  target/arm: Swap PMU values before/after migrations
  target/arm: Reorganize PMCCNTR accesses
  migration: Add post_save function to VMStateDescription
  target/arm: Tidy TBI handling in gen_a64_set_pc
  target/arm: Enable PAuth for user-only
  target/arm: Enable PAuth for -cpu max
  target/arm: Add PAuth system registers
  target/arm: Implement pauth_computepac
  target/arm: Implement pauth_addpac
  target/arm: Implement pauth_auth
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2019-01-21 12:49:48 +00:00
commit bbca106776
21 changed files with 2515 additions and 632 deletions

docs/devel/migration.rst

@ -419,8 +419,13 @@ The functions to do that are inside a vmstate definition, and are called:
This function is called before we save the state of one device.
Example: You can look at hpet.c, that uses the three function to
massage the state that is transferred.
- ``int (*post_save)(void *opaque);``
This function is called after we save the state of one device
(even upon failure, unless the call to pre_save returned an error).
Example: You can look at hpet.c, that uses the first three functions
to massage the state that is transferred.
The ``VMSTATE_WITH_TMP`` macro may be useful when the migration
data doesn't match the stored device data well; it allows an

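As a quick illustration of the new hook (this block is not part of the series; the device, field names and conversion helpers below are made up), a VMStateDescription that massages its state in pre_save and then restores the run-time form in post_save might look roughly like this:

/* Sketch only: assumes "migration/vmstate.h" and hypothetical helpers. */
typedef struct DemoDeviceState {
    uint64_t counter;        /* live, run-time form */
    uint64_t counter_saved;  /* form actually put on the wire */
} DemoDeviceState;

static int demo_pre_save(void *opaque)
{
    DemoDeviceState *s = opaque;

    /* Convert to the migratable form before the fields are written. */
    s->counter_saved = demo_counter_to_wire(s->counter);    /* hypothetical */
    return 0;
}

static int demo_post_save(void *opaque)
{
    DemoDeviceState *s = opaque;

    /* Undo the conversion so the device keeps running normally,
     * whether or not the save actually succeeded.
     */
    s->counter = demo_counter_from_wire(s->counter_saved);  /* hypothetical */
    return 0;
}

static const VMStateDescription vmstate_demo_device = {
    .name = "demo-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = demo_pre_save,
    .post_save = demo_post_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(counter_saved, DemoDeviceState),
        VMSTATE_END_OF_LIST()
    }
};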
hw/arm/virt-acpi-build.c

@ -418,6 +418,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
smmu->mapping_count = cpu_to_le32(1);
smmu->mapping_offset = cpu_to_le32(sizeof(*smmu));
smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base);
smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE);
smmu->event_gsiv = cpu_to_le32(irq);
smmu->pri_gsiv = cpu_to_le32(irq + 1);
smmu->gerr_gsiv = cpu_to_le32(irq + 2);

hw/char/stm32f2xx_usart.c

@ -53,14 +53,13 @@ static void stm32f2xx_usart_receive(void *opaque, const uint8_t *buf, int size)
{
STM32F2XXUsartState *s = opaque;
s->usart_dr = *buf;
if (!(s->usart_cr1 & USART_CR1_UE && s->usart_cr1 & USART_CR1_RE)) {
/* USART not enabled - drop the chars */
DB_PRINT("Dropping the chars\n");
return;
}
s->usart_dr = *buf;
s->usart_sr |= USART_SR_RXNE;
if (s->usart_cr1 & USART_CR1_RXNEIE) {

hw/net/ftgmac100.c

@ -89,6 +89,18 @@
#define FTGMAC100_PHYDATA_MIIWDATA(x) ((x) & 0xffff)
#define FTGMAC100_PHYDATA_MIIRDATA(x) (((x) >> 16) & 0xffff)
/*
* PHY control register - New MDC/MDIO interface
*/
#define FTGMAC100_PHYCR_NEW_DATA(x) (((x) >> 16) & 0xffff)
#define FTGMAC100_PHYCR_NEW_FIRE (1 << 15)
#define FTGMAC100_PHYCR_NEW_ST_22 (1 << 12)
#define FTGMAC100_PHYCR_NEW_OP(x) (((x) >> 10) & 3)
#define FTGMAC100_PHYCR_NEW_OP_WRITE 0x1
#define FTGMAC100_PHYCR_NEW_OP_READ 0x2
#define FTGMAC100_PHYCR_NEW_DEV(x) (((x) >> 5) & 0x1f)
#define FTGMAC100_PHYCR_NEW_REG(x) ((x) & 0x1f)
/*
* Feature Register
*/
@ -269,9 +281,9 @@ static void phy_reset(FTGMAC100State *s)
s->phy_int = 0;
}
static uint32_t do_phy_read(FTGMAC100State *s, int reg)
static uint16_t do_phy_read(FTGMAC100State *s, uint8_t reg)
{
uint32_t val;
uint16_t val;
switch (reg) {
case MII_BMCR: /* Basic Control */
@ -336,7 +348,7 @@ static uint32_t do_phy_read(FTGMAC100State *s, int reg)
MII_BMCR_FD | MII_BMCR_CTST)
#define MII_ANAR_MASK 0x2d7f
static void do_phy_write(FTGMAC100State *s, int reg, uint32_t val)
static void do_phy_write(FTGMAC100State *s, uint8_t reg, uint16_t val)
{
switch (reg) {
case MII_BMCR: /* Basic Control */
@ -373,6 +385,55 @@ static void do_phy_write(FTGMAC100State *s, int reg, uint32_t val)
}
}
static void do_phy_new_ctl(FTGMAC100State *s)
{
uint8_t reg;
uint16_t data;
if (!(s->phycr & FTGMAC100_PHYCR_NEW_ST_22)) {
qemu_log_mask(LOG_UNIMP, "%s: unsupported ST code\n", __func__);
return;
}
/* Nothing to do */
if (!(s->phycr & FTGMAC100_PHYCR_NEW_FIRE)) {
return;
}
reg = FTGMAC100_PHYCR_NEW_REG(s->phycr);
data = FTGMAC100_PHYCR_NEW_DATA(s->phycr);
switch (FTGMAC100_PHYCR_NEW_OP(s->phycr)) {
case FTGMAC100_PHYCR_NEW_OP_WRITE:
do_phy_write(s, reg, data);
break;
case FTGMAC100_PHYCR_NEW_OP_READ:
s->phydata = do_phy_read(s, reg) & 0xffff;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid OP code %08x\n",
__func__, s->phycr);
}
s->phycr &= ~FTGMAC100_PHYCR_NEW_FIRE;
}
static void do_phy_ctl(FTGMAC100State *s)
{
uint8_t reg = FTGMAC100_PHYCR_REG(s->phycr);
if (s->phycr & FTGMAC100_PHYCR_MIIWR) {
do_phy_write(s, reg, s->phydata & 0xffff);
s->phycr &= ~FTGMAC100_PHYCR_MIIWR;
} else if (s->phycr & FTGMAC100_PHYCR_MIIRD) {
s->phydata = do_phy_read(s, reg) << 16;
s->phycr &= ~FTGMAC100_PHYCR_MIIRD;
} else {
qemu_log_mask(LOG_GUEST_ERROR, "%s: no OP code %08x\n",
__func__, s->phycr);
}
}
static int ftgmac100_read_bd(FTGMAC100Desc *bd, dma_addr_t addr)
{
if (dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd))) {
@ -628,7 +689,6 @@ static void ftgmac100_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
FTGMAC100State *s = FTGMAC100(opaque);
int reg;
switch (addr & 0xff) {
case FTGMAC100_ISR: /* Interrupt status */
@ -711,14 +771,11 @@ static void ftgmac100_write(void *opaque, hwaddr addr,
break;
case FTGMAC100_PHYCR: /* PHY Device control */
reg = FTGMAC100_PHYCR_REG(value);
s->phycr = value;
if (value & FTGMAC100_PHYCR_MIIWR) {
do_phy_write(s, reg, s->phydata & 0xffff);
s->phycr &= ~FTGMAC100_PHYCR_MIIWR;
if (s->revr & FTGMAC100_REVR_NEW_MDIO_INTERFACE) {
do_phy_new_ctl(s);
} else {
s->phydata = do_phy_read(s, reg) << 16;
s->phycr &= ~FTGMAC100_PHYCR_MIIRD;
do_phy_ctl(s);
}
break;
case FTGMAC100_PHYDATA:
@ -728,8 +785,7 @@ static void ftgmac100_write(void *opaque, hwaddr addr,
s->dblac = value;
break;
case FTGMAC100_REVR: /* Feature Register */
/* TODO: Only Old MDIO interface is supported */
s->revr = value & ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
s->revr = value;
break;
case FTGMAC100_FEAR1: /* Feature Register 1 */
s->fear1 = value;

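For orientation only (not part of the patch): a guest driver would use the new interface modelled above to read, say, the BMSR (register 1) of the PHY at address 0 by writing a PHYCR value assembled roughly as follows. The FTGMAC100_PHYCR_NEW_* names and bit positions come from the macros above; the rest is a hypothetical sketch of the guest side.

/* Hypothetical guest-side value for a Clause-22 read on the new interface. */
uint32_t phycr = FTGMAC100_PHYCR_NEW_FIRE              /* bit 15: start the transfer */
               | FTGMAC100_PHYCR_NEW_ST_22             /* bit 12: Clause-22 start code */
               | (FTGMAC100_PHYCR_NEW_OP_READ << 10)   /* OP field, bits 11:10 */
               | (0 << 5)                              /* PHY (device) address, bits 9:5 */
               | (1 << 0);                             /* register number, bits 4:0 */

Writing this value to PHYCR while REVR has FTGMAC100_REVR_NEW_MDIO_INTERFACE set routes the access through do_phy_new_ctl(), which performs the read, places the 16-bit result in the low half of PHYDATA and clears the FIRE bit; with the REVR bit clear the write takes the old-style do_phy_ctl() path instead.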
include/hw/acpi/acpi-defs.h

@ -626,6 +626,8 @@ struct AcpiIortItsGroup {
} QEMU_PACKED;
typedef struct AcpiIortItsGroup AcpiIortItsGroup;
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE 1
struct AcpiIortSmmu3 {
ACPI_IORT_NODE_HEADER_DEF
uint64_t base_address;

include/migration/vmstate.h

@ -185,6 +185,7 @@ struct VMStateDescription {
int (*pre_load)(void *opaque);
int (*post_load)(void *opaque, int version_id);
int (*pre_save)(void *opaque);
int (*post_save)(void *opaque);
bool (*needed)(void *opaque);
const VMStateField *fields;
const VMStateDescription **subsections;

migration/vmstate.c

@ -390,6 +390,9 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
if (ret) {
error_report("Save of field %s/%s failed",
vmsd->name, field->name);
if (vmsd->post_save) {
vmsd->post_save(opaque);
}
return ret;
}
@ -415,7 +418,15 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
json_end_array(vmdesc);
}
return vmstate_subsection_save(f, vmsd, opaque, vmdesc);
ret = vmstate_subsection_save(f, vmsd, opaque, vmdesc);
if (vmsd->post_save) {
int ps_ret = vmsd->post_save(opaque);
if (!ret) {
ret = ps_ret;
}
}
return ret;
}
static const VMStateDescription *

target/arm/Makefile.objs

@ -8,6 +8,7 @@ obj-y += translate.o op_helper.o helper.o cpu.o
obj-y += neon_helper.o iwmmxt_helper.o vec_helper.o
obj-y += gdbstub.o
obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
obj-$(TARGET_AARCH64) += pauth_helper.o
obj-y += crypto_helper.o
obj-$(CONFIG_SOFTMMU) += arm-powerctl.o

target/arm/cpu.c

@ -162,6 +162,9 @@ static void arm_cpu_reset(CPUState *s)
env->pstate = PSTATE_MODE_EL0t;
/* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
/* Enable all PAC instructions */
env->cp15.hcr_el2 |= HCR_API;
env->cp15.scr_el3 |= SCR_API;
/* and to the FP/Neon instructions */
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
/* and to the SVE instructions */
@ -1034,7 +1037,19 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
if (!cpu->has_pmu) {
unset_feature(env, ARM_FEATURE_PMU);
}
if (arm_feature(env, ARM_FEATURE_PMU)) {
cpu->pmceid0 = get_pmceid(&cpu->env, 0);
cpu->pmceid1 = get_pmceid(&cpu->env, 1);
if (!kvm_enabled()) {
arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
}
} else {
cpu->id_aa64dfr0 &= ~0xf00;
cpu->pmceid0 = 0;
cpu->pmceid1 = 0;
}
if (!arm_feature(env, ARM_FEATURE_EL2)) {
@ -1679,8 +1694,6 @@ static void cortex_a7_initfn(Object *obj)
cpu->id_pfr0 = 0x00001131;
cpu->id_pfr1 = 0x00011011;
cpu->id_dfr0 = 0x02010555;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->id_afr0 = 0x00000000;
cpu->id_mmfr0 = 0x10101105;
cpu->id_mmfr1 = 0x40000000;
@ -1726,8 +1739,6 @@ static void cortex_a15_initfn(Object *obj)
cpu->id_pfr0 = 0x00001131;
cpu->id_pfr1 = 0x00011011;
cpu->id_dfr0 = 0x02010555;
cpu->pmceid0 = 0x0000000;
cpu->pmceid1 = 0x00000000;
cpu->id_afr0 = 0x00000000;
cpu->id_mmfr0 = 0x10201105;
cpu->id_mmfr1 = 0x20000000;

target/arm/cpu.h

@ -201,11 +201,16 @@ typedef struct ARMVectorReg {
uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
/* In AArch32 mode, predicate registers do not exist at all. */
#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all. */
typedef struct ARMPredicateReg {
uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
} ARMPredicateReg;
/* In AArch32 mode, PAC keys do not exist at all. */
typedef struct ARMPACKey {
uint64_t lo, hi;
} ARMPACKey;
#endif
@ -468,10 +473,23 @@ typedef struct CPUARMState {
uint64_t oslsr_el1; /* OS Lock Status */
uint64_t mdcr_el2;
uint64_t mdcr_el3;
/* If the counter is enabled, this stores the last time the counter
* was reset. Otherwise it stores the counter value
/* Stores the architectural value of the counter *the last time it was
* updated* by pmccntr_op_start. Accesses should always be surrounded
* by pmccntr_op_start/pmccntr_op_finish to guarantee the latest
* architecturally-correct value is being read/set.
*/
uint64_t c15_ccnt;
/* Stores the delta between the architectural value and the underlying
* cycle count during normal operation. It is used to update c15_ccnt
* to be the correct architectural value before accesses. During
* accesses, c15_ccnt_delta contains the underlying count being used
* for the access, after which it reverts to the delta value in
* pmccntr_op_finish.
*/
uint64_t c15_ccnt_delta;
uint64_t c14_pmevcntr[31];
uint64_t c14_pmevcntr_delta[31];
uint64_t c14_pmevtyper[31];
uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
uint64_t vpidr_el2; /* Virtualization Processor ID Register */
uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
@ -605,6 +623,14 @@ typedef struct CPUARMState {
uint32_t cregs[16];
} iwmmxt;
#ifdef TARGET_AARCH64
ARMPACKey apia_key;
ARMPACKey apib_key;
ARMPACKey apda_key;
ARMPACKey apdb_key;
ARMPACKey apga_key;
#endif
#if defined(CONFIG_USER_ONLY)
/* For usermode syscall translation. */
int eabi;
@ -829,8 +855,8 @@ struct ARMCPU {
uint32_t id_pfr0;
uint32_t id_pfr1;
uint32_t id_dfr0;
uint32_t pmceid0;
uint32_t pmceid1;
uint64_t pmceid0;
uint64_t pmceid1;
uint32_t id_afr0;
uint32_t id_mmfr0;
uint32_t id_mmfr1;
@ -958,15 +984,42 @@ int cpu_arm_signal_handler(int host_signum, void *pinfo,
void *puc);
/**
* pmccntr_sync
* pmccntr_op_start/finish
* @env: CPUARMState
*
* Synchronises the counter in the PMCCNTR. This must always be called twice,
* once before any action that might affect the timer and again afterwards.
* The function is used to swap the state of the register if required.
* This only happens when not in user mode (!CONFIG_USER_ONLY)
* Convert the counter in the PMCCNTR between its delta form (the typical mode
* when it's enabled) and the guest-visible value. These two calls must always
* surround any action which might affect the counter.
*/
void pmccntr_sync(CPUARMState *env);
void pmccntr_op_start(CPUARMState *env);
void pmccntr_op_finish(CPUARMState *env);
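To make the delta bookkeeping described above concrete, here is a deliberately simplified sketch of what the start/finish pair does for the cycle counter. It is not the actual implementation (that lives in helper.c and additionally handles the PMCR.D divider and counter filtering); cycles_get_count() and cycle_counter_enabled() are stand-in names.

/* Simplified sketch of the delta <-> architectural-value conversion. */
static void sketch_pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);       /* raw host-derived count */

    if (cycle_counter_enabled(env)) {
        /* delta form -> guest-visible architectural value */
        env->cp15.c15_ccnt = cycles - env->cp15.c15_ccnt_delta;
    }
    /* remember the raw count used for this access */
    env->cp15.c15_ccnt_delta = cycles;
}

static void sketch_pmccntr_op_finish(CPUARMState *env)
{
    if (cycle_counter_enabled(env)) {
        /* guest-visible architectural value -> delta form again */
        env->cp15.c15_ccnt_delta -= env->cp15.c15_ccnt;
    }
}

Between the two calls c15_ccnt can be read or written directly as the architectural value; while the counter is disabled it simply keeps holding that value and the delta is left unused.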
/**
* pmu_op_start/finish
* @env: CPUARMState
*
* Convert all PMU counters between their delta form (the typical mode when
* they are enabled) and the guest-visible values. These two calls must
* surround any action which might affect the counters.
*/
void pmu_op_start(CPUARMState *env);
void pmu_op_finish(CPUARMState *env);
/**
* Functions to register as EL change hooks for PMU mode filtering
*/
void pmu_pre_el_change(ARMCPU *cpu, void *ignored);
void pmu_post_el_change(ARMCPU *cpu, void *ignored);
/*
* get_pmceid
* @env: CPUARMState
* @which: which PMCEID register to return (0 or 1)
*
* Return the PMCEID[01]_EL0 register values corresponding to the counters
* which are supported given the current configuration
*/
uint64_t get_pmceid(CPUARMState *env, unsigned which);
/* SCTLR bit meanings. Several bits have been reused in newer
* versions of the architecture; in that case we define constants
@ -978,12 +1031,15 @@ void pmccntr_sync(CPUARMState *env);
#define SCTLR_A (1U << 1)
#define SCTLR_C (1U << 2)
#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA (1U << 3)
#define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */
#define SCTLR_SA (1U << 3) /* AArch64 only */
#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */
#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_nAA (1U << 6) /* when v8.4-LSE is implemented */
#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD (1U << 7) /* v8 onward */
#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
@ -991,35 +1047,53 @@ void pmccntr_sync(CPUARMState *env);
#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F (1U << 10) /* up to v6 */
#define SCTLR_SW (1U << 10) /* v7 onward */
#define SCTLR_Z (1U << 11)
#define SCTLR_SW (1U << 10) /* v7, RES0 in v8 */
#define SCTLR_Z (1U << 11) /* in v7, RES1 in v8 */
#define SCTLR_EOS (1U << 11) /* v8.5-ExS */
#define SCTLR_I (1U << 12)
#define SCTLR_V (1U << 13)
#define SCTLR_V (1U << 13) /* AArch32 only */
#define SCTLR_EnDB (1U << 13) /* v8.3, AArch64 only */
#define SCTLR_RR (1U << 14) /* up to v7 */
#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI (1U << 16) /* v8 onward */
#define SCTLR_HA (1U << 17)
#define SCTLR_HA (1U << 17) /* up to v7, RES0 in v8 */
#define SCTLR_BR (1U << 17) /* PMSA only */
#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE (1U << 18) /* v8 onward */
#define SCTLR_WXN (1U << 19)
#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN (1U << 20) /* v7 onward */
#define SCTLR_FI (1U << 21)
#define SCTLR_U (1U << 22)
#define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */
#define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */
#define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */
#define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */
#define SCTLR_EIS (1U << 22) /* v8.5-ExS */
#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_SPAN (1U << 23) /* v8.1-PAN */
#define SCTLR_VE (1U << 24) /* up to v7 */
#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE (1U << 25)
#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI (1U << 27)
#define SCTLR_TRE (1U << 28)
#define SCTLR_AFE (1U << 29)
#define SCTLR_TE (1U << 30)
#define SCTLR_NMFI (1U << 27) /* up to v7, RAZ in v7VE and v8 */
#define SCTLR_EnDA (1U << 27) /* v8.3, AArch64 only */
#define SCTLR_TRE (1U << 28) /* AArch32 only */
#define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */
#define SCTLR_AFE (1U << 29) /* AArch32 only */
#define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */
#define SCTLR_TE (1U << 30) /* AArch32 only */
#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
#define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */
#define SCTLR_TCF0 (3ULL << 38) /* v8.5-MemTag */
#define SCTLR_TCF (3ULL << 40) /* v8.5-MemTag */
#define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */
#define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */
#define SCTLR_DSSBS (1ULL << 44) /* v8.5 */
#define CPTR_TCPAC (1U << 31)
#define CPTR_TTA (1U << 20)
@ -1029,7 +1103,8 @@ void pmccntr_sync(CPUARMState *env);
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
#define MDCR_SPME (1U << 17)
#define MDCR_SPME (1U << 17) /* MDCR_EL3 */
#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */
#define MDCR_SDD (1U << 16)
#define MDCR_SPD (3U << 14)
#define MDCR_TDRA (1U << 11)
@ -1039,6 +1114,7 @@ void pmccntr_sync(CPUARMState *env);
#define MDCR_HPME (1U << 7)
#define MDCR_TPM (1U << 6)
#define MDCR_TPMCR (1U << 5)
#define MDCR_HPMN (0x1fU)
/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)
@ -1618,6 +1694,15 @@ FIELD(ID_AA64MMFR1, PAN, 20, 4)
FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
FIELD(ID_AA64MMFR1, XNX, 28, 4)
FIELD(ID_DFR0, COPDBG, 0, 4)
FIELD(ID_DFR0, COPSDBG, 4, 4)
FIELD(ID_DFR0, MMAPDBG, 8, 4)
FIELD(ID_DFR0, COPTRC, 12, 4)
FIELD(ID_DFR0, MMAPTRC, 16, 4)
FIELD(ID_DFR0, MPROFDBG, 20, 4)
FIELD(ID_DFR0, PERFMON, 24, 4)
FIELD(ID_DFR0, TRACEFILT, 28, 4)
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
/* If adding a feature bit which corresponds to a Linux ELF
@ -2707,54 +2792,23 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
}
/* Return the MMU index for a v7M CPU in the specified security and
* privilege state
* privilege state.
*/
static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
bool secstate,
bool priv)
{
ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
if (priv) {
mmu_idx |= ARM_MMU_IDX_M_PRIV;
}
if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
}
if (secstate) {
mmu_idx |= ARM_MMU_IDX_M_S;
}
return mmu_idx;
}
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
bool secstate, bool priv);
/* Return the MMU index for a v7M CPU in the specified security state */
static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env,
bool secstate)
{
bool priv = arm_current_el(env) != 0;
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
if (arm_feature(env, ARM_FEATURE_M)) {
ARMMMUIdx mmu_idx = arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
return arm_to_core_mmu_idx(mmu_idx);
}
if (el < 2 && arm_is_secure_below_el3(env)) {
return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
}
return el;
}
/**
* cpu_mmu_index:
* @env: The cpu environment
* @ifetch: True for code access, false for data access.
*
* Return the core mmu index for the current translation regime.
* This function is used by generic TCG code paths.
*/
int cpu_mmu_index(CPUARMState *env, bool ifetch);
/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
@ -2976,10 +3030,10 @@ FIELD(TBFLAG_A32, HANDLER, 21, 1)
FIELD(TBFLAG_A32, STACKCHECK, 22, 1)
/* Bit usage when in AArch64 state */
FIELD(TBFLAG_A64, TBI0, 0, 1)
FIELD(TBFLAG_A64, TBI1, 1, 1)
FIELD(TBFLAG_A64, TBII, 0, 2)
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
static inline bool bswap_code(bool sctlr_b)
{
@ -3012,41 +3066,6 @@ static inline bool arm_cpu_bswap_data(CPUARMState *env)
}
#endif
#ifndef CONFIG_USER_ONLY
/**
* arm_regime_tbi0:
* @env: CPUARMState
* @mmu_idx: MMU index indicating required translation regime
*
* Extracts the TBI0 value from the appropriate TCR for the current EL
*
* Returns: the TBI0 value.
*/
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx);
/**
* arm_regime_tbi1:
* @env: CPUARMState
* @mmu_idx: MMU index indicating required translation regime
*
* Extracts the TBI1 value from the appropriate TCR for the current EL
*
* Returns: the TBI1 value.
*/
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx);
#else
/* We can't handle tagged addresses properly in user-only mode */
static inline uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
return 0;
}
static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
return 0;
}
#endif
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags);
@ -3264,6 +3283,21 @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
}
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
{
/*
* Note that while QEMU will only implement the architected algorithm
* QARMA, and thus APA+GPA, the host cpu for kvm may use implementation
* defined algorithms, and thus API+GPI, and this predicate controls
* migration of the 128-bit keys.
*/
return (id->id_aa64isar1 &
(FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) |
FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) |
FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
}
static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically wrt FP16. */

target/arm/cpu64.c

@ -138,8 +138,6 @@ static void aarch64_a57_initfn(Object *obj)
cpu->isar.id_isar6 = 0;
cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
@ -246,8 +244,6 @@ static void aarch64_a72_initfn(Object *obj)
cpu->isar.id_isar5 = 0x00011121;
cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
@ -285,6 +281,38 @@ static void cpu_max_set_sve_vq(Object *obj, Visitor *v, const char *name,
error_propagate(errp, err);
}
#ifdef CONFIG_USER_ONLY
static void cpu_max_get_packey(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
const uint64_t *bit = opaque;
bool enabled = (cpu->env.cp15.sctlr_el[1] & *bit) != 0;
visit_type_bool(v, name, &enabled, errp);
}
static void cpu_max_set_packey(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
Error *err = NULL;
const uint64_t *bit = opaque;
bool enabled;
visit_type_bool(v, name, &enabled, errp);
if (!err) {
if (enabled) {
cpu->env.cp15.sctlr_el[1] |= *bit;
} else {
cpu->env.cp15.sctlr_el[1] &= ~*bit;
}
}
error_propagate(errp, err);
}
#endif
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
* otherwise, a CPU with as many features enabled as our emulation supports.
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
@ -316,6 +344,10 @@ static void aarch64_max_initfn(Object *obj)
t = cpu->isar.id_aa64isar1;
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, APA, 1); /* PAuth, architected only */
t = FIELD_DP64(t, ID_AA64ISAR1, API, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, GPA, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, GPI, 0);
cpu->isar.id_aa64isar1 = t;
t = cpu->isar.id_aa64pfr0;
@ -356,6 +388,34 @@ static void aarch64_max_initfn(Object *obj)
*/
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
cpu->dcz_blocksize = 7; /* 512 bytes */
/*
* Note that Linux will enable all of the keys at once.
* But doing it this way will allow experimentation beyond that.
*/
{
static const uint64_t apia_bit = SCTLR_EnIA;
static const uint64_t apib_bit = SCTLR_EnIB;
static const uint64_t apda_bit = SCTLR_EnDA;
static const uint64_t apdb_bit = SCTLR_EnDB;
object_property_add(obj, "apia", "bool", cpu_max_get_packey,
cpu_max_set_packey, NULL,
(void *)&apia_bit, &error_fatal);
object_property_add(obj, "apib", "bool", cpu_max_get_packey,
cpu_max_set_packey, NULL,
(void *)&apib_bit, &error_fatal);
object_property_add(obj, "apda", "bool", cpu_max_get_packey,
cpu_max_set_packey, NULL,
(void *)&apda_bit, &error_fatal);
object_property_add(obj, "apdb", "bool", cpu_max_get_packey,
cpu_max_set_packey, NULL,
(void *)&apdb_bit, &error_fatal);
/* Enable all PAC keys by default. */
cpu->env.cp15.sctlr_el[1] |= SCTLR_EnIA | SCTLR_EnIB;
cpu->env.cp15.sctlr_el[1] |= SCTLR_EnDA | SCTLR_EnDB;
}
#endif
cpu->sve_max_vq = ARM_MAX_VQ;

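As a usage note (illustrative, not from the patch): with these user-only properties an individual key can be switched off on the command line, e.g. something like

qemu-aarch64 -cpu max,apdb=off ./a.out

assuming the usual linux-user binary name. Clearing a key's SCTLR_EL1.En* bit this way makes the corresponding PAC*/AUT* instructions leave the pointer unchanged, since pauth_key_enabled() in the new pauth_helper.c checks exactly these bits.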
target/arm/helper-a64.c

@ -887,6 +887,161 @@ uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
return float16_to_uint16(a, fpst);
}
static int el_from_spsr(uint32_t spsr)
{
/* Return the exception level that this SPSR is requesting a return to,
* or -1 if it is invalid (an illegal return)
*/
if (spsr & PSTATE_nRW) {
switch (spsr & CPSR_M) {
case ARM_CPU_MODE_USR:
return 0;
case ARM_CPU_MODE_HYP:
return 2;
case ARM_CPU_MODE_FIQ:
case ARM_CPU_MODE_IRQ:
case ARM_CPU_MODE_SVC:
case ARM_CPU_MODE_ABT:
case ARM_CPU_MODE_UND:
case ARM_CPU_MODE_SYS:
return 1;
case ARM_CPU_MODE_MON:
/* Returning to Mon from AArch64 is never possible,
* so this is an illegal return.
*/
default:
return -1;
}
} else {
if (extract32(spsr, 1, 1)) {
/* Return with reserved M[1] bit set */
return -1;
}
if (extract32(spsr, 0, 4) == 1) {
/* return to EL0 with M[0] bit set */
return -1;
}
return extract32(spsr, 2, 2);
}
}
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
int new_el;
bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
aarch64_save_sp(env, cur_el);
arm_clear_exclusive(env);
/* We must squash the PSTATE.SS bit to zero unless both of the
* following hold:
* 1. debug exceptions are currently disabled
* 2. singlestep will be active in the EL we return to
* We check 1 here and 2 after we've done the pstate/cpsr write() to
* transition to the EL we're going to.
*/
if (arm_generate_debug_exceptions(env)) {
spsr &= ~PSTATE_SS;
}
new_el = el_from_spsr(spsr);
if (new_el == -1) {
goto illegal_return;
}
if (new_el > cur_el
|| (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
/* Disallow return to an EL which is unimplemented or higher
* than the current one.
*/
goto illegal_return;
}
if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
/* Return to an EL which is configured for a different register width */
goto illegal_return;
}
if (new_el == 2 && arm_is_secure_below_el3(env)) {
/* Return to the non-existent secure-EL2 */
goto illegal_return;
}
if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}
qemu_mutex_lock_iothread();
arm_call_pre_el_change_hook(arm_env_get_cpu(env));
qemu_mutex_unlock_iothread();
if (!return_to_aa64) {
env->aarch64 = 0;
/* We do a raw CPSR write because aarch64_sync_64_to_32()
* will sort the register banks out for us, and we've already
* caught all the bad-mode cases in el_from_spsr().
*/
cpsr_write(env, spsr, ~0, CPSRWriteRaw);
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
}
aarch64_sync_64_to_32(env);
if (spsr & CPSR_T) {
env->regs[15] = new_pc & ~0x1;
} else {
env->regs[15] = new_pc & ~0x3;
}
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
"AArch32 EL%d PC 0x%" PRIx32 "\n",
cur_el, new_el, env->regs[15]);
} else {
env->aarch64 = 1;
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
}
aarch64_restore_sp(env, new_el);
env->pc = new_pc;
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
"AArch64 EL%d PC 0x%" PRIx64 "\n",
cur_el, new_el, env->pc);
}
/*
* Note that cur_el can never be 0. If new_el is 0, then
* el0_a64 is return_to_aa64, else el0_a64 is ignored.
*/
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env));
qemu_mutex_unlock_iothread();
return;
illegal_return:
/* Illegal return events of various kinds have architecturally
* mandated behaviour:
* restore NZCV and DAIF from SPSR_ELx
* set PSTATE.IL
* restore PC from ELR_ELx
* no change to exception level, execution state or stack pointer
*/
env->pstate |= PSTATE_IL;
env->pc = new_pc;
spsr &= PSTATE_NZCV | PSTATE_DAIF;
spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
}
qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
"resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
/*
* Square Root and Reciprocal square root
*/

target/arm/helper-a64.h

@ -85,3 +85,17 @@ DEF_HELPER_2(advsimd_rinth, f16, f16, ptr)
DEF_HELPER_2(advsimd_f16tosinth, i32, f16, ptr)
DEF_HELPER_2(advsimd_f16touinth, i32, f16, ptr)
DEF_HELPER_2(sqrt_f16, f16, f16, ptr)
DEF_HELPER_2(exception_return, void, env, i64)
DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(pacib, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(pacda, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(pacdb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(pacga, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autia, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autib, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)

target/arm/helper.c: diff suppressed because it is too large

target/arm/helper.h

@ -79,7 +79,6 @@ DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
DEF_HELPER_3(msr_i_pstate, void, env, i32, i32)
DEF_HELPER_1(clear_pstate_ss, void, env)
DEF_HELPER_1(exception_return, void, env)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)

target/arm/internals.h

@ -103,6 +103,13 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el);
/*
* Similarly, but also use unwinding to restore cpu state.
*/
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el,
uintptr_t ra);
/*
* For AArch64, map a given EL to an index in the banked_spsr array.
* Note that this mapping and the AArch32 mapping defined in bank_number()
@ -259,6 +266,7 @@ enum arm_exception_class {
EC_CP14DTTRAP = 0x06,
EC_ADVSIMDFPACCESSTRAP = 0x07,
EC_FPIDTRAP = 0x08,
EC_PACTRAP = 0x09,
EC_CP14RRTTRAP = 0x0c,
EC_ILLEGALSTATE = 0x0e,
EC_AA32_SVC = 0x11,
@ -426,6 +434,11 @@ static inline uint32_t syn_sve_access_trap(void)
return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_pactrap(void)
{
return EC_PACTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
@ -906,4 +919,68 @@ void arm_cpu_update_virq(ARMCPU *cpu);
*/
void arm_cpu_update_vfiq(ARMCPU *cpu);
/**
* arm_mmu_idx:
* @env: The cpu environment
*
* Return the full ARMMMUIdx for the current translation regime.
*/
ARMMMUIdx arm_mmu_idx(CPUARMState *env);
/**
* arm_stage1_mmu_idx:
* @env: The cpu environment
*
* Return the ARMMMUIdx for the stage1 traversal for the current regime.
*/
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
return ARMMMUIdx_S1NSE0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/*
* Parameters of a given virtual address, as extracted from the
* translation control register (TCR) for a given regime.
*/
typedef struct ARMVAParameters {
unsigned tsz : 8;
unsigned select : 1;
bool tbi : 1;
bool tbid : 1;
bool epd : 1;
bool hpd : 1;
bool using16k : 1;
bool using64k : 1;
} ARMVAParameters;
#ifdef CONFIG_USER_ONLY
static inline ARMVAParameters aa64_va_parameters_both(CPUARMState *env,
uint64_t va,
ARMMMUIdx mmu_idx)
{
return (ARMVAParameters) {
/* 48-bit address space */
.tsz = 16,
/* We can't handle tagged addresses properly in user-only mode */
.tbi = false,
};
}
static inline ARMVAParameters aa64_va_parameters(CPUARMState *env,
uint64_t va,
ARMMMUIdx mmu_idx, bool data)
{
return aa64_va_parameters_both(env, va, mmu_idx);
}
#else
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx);
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx, bool data);
#endif
#endif

target/arm/machine.c

@ -620,6 +620,10 @@ static int cpu_pre_save(void *opaque)
{
ARMCPU *cpu = opaque;
if (!kvm_enabled()) {
pmu_op_start(&cpu->env);
}
if (kvm_enabled()) {
if (!write_kvmstate_to_list(cpu)) {
/* This should never fail */
@ -641,6 +645,17 @@ static int cpu_pre_save(void *opaque)
return 0;
}
static int cpu_post_save(void *opaque)
{
ARMCPU *cpu = opaque;
if (!kvm_enabled()) {
pmu_op_finish(&cpu->env);
}
return 0;
}
static int cpu_pre_load(void *opaque)
{
ARMCPU *cpu = opaque;
@ -653,6 +668,10 @@ static int cpu_pre_load(void *opaque)
*/
env->irq_line_state = UINT32_MAX;
if (!kvm_enabled()) {
pmu_op_start(&cpu->env);
}
return 0;
}
@ -721,6 +740,10 @@ static int cpu_post_load(void *opaque, int version_id)
hw_breakpoint_update_all(cpu);
hw_watchpoint_update_all(cpu);
if (!kvm_enabled()) {
pmu_op_finish(&cpu->env);
}
return 0;
}
@ -729,6 +752,7 @@ const VMStateDescription vmstate_arm_cpu = {
.version_id = 22,
.minimum_version_id = 22,
.pre_save = cpu_pre_save,
.post_save = cpu_post_save,
.pre_load = cpu_pre_load,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {

target/arm/op_helper.c

@ -28,8 +28,8 @@
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
void raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el)
static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@ -50,9 +50,24 @@ void raise_exception(CPUARMState *env, uint32_t excp,
cs->exception_index = excp;
env->exception.syndrome = syndrome;
env->exception.target_el = target_el;
return cs;
}
void raise_exception(CPUARMState *env, uint32_t excp,
uint32_t syndrome, uint32_t target_el)
{
CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
cpu_loop_exit(cs);
}
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
uint32_t target_el, uintptr_t ra)
{
CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
cpu_loop_exit_restore(cs, ra);
}
static int exception_target_el(CPUARMState *env)
{
int target_el = MAX(1, arm_current_el(env));
@ -1014,161 +1029,6 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
}
}
static int el_from_spsr(uint32_t spsr)
{
/* Return the exception level that this SPSR is requesting a return to,
* or -1 if it is invalid (an illegal return)
*/
if (spsr & PSTATE_nRW) {
switch (spsr & CPSR_M) {
case ARM_CPU_MODE_USR:
return 0;
case ARM_CPU_MODE_HYP:
return 2;
case ARM_CPU_MODE_FIQ:
case ARM_CPU_MODE_IRQ:
case ARM_CPU_MODE_SVC:
case ARM_CPU_MODE_ABT:
case ARM_CPU_MODE_UND:
case ARM_CPU_MODE_SYS:
return 1;
case ARM_CPU_MODE_MON:
/* Returning to Mon from AArch64 is never possible,
* so this is an illegal return.
*/
default:
return -1;
}
} else {
if (extract32(spsr, 1, 1)) {
/* Return with reserved M[1] bit set */
return -1;
}
if (extract32(spsr, 0, 4) == 1) {
/* return to EL0 with M[0] bit set */
return -1;
}
return extract32(spsr, 2, 2);
}
}
void HELPER(exception_return)(CPUARMState *env)
{
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
int new_el;
bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
aarch64_save_sp(env, cur_el);
arm_clear_exclusive(env);
/* We must squash the PSTATE.SS bit to zero unless both of the
* following hold:
* 1. debug exceptions are currently disabled
* 2. singlestep will be active in the EL we return to
* We check 1 here and 2 after we've done the pstate/cpsr write() to
* transition to the EL we're going to.
*/
if (arm_generate_debug_exceptions(env)) {
spsr &= ~PSTATE_SS;
}
new_el = el_from_spsr(spsr);
if (new_el == -1) {
goto illegal_return;
}
if (new_el > cur_el
|| (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
/* Disallow return to an EL which is unimplemented or higher
* than the current one.
*/
goto illegal_return;
}
if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
/* Return to an EL which is configured for a different register width */
goto illegal_return;
}
if (new_el == 2 && arm_is_secure_below_el3(env)) {
/* Return to the non-existent secure-EL2 */
goto illegal_return;
}
if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}
qemu_mutex_lock_iothread();
arm_call_pre_el_change_hook(arm_env_get_cpu(env));
qemu_mutex_unlock_iothread();
if (!return_to_aa64) {
env->aarch64 = 0;
/* We do a raw CPSR write because aarch64_sync_64_to_32()
* will sort the register banks out for us, and we've already
* caught all the bad-mode cases in el_from_spsr().
*/
cpsr_write(env, spsr, ~0, CPSRWriteRaw);
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
}
aarch64_sync_64_to_32(env);
if (spsr & CPSR_T) {
env->regs[15] = env->elr_el[cur_el] & ~0x1;
} else {
env->regs[15] = env->elr_el[cur_el] & ~0x3;
}
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
"AArch32 EL%d PC 0x%" PRIx32 "\n",
cur_el, new_el, env->regs[15]);
} else {
env->aarch64 = 1;
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
}
aarch64_restore_sp(env, new_el);
env->pc = env->elr_el[cur_el];
qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
"AArch64 EL%d PC 0x%" PRIx64 "\n",
cur_el, new_el, env->pc);
}
/*
* Note that cur_el can never be 0. If new_el is 0, then
* el0_a64 is return_to_aa64, else el0_a64 is ignored.
*/
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env));
qemu_mutex_unlock_iothread();
return;
illegal_return:
/* Illegal return events of various kinds have architecturally
* mandated behaviour:
* restore NZCV and DAIF from SPSR_ELx
* set PSTATE.IL
* restore PC from ELR_ELx
* no change to exception level, execution state or stack pointer
*/
env->pstate |= PSTATE_IL;
env->pc = env->elr_el[cur_el];
spsr &= PSTATE_NZCV | PSTATE_DAIF;
spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
}
qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
"resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{

target/arm/pauth_helper.c (new file, 497 lines)

@ -0,0 +1,497 @@
/*
* ARM v8.3-PAuth Operations
*
* Copyright (c) 2019 Linaro, Ltd.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
static uint64_t pac_cell_shuffle(uint64_t i)
{
uint64_t o = 0;
o |= extract64(i, 52, 4);
o |= extract64(i, 24, 4) << 4;
o |= extract64(i, 44, 4) << 8;
o |= extract64(i, 0, 4) << 12;
o |= extract64(i, 28, 4) << 16;
o |= extract64(i, 48, 4) << 20;
o |= extract64(i, 4, 4) << 24;
o |= extract64(i, 40, 4) << 28;
o |= extract64(i, 32, 4) << 32;
o |= extract64(i, 12, 4) << 36;
o |= extract64(i, 56, 4) << 40;
o |= extract64(i, 20, 4) << 44;
o |= extract64(i, 8, 4) << 48;
o |= extract64(i, 36, 4) << 52;
o |= extract64(i, 16, 4) << 56;
o |= extract64(i, 60, 4) << 60;
return o;
}
static uint64_t pac_cell_inv_shuffle(uint64_t i)
{
uint64_t o = 0;
o |= extract64(i, 12, 4);
o |= extract64(i, 24, 4) << 4;
o |= extract64(i, 48, 4) << 8;
o |= extract64(i, 36, 4) << 12;
o |= extract64(i, 56, 4) << 16;
o |= extract64(i, 44, 4) << 20;
o |= extract64(i, 4, 4) << 24;
o |= extract64(i, 16, 4) << 28;
o |= i & MAKE_64BIT_MASK(32, 4);
o |= extract64(i, 52, 4) << 36;
o |= extract64(i, 28, 4) << 40;
o |= extract64(i, 8, 4) << 44;
o |= extract64(i, 20, 4) << 48;
o |= extract64(i, 0, 4) << 52;
o |= extract64(i, 40, 4) << 56;
o |= i & MAKE_64BIT_MASK(60, 4);
return o;
}
static uint64_t pac_sub(uint64_t i)
{
static const uint8_t sub[16] = {
0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe,
0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa,
};
uint64_t o = 0;
int b;
for (b = 0; b < 64; b += 16) {
o |= (uint64_t)sub[(i >> b) & 0xf] << b;
}
return o;
}
static uint64_t pac_inv_sub(uint64_t i)
{
static const uint8_t inv_sub[16] = {
0x5, 0xe, 0xd, 0x8, 0xa, 0xb, 0x1, 0x9,
0x2, 0x6, 0xf, 0x0, 0x4, 0xc, 0x7, 0x3,
};
uint64_t o = 0;
int b;
for (b = 0; b < 64; b += 16) {
o |= (uint64_t)inv_sub[(i >> b) & 0xf] << b;
}
return o;
}
static int rot_cell(int cell, int n)
{
/* 4-bit rotate left by n. */
cell |= cell << 4;
return extract32(cell, 4 - n, 4);
}
static uint64_t pac_mult(uint64_t i)
{
uint64_t o = 0;
int b;
for (b = 0; b < 4 * 4; b += 4) {
int i0, i4, i8, ic, t0, t1, t2, t3;
i0 = extract64(i, b, 4);
i4 = extract64(i, b + 4 * 4, 4);
i8 = extract64(i, b + 8 * 4, 4);
ic = extract64(i, b + 12 * 4, 4);
t0 = rot_cell(i8, 1) ^ rot_cell(i4, 2) ^ rot_cell(i0, 1);
t1 = rot_cell(ic, 1) ^ rot_cell(i4, 1) ^ rot_cell(i0, 2);
t2 = rot_cell(ic, 2) ^ rot_cell(i8, 1) ^ rot_cell(i0, 1);
t3 = rot_cell(ic, 1) ^ rot_cell(i8, 2) ^ rot_cell(i4, 1);
o |= (uint64_t)t3 << b;
o |= (uint64_t)t2 << (b + 4 * 4);
o |= (uint64_t)t1 << (b + 8 * 4);
o |= (uint64_t)t0 << (b + 12 * 4);
}
return o;
}
static uint64_t tweak_cell_rot(uint64_t cell)
{
return (cell >> 1) | (((cell ^ (cell >> 1)) & 1) << 3);
}
static uint64_t tweak_shuffle(uint64_t i)
{
uint64_t o = 0;
o |= extract64(i, 16, 4) << 0;
o |= extract64(i, 20, 4) << 4;
o |= tweak_cell_rot(extract64(i, 24, 4)) << 8;
o |= extract64(i, 28, 4) << 12;
o |= tweak_cell_rot(extract64(i, 44, 4)) << 16;
o |= extract64(i, 8, 4) << 20;
o |= extract64(i, 12, 4) << 24;
o |= tweak_cell_rot(extract64(i, 32, 4)) << 28;
o |= extract64(i, 48, 4) << 32;
o |= extract64(i, 52, 4) << 36;
o |= extract64(i, 56, 4) << 40;
o |= tweak_cell_rot(extract64(i, 60, 4)) << 44;
o |= tweak_cell_rot(extract64(i, 0, 4)) << 48;
o |= extract64(i, 4, 4) << 52;
o |= tweak_cell_rot(extract64(i, 40, 4)) << 56;
o |= tweak_cell_rot(extract64(i, 36, 4)) << 60;
return o;
}
static uint64_t tweak_cell_inv_rot(uint64_t cell)
{
return ((cell << 1) & 0xf) | ((cell & 1) ^ (cell >> 3));
}
static uint64_t tweak_inv_shuffle(uint64_t i)
{
uint64_t o = 0;
o |= tweak_cell_inv_rot(extract64(i, 48, 4));
o |= extract64(i, 52, 4) << 4;
o |= extract64(i, 20, 4) << 8;
o |= extract64(i, 24, 4) << 12;
o |= extract64(i, 0, 4) << 16;
o |= extract64(i, 4, 4) << 20;
o |= tweak_cell_inv_rot(extract64(i, 8, 4)) << 24;
o |= extract64(i, 12, 4) << 28;
o |= tweak_cell_inv_rot(extract64(i, 28, 4)) << 32;
o |= tweak_cell_inv_rot(extract64(i, 60, 4)) << 36;
o |= tweak_cell_inv_rot(extract64(i, 56, 4)) << 40;
o |= tweak_cell_inv_rot(extract64(i, 16, 4)) << 44;
o |= extract64(i, 32, 4) << 48;
o |= extract64(i, 36, 4) << 52;
o |= extract64(i, 40, 4) << 56;
o |= tweak_cell_inv_rot(extract64(i, 44, 4)) << 60;
return o;
}
static uint64_t pauth_computepac(uint64_t data, uint64_t modifier,
ARMPACKey key)
{
static const uint64_t RC[5] = {
0x0000000000000000ull,
0x13198A2E03707344ull,
0xA4093822299F31D0ull,
0x082EFA98EC4E6C89ull,
0x452821E638D01377ull,
};
const uint64_t alpha = 0xC0AC29B7C97C50DDull;
/*
* Note that in the ARM pseudocode, key0 contains bits <127:64>
* and key1 contains bits <63:0> of the 128-bit key.
*/
uint64_t key0 = key.hi, key1 = key.lo;
uint64_t workingval, runningmod, roundkey, modk0;
int i;
modk0 = (key0 << 63) | ((key0 >> 1) ^ (key0 >> 63));
runningmod = modifier;
workingval = data ^ key0;
for (i = 0; i <= 4; ++i) {
roundkey = key1 ^ runningmod;
workingval ^= roundkey;
workingval ^= RC[i];
if (i > 0) {
workingval = pac_cell_shuffle(workingval);
workingval = pac_mult(workingval);
}
workingval = pac_sub(workingval);
runningmod = tweak_shuffle(runningmod);
}
roundkey = modk0 ^ runningmod;
workingval ^= roundkey;
workingval = pac_cell_shuffle(workingval);
workingval = pac_mult(workingval);
workingval = pac_sub(workingval);
workingval = pac_cell_shuffle(workingval);
workingval = pac_mult(workingval);
workingval ^= key1;
workingval = pac_cell_inv_shuffle(workingval);
workingval = pac_inv_sub(workingval);
workingval = pac_mult(workingval);
workingval = pac_cell_inv_shuffle(workingval);
workingval ^= key0;
workingval ^= runningmod;
for (i = 0; i <= 4; ++i) {
workingval = pac_inv_sub(workingval);
if (i < 4) {
workingval = pac_mult(workingval);
workingval = pac_cell_inv_shuffle(workingval);
}
runningmod = tweak_inv_shuffle(runningmod);
roundkey = key1 ^ runningmod;
workingval ^= RC[4 - i];
workingval ^= roundkey;
workingval ^= alpha;
}
workingval ^= modk0;
return workingval;
}
static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data)
{
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
uint64_t pac, ext_ptr, ext, test;
int bot_bit, top_bit;
/* If tagged pointers are in use, use ptr<55>, otherwise ptr<63>. */
if (param.tbi) {
ext = sextract64(ptr, 55, 1);
} else {
ext = sextract64(ptr, 63, 1);
}
/* Build a pointer with known good extension bits. */
top_bit = 64 - 8 * param.tbi;
bot_bit = 64 - param.tsz;
ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext);
pac = pauth_computepac(ext_ptr, modifier, *key);
/*
* Check if the ptr has good extension bits and corrupt the
* pointer authentication code if not.
*/
test = sextract64(ptr, bot_bit, top_bit - bot_bit);
if (test != 0 && test != -1) {
pac ^= MAKE_64BIT_MASK(top_bit - 1, 1);
}
/*
* Preserve the determination between upper and lower at bit 55,
* and insert pointer authentication code.
*/
if (param.tbi) {
ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1);
pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1);
} else {
ptr &= MAKE_64BIT_MASK(0, bot_bit);
pac &= ~(MAKE_64BIT_MASK(55, 1) | MAKE_64BIT_MASK(0, bot_bit));
}
ext &= MAKE_64BIT_MASK(55, 1);
return pac | ext | ptr;
}
static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
{
uint64_t extfield = -param.select;
int bot_pac_bit = 64 - param.tsz;
int top_pac_bit = 64 - 8 * param.tbi;
return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield);
}
static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data, int keynumber)
{
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
int bot_bit, top_bit;
uint64_t pac, orig_ptr, test;
orig_ptr = pauth_original_ptr(ptr, param);
pac = pauth_computepac(orig_ptr, modifier, *key);
bot_bit = 64 - param.tsz;
top_bit = 64 - 8 * param.tbi;
test = (pac ^ ptr) & ~MAKE_64BIT_MASK(55, 1);
if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) {
int error_code = (keynumber << 1) | (keynumber ^ 1);
if (param.tbi) {
return deposit64(ptr, 53, 2, error_code);
} else {
return deposit64(ptr, 61, 2, error_code);
}
}
return orig_ptr;
}
static uint64_t pauth_strip(CPUARMState *env, uint64_t ptr, bool data)
{
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
return pauth_original_ptr(ptr, param);
}
static void QEMU_NORETURN pauth_trap(CPUARMState *env, int target_el,
uintptr_t ra)
{
raise_exception_ra(env, EXCP_UDEF, syn_pactrap(), target_el, ra);
}
static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
{
if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
uint64_t hcr = arm_hcr_el2_eff(env);
bool trap = !(hcr & HCR_API);
/* FIXME: ARMv8.1-VHE: trap only applies to EL1&0 regime. */
/* FIXME: ARMv8.3-NV: HCR_NV trap takes precedence for ERETA[AB]. */
if (trap) {
pauth_trap(env, 2, ra);
}
}
if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
if (!(env->cp15.scr_el3 & SCR_API)) {
pauth_trap(env, 3, ra);
}
}
}
static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit)
{
uint32_t sctlr;
if (el == 0) {
/* FIXME: ARMv8.1-VHE S2 translation regime. */
sctlr = env->cp15.sctlr_el[1];
} else {
sctlr = env->cp15.sctlr_el[el];
}
return (sctlr & bit) != 0;
}
uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnIA)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_addpac(env, x, y, &env->apia_key, false);
}
uint64_t HELPER(pacib)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnIB)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_addpac(env, x, y, &env->apib_key, false);
}
uint64_t HELPER(pacda)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_addpac(env, x, y, &env->apda_key, true);
}
uint64_t HELPER(pacdb)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_addpac(env, x, y, &env->apdb_key, true);
}
uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y)
{
uint64_t pac;
pauth_check_trap(env, arm_current_el(env), GETPC());
pac = pauth_computepac(x, y, env->apga_key);
return pac & 0xffffffff00000000ull;
}
uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnIA)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_auth(env, x, y, &env->apia_key, false, 0);
}
uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnIB)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_auth(env, x, y, &env->apib_key, false, 1);
}
uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_auth(env, x, y, &env->apda_key, true, 0);
}
uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
{
int el = arm_current_el(env);
if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
return x;
}
pauth_check_trap(env, el, GETPC());
return pauth_auth(env, x, y, &env->apdb_key, true, 1);
}
uint64_t HELPER(xpaci)(CPUARMState *env, uint64_t a)
{
return pauth_strip(env, a, false);
}
uint64_t HELPER(xpacd)(CPUARMState *env, uint64_t a)
{
return pauth_strip(env, a, true);
}

target/arm/translate-a64.c

@ -261,7 +261,7 @@ void gen_a64_set_pc_im(uint64_t val)
/* Load the PC from a generic TCG variable.
*
* If address tagging is enabled via the TCR TBI bits, then loading
* an address into the PC will clear out any tag in the it:
* an address into the PC will clear out any tag in it:
* + for EL2 and EL3 there is only one TBI bit, and if it is set
* then the address is zero-extended, clearing bits [63:56]
* + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
@ -276,56 +276,38 @@ void gen_a64_set_pc_im(uint64_t val)
*/
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /* Note that TBII is TBI1:TBI0.  */
    int tbi = s->tbii;

    if (s->current_el <= 1) {
        if (tbi != 0) {
            /* Sign-extend from bit 55. */
            tcg_gen_sextract_i64(cpu_pc, src, 0, 56);

            if (tbi != 3) {
                TCGv_i64 tcg_zero = tcg_const_i64(0);

                /*
                 * The two TBI bits differ.
                 * If tbi0, then !tbi1: only use the extension if positive.
                 * if !tbi0, then tbi1: only use the extension if negative.
                 */
                tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
                                    cpu_pc, cpu_pc, tcg_zero, cpu_pc, src);
                tcg_temp_free_i64(tcg_zero);
            }
            return;
        }
    } else {
        if (tbi != 0) {
            /* Force tag byte to all zero */
            tcg_gen_extract_i64(cpu_pc, src, 0, 56);
            return;
        }
    }

    /* Load unmodified address */
    tcg_gen_mov_i64(cpu_pc, src);
}
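
As a plain-C sketch of the same logic for the EL0/EL1 case (illustration only, not QEMU code), the four TBI combinations reduce to choosing between the raw value and its bit-55 extension:

#include <stdint.h>
#include <stdio.h>

/* Model of the TBI handling above: tbi is the 2-bit TBI1:TBI0 field. */
static uint64_t set_pc_model(uint64_t src, int tbi)
{
    int bit55 = (src >> 55) & 1;
    /* Replace the tag byte [63:56] with copies of bit 55. */
    uint64_t ext = bit55 ? (src | 0xff00000000000000ull)
                         : (src & 0x00ffffffffffffffull);

    switch (tbi) {
    case 0:
        return src;                 /* neither TBI bit set: use the address as-is */
    case 3:
        return ext;                 /* both set: always drop the tag byte */
    case 1:
        return bit55 ? src : ext;   /* TBI0 only: low half of the VA space is tagged */
    default:
        return bit55 ? ext : src;   /* TBI1 only: high half of the VA space is tagged */
    }
}

int main(void)
{
    printf("0x%016llx\n",
           (unsigned long long)set_pc_model(0x5a00123456789abcull, 3));
    return 0;   /* prints 0x0000123456789abc */
}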
typedef struct DisasCompare64 {
@ -1471,33 +1453,102 @@ static void handle_hint(DisasContext *s, uint32_t insn,
}
switch (selector) {
case 0: /* NOP */
return;
case 3: /* WFI */
case 0b00000: /* NOP */
break;
case 0b00011: /* WFI */
s->base.is_jmp = DISAS_WFI;
return;
break;
case 0b00001: /* YIELD */
/* When running in MTTCG we don't generate jumps to the yield and
* WFE helpers as it won't affect the scheduling of other vCPUs.
* If we wanted to more completely model WFE/SEV so we don't busy
* spin unnecessarily we would need to do something more involved.
*/
case 1: /* YIELD */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
s->base.is_jmp = DISAS_YIELD;
}
return;
case 2: /* WFE */
break;
case 0b00010: /* WFE */
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
s->base.is_jmp = DISAS_WFE;
}
return;
case 4: /* SEV */
case 5: /* SEVL */
break;
case 0b00100: /* SEV */
case 0b00101: /* SEVL */
/* we treat all as NOP at least for now */
return;
break;
case 0b00111: /* XPACLRI */
if (s->pauth_active) {
gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
}
break;
case 0b01000: /* PACIA1716 */
if (s->pauth_active) {
gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
}
break;
case 0b01010: /* PACIB1716 */
if (s->pauth_active) {
gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
}
break;
case 0b01100: /* AUTIA1716 */
if (s->pauth_active) {
gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
}
break;
case 0b01110: /* AUTIB1716 */
if (s->pauth_active) {
gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
}
break;
case 0b11000: /* PACIAZ */
if (s->pauth_active) {
gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
new_tmp_a64_zero(s));
}
break;
case 0b11001: /* PACIASP */
if (s->pauth_active) {
gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
}
break;
case 0b11010: /* PACIBZ */
if (s->pauth_active) {
gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
new_tmp_a64_zero(s));
}
break;
case 0b11011: /* PACIBSP */
if (s->pauth_active) {
gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
}
break;
case 0b11100: /* AUTIAZ */
if (s->pauth_active) {
gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
new_tmp_a64_zero(s));
}
break;
case 0b11101: /* AUTIASP */
if (s->pauth_active) {
gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
}
break;
case 0b11110: /* AUTIBZ */
if (s->pauth_active) {
gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
new_tmp_a64_zero(s));
}
break;
case 0b11111: /* AUTIBSP */
if (s->pauth_active) {
gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
}
break;
default:
/* default specified as NOP equivalent */
return;
break;
}
}
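
The binary case values above are the 7-bit HINT immediates, read as CRm:op2 from the hint encoding, which is why PACIASP, for example, sits at 0b11001 (hint #25). A trivial check, for illustration:

#include <stdio.h>

int main(void)
{
    /* PACIASP is HINT #25: CRm = 0b0011, op2 = 0b001. */
    unsigned crm = 0x3, op2 = 0x1;
    unsigned selector = (crm << 3) | op2;

    printf("selector = %u = 0b11001\n", selector);   /* prints 25 */
    return 0;
}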
@ -1912,6 +1963,8 @@ static void disas_exc(DisasContext *s, uint32_t insn)
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
unsigned int opc, op2, op3, rn, op4;
TCGv_i64 dst;
TCGv_i64 modifier;
opc = extract32(insn, 21, 4);
op2 = extract32(insn, 16, 5);
@ -1919,44 +1972,152 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
rn = extract32(insn, 5, 5);
op4 = extract32(insn, 0, 5);
if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
unallocated_encoding(s);
return;
if (op2 != 0x1f) {
goto do_unallocated;
}
switch (opc) {
case 0: /* BR */
case 1: /* BLR */
case 2: /* RET */
gen_a64_set_pc(s, cpu_reg(s, rn));
switch (op3) {
case 0:
/* BR, BLR, RET */
if (op4 != 0) {
goto do_unallocated;
}
dst = cpu_reg(s, rn);
break;
case 2:
case 3:
if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
if (opc == 2) {
/* RETAA, RETAB */
if (rn != 0x1f || op4 != 0x1f) {
goto do_unallocated;
}
rn = 30;
modifier = cpu_X[31];
} else {
/* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
if (op4 != 0x1f) {
goto do_unallocated;
}
modifier = new_tmp_a64_zero(s);
}
if (s->pauth_active) {
dst = new_tmp_a64(s);
if (op3 == 2) {
gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
} else {
gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
}
} else {
dst = cpu_reg(s, rn);
}
break;
default:
goto do_unallocated;
}
gen_a64_set_pc(s, dst);
/* BLR also needs to load return address */
if (opc == 1) {
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
}
break;
case 8: /* BRAA */
case 9: /* BLRAA */
if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
        if (op3 != 2 && op3 != 3) {
goto do_unallocated;
}
if (s->pauth_active) {
dst = new_tmp_a64(s);
modifier = cpu_reg_sp(s, op4);
if (op3 == 2) {
gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
} else {
gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
}
} else {
dst = cpu_reg(s, rn);
}
gen_a64_set_pc(s, dst);
/* BLRAA also needs to load return address */
if (opc == 9) {
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
}
break;
case 4: /* ERET */
if (s->current_el == 0) {
unallocated_encoding(s);
return;
goto do_unallocated;
}
switch (op3) {
case 0: /* ERET */
if (op4 != 0) {
goto do_unallocated;
}
dst = tcg_temp_new_i64();
tcg_gen_ld_i64(dst, cpu_env,
offsetof(CPUARMState, elr_el[s->current_el]));
break;
case 2: /* ERETAA */
case 3: /* ERETAB */
if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
if (rn != 0x1f || op4 != 0x1f) {
goto do_unallocated;
}
dst = tcg_temp_new_i64();
tcg_gen_ld_i64(dst, cpu_env,
offsetof(CPUARMState, elr_el[s->current_el]));
if (s->pauth_active) {
modifier = cpu_X[31];
if (op3 == 2) {
gen_helper_autia(dst, cpu_env, dst, modifier);
} else {
gen_helper_autib(dst, cpu_env, dst, modifier);
}
}
break;
default:
goto do_unallocated;
}
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_exception_return(cpu_env);
gen_helper_exception_return(cpu_env, dst);
tcg_temp_free_i64(dst);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
}
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return;
case 5: /* DRPS */
if (rn != 0x1f) {
unallocated_encoding(s);
if (op3 != 0 || op4 != 0 || rn != 0x1f) {
goto do_unallocated;
} else {
unsupported_encoding(s, insn);
}
return;
default:
do_unallocated:
unallocated_encoding(s);
return;
}
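
A rough plain-C model of how the new branch encodings pick their target (illustration only; auth_key_ia() is a hypothetical stand-in for the helper): without ARMv8.3-PAuth they are unallocated, with PAuth present but the keys disabled they behave like the plain BR/BLR/RET forms, and only with the keys enabled is the pointer authenticated first.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the IA authentication helper. */
static uint64_t auth_key_ia(uint64_t ptr, uint64_t modifier)
{
    (void)modifier;
    return ptr;                     /* real hardware checks/strips the PAC here */
}

/* Target selection for BRAA/BLRAA/RETAA-style encodings, modeled in C. */
static uint64_t pac_branch_target(uint64_t rn_val, uint64_t modifier,
                                  bool pauth_implemented, bool pauth_active,
                                  bool *unallocated)
{
    if (!pauth_implemented) {
        *unallocated = true;        /* UNDEFINED without ARMv8.3-PAuth */
        return 0;
    }
    *unallocated = false;
    if (!pauth_active) {
        return rn_val;              /* keys disabled: same as BR/BLR/RET */
    }
    return auth_key_ia(rn_val, modifier);
}

int main(void)
{
    bool unalloc;

    return (int)pac_branch_target(0, 0, true, false, &unalloc);
}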
@ -2967,6 +3128,64 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
s->be_data | size | MO_ALIGN);
}
/*
* PAC memory operations
*
* 31 30 27 26 24 22 21 12 11 10 5 0
* +------+-------+---+-----+-----+---+--------+---+---+----+-----+
* | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt |
* +------+-------+---+-----+-----+---+--------+---+---+----+-----+
*
* Rt: the result register
* Rn: base address or SP
* V: vector flag (always 0 as of v8.3)
* M: clear for key DA, set for key DB
* W: pre-indexing flag
* S: sign for imm9.
*/
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
int size, int rt, bool is_vector)
{
int rn = extract32(insn, 5, 5);
bool is_wback = extract32(insn, 11, 1);
bool use_key_a = !extract32(insn, 23, 1);
int offset;
TCGv_i64 tcg_addr, tcg_rt;
if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
unallocated_encoding(s);
return;
}
if (rn == 31) {
gen_check_sp_alignment(s);
}
tcg_addr = read_cpu_reg_sp(s, rn, 1);
if (s->pauth_active) {
if (use_key_a) {
gen_helper_autda(tcg_addr, cpu_env, tcg_addr, cpu_X[31]);
} else {
gen_helper_autdb(tcg_addr, cpu_env, tcg_addr, cpu_X[31]);
}
}
/* Form the 10-bit signed, scaled offset. */
offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
offset = sextract32(offset << size, 0, 10 + size);
tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
tcg_rt = cpu_reg(s, rt);
do_gpr_ld(s, tcg_rt, tcg_addr, size, /* is_signed */ false,
/* extend */ false, /* iss_valid */ !is_wback,
/* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
if (is_wback) {
tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
}
}
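
The offset arithmetic above produces a signed multiple of 8 in the range [-4096, 4088] for the 64-bit loads. A worked example (illustration only), using the same semantics as sextract32():

#include <stdint.h>
#include <stdio.h>

/* Sign-extend bits [start+length-1:start], mirroring the usual sextract32(). */
static int32_t sextract(uint32_t value, int start, int length)
{
    return ((int32_t)(value << (32 - length - start))) >> (32 - length);
}

int main(void)
{
    int size = 3;                                  /* 64-bit load */
    uint32_t s_bit = 1, imm9 = 0x1ff;              /* example fields */
    int32_t offset = (int32_t)((s_bit << 9) | imm9);

    offset = sextract((uint32_t)offset << size, 0, 10 + size);
    printf("offset = %d\n", offset);               /* prints -8 */
    return 0;
}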
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
@ -2992,6 +3211,9 @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
case 2:
disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
return;
default:
disas_ldst_pac(s, insn, size, rt, is_vector);
return;
}
break;
case 1:
@ -4494,38 +4716,197 @@ static void handle_rev16(DisasContext *s, unsigned int sf,
*/
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
unsigned int sf, opcode, rn, rd;
unsigned int sf, opcode, opcode2, rn, rd;
TCGv_i64 tcg_rd;
if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
if (extract32(insn, 29, 1)) {
unallocated_encoding(s);
return;
}
sf = extract32(insn, 31, 1);
opcode = extract32(insn, 10, 6);
opcode2 = extract32(insn, 16, 5);
rn = extract32(insn, 5, 5);
rd = extract32(insn, 0, 5);
switch (opcode) {
case 0: /* RBIT */
#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
switch (MAP(sf, opcode2, opcode)) {
case MAP(0, 0x00, 0x00): /* RBIT */
case MAP(1, 0x00, 0x00):
handle_rbit(s, sf, rn, rd);
break;
case 1: /* REV16 */
case MAP(0, 0x00, 0x01): /* REV16 */
case MAP(1, 0x00, 0x01):
handle_rev16(s, sf, rn, rd);
break;
case 2: /* REV32 */
case MAP(0, 0x00, 0x02): /* REV/REV32 */
case MAP(1, 0x00, 0x02):
handle_rev32(s, sf, rn, rd);
break;
case 3: /* REV64 */
case MAP(1, 0x00, 0x03): /* REV64 */
handle_rev64(s, sf, rn, rd);
break;
case 4: /* CLZ */
case MAP(0, 0x00, 0x04): /* CLZ */
case MAP(1, 0x00, 0x04):
handle_clz(s, sf, rn, rd);
break;
case 5: /* CLS */
case MAP(0, 0x00, 0x05): /* CLS */
case MAP(1, 0x00, 0x05):
handle_cls(s, sf, rn, rd);
break;
case MAP(1, 0x01, 0x00): /* PACIA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x01): /* PACIB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x02): /* PACDA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x03): /* PACDB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x04): /* AUTIA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x05): /* AUTIB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x06): /* AUTDA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x07): /* AUTDB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
break;
case MAP(1, 0x01, 0x08): /* PACIZA */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x09): /* PACIZB */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x0a): /* PACDZA */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x0b): /* PACDZB */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x0c): /* AUTIZA */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x0d): /* AUTIZB */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x0e): /* AUTDZA */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x0f): /* AUTDZB */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
}
break;
case MAP(1, 0x01, 0x10): /* XPACI */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
}
break;
case MAP(1, 0x01, 0x11): /* XPACD */
if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
}
break;
default:
do_unallocated:
unallocated_encoding(s);
break;
}
#undef MAP
}
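
For reference, the MAP() key packs sf into bit 0, opcode into bits [6:1] and opcode2 into bits [11:7], so the 64-bit PACIA case above is 0x81 and the 64-bit CLZ case is 0x09. A quick check:

#include <stdio.h>

#define MAP(SF, O2, O1) ((SF) | ((O1) << 1) | ((O2) << 7))

int main(void)
{
    printf("PACIA: 0x%02x\n", MAP(1, 0x01, 0x00));   /* prints 0x81 */
    printf("CLZ:   0x%02x\n", MAP(1, 0x00, 0x04));   /* prints 0x09 */
    return 0;
}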
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
@ -4656,6 +5037,13 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
case 11: /* RORV */
handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
break;
case 12: /* PACGA */
if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
gen_helper_pacga(cpu_reg(s, rd), cpu_env,
cpu_reg(s, rn), cpu_reg_sp(s, rm));
break;
case 16:
case 17:
case 18:
@ -4671,6 +5059,7 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
break;
}
default:
do_unallocated:
unallocated_encoding(s);
break;
}
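
PACGA differs from the other PAC* instructions in that it does not sign a pointer in place: it computes a PAC over Xn with Xm (or SP) as the modifier and the generic key, and writes it to the top 32 bits of Xd with the bottom 32 bits zero. A sketch of that result shape, with a placeholder hash (pac_hash() is hypothetical, not the architected algorithm):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder mixing function, NOT the architected (QARMA/IMPDEF) PAC. */
static uint64_t pac_hash(uint64_t data, uint64_t modifier)
{
    uint64_t x = data ^ (modifier * 0x9e3779b97f4a7c15ull);

    x ^= x >> 33;
    return x * 0xff51afd7ed558ccdull;
}

/* PACGA result shape: PAC in the top 32 bits, bottom 32 bits zero. */
static uint64_t pacga(uint64_t xn, uint64_t xm_or_sp)
{
    return pac_hash(xn, xm_or_sp) & 0xffffffff00000000ull;
}

int main(void)
{
    printf("0x%016" PRIx64 "\n", pacga(0x1234, 0x5678));
    return 0;
}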
@ -13400,8 +13789,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->condexec_cond = 0;
core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
dc->tbi0 = FIELD_EX32(tb_flags, TBFLAG_A64, TBI0);
dc->tbi1 = FIELD_EX32(tb_flags, TBFLAG_A64, TBI1);
dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
@ -13409,6 +13797,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = arm_cpu->cp_regs;

View File

@ -26,8 +26,7 @@ typedef struct DisasContext {
int user;
#endif
ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
bool tbi0; /* TBI0 for EL0/1 or TBI for EL2/3 */
bool tbi1; /* TBI1 for EL0/1, not used for EL2/3 */
uint8_t tbii; /* TBI1|TBI0 for EL0/1 or TBI for EL2/3 */
bool ns; /* Use non-secure CPREG bank on access */
int fp_excp_el; /* FP exception EL or 0 if enabled */
int sve_excp_el; /* SVE exception EL or 0 if enabled */
@ -68,6 +67,8 @@ typedef struct DisasContext {
bool is_ldex;
/* True if a single-step exception will be taken to the current EL */
bool ss_same_el;
/* True if v8.3-PAuth is active. */
bool pauth_active;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
int c15_cpar;
/* TCG op of the current insn_start. */