Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180209' into staging

target-arm queue:
 * Support M profile derived exceptions on exception entry and exit
 * Implement AArch64 v8.2 crypto insns (SHA-512, SHA-3, SM3, SM4)
 * Implement working i.MX6 SD controller
 * Various devices preparatory to i.MX7 support
 * Preparatory patches for SVE emulation
 * v8M: Fix bug in implementation of 'TT' insn
 * Give useful error if user tries to use userspace GICv3 with KVM

# gpg: Signature made Fri 09 Feb 2018 11:01:23 GMT
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20180209: (30 commits)
  hw/core/generic-loader: Allow PC to be set on command line
  target/arm/translate.c: Fix missing 'break' for TT insns
  target/arm/kvm: gic: Prevent creating userspace GICv3 with KVM
  target/arm: Add SVE state to TB->FLAGS
  target/arm: Add ZCR_ELx
  target/arm: Add SVE to migration state
  target/arm: Add predicate registers for SVE
  target/arm: Expand vector registers for SVE
  hw/arm: Move virt's PSCI DT fixup code to arm/boot.c
  usb: Add basic code to emulate Chipidea USB IP
  i.MX: Add implementation of i.MX7 GPR IP block
  i.MX: Add i.MX7 GPT variant
  i.MX: Add code to emulate GPCv2 IP block
  i.MX: Add code to emulate i.MX7 SNVS IP-block
  i.MX: Add code to emulate i.MX2 watchdog IP block
  i.MX: Add code to emulate i.MX7 CCM, PMU and ANALOG IP blocks
  hw: i.MX: Convert i.MX6 to use TYPE_IMX_USDHC
  sdhci: Add i.MX specific subtype of SDHCI
  target/arm: enable user-mode SHA-3, SM3, SM4 and SHA-512 instruction support
  target/arm: implement SM4 instructions
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f31cd9e4e2
hw/arm/boot.c:
@@ -385,6 +385,69 @@ static void set_kernel_args_old(const struct arm_boot_info *info)
    }
}

static void fdt_add_psci_node(void *fdt)
{
    uint32_t cpu_suspend_fn;
    uint32_t cpu_off_fn;
    uint32_t cpu_on_fn;
    uint32_t migrate_fn;
    ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0));
    const char *psci_method;
    int64_t psci_conduit;

    psci_conduit = object_property_get_int(OBJECT(armcpu),
                                           "psci-conduit",
                                           &error_abort);
    switch (psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        return;
    case QEMU_PSCI_CONDUIT_HVC:
        psci_method = "hvc";
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        psci_method = "smc";
        break;
    default:
        g_assert_not_reached();
    }

    qemu_fdt_add_subnode(fdt, "/psci");
    if (armcpu->psci_version == 2) {
        const char comp[] = "arm,psci-0.2\0arm,psci";
        qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp));

        cpu_off_fn = QEMU_PSCI_0_2_FN_CPU_OFF;
        if (arm_feature(&armcpu->env, ARM_FEATURE_AARCH64)) {
            cpu_suspend_fn = QEMU_PSCI_0_2_FN64_CPU_SUSPEND;
            cpu_on_fn = QEMU_PSCI_0_2_FN64_CPU_ON;
            migrate_fn = QEMU_PSCI_0_2_FN64_MIGRATE;
        } else {
            cpu_suspend_fn = QEMU_PSCI_0_2_FN_CPU_SUSPEND;
            cpu_on_fn = QEMU_PSCI_0_2_FN_CPU_ON;
            migrate_fn = QEMU_PSCI_0_2_FN_MIGRATE;
        }
    } else {
        qemu_fdt_setprop_string(fdt, "/psci", "compatible", "arm,psci");

        cpu_suspend_fn = QEMU_PSCI_0_1_FN_CPU_SUSPEND;
        cpu_off_fn = QEMU_PSCI_0_1_FN_CPU_OFF;
        cpu_on_fn = QEMU_PSCI_0_1_FN_CPU_ON;
        migrate_fn = QEMU_PSCI_0_1_FN_MIGRATE;
    }

    /* We adopt the PSCI spec's nomenclature, and use 'conduit' to refer
     * to the instruction that should be used to invoke PSCI functions.
     * However, the device tree binding uses 'method' instead, so that is
     * what we should use here.
     */
    qemu_fdt_setprop_string(fdt, "/psci", "method", psci_method);

    qemu_fdt_setprop_cell(fdt, "/psci", "cpu_suspend", cpu_suspend_fn);
    qemu_fdt_setprop_cell(fdt, "/psci", "cpu_off", cpu_off_fn);
    qemu_fdt_setprop_cell(fdt, "/psci", "cpu_on", cpu_on_fn);
    qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn);
}

/**
 * load_dtb() - load a device tree binary image into memory
 * @addr:       the address to load the image at

@@ -541,6 +604,8 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
        }
    }

    fdt_add_psci_node(fdt);

    if (binfo->modify_dtb) {
        binfo->modify_dtb(binfo, fdt);
    }
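The node above is only generated when the machine model has set the CPU's "psci-conduit" property. As a rough, hedged sketch (not part of this series; hw/arm/virt.c does the real equivalent, and the CPU model chosen below is just a placeholder), board code opts in like this:

/* Illustrative only: how a board might enable the /psci node that
 * fdt_add_psci_node() generates.  The CPU type and conduit choice are
 * assumptions for the example. */
Object *cpuobj = object_new(ARM_CPU_TYPE_NAME("cortex-a15"));

/* fdt_add_psci_node() reads this back via object_property_get_int(),
 * so leaving it at QEMU_PSCI_CONDUIT_DISABLED suppresses the node. */
object_property_set_int(cpuobj, QEMU_PSCI_CONDUIT_HVC, "psci-conduit",
                        &error_abort);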
hw/arm/fsl-imx6.c:
@@ -93,7 +93,7 @@ static void fsl_imx6_init(Object *obj)
    }

    for (i = 0; i < FSL_IMX6_NUM_ESDHCS; i++) {
-        object_initialize(&s->esdhc[i], sizeof(s->esdhc[i]), TYPE_SYSBUS_SDHCI);
+        object_initialize(&s->esdhc[i], sizeof(s->esdhc[i]), TYPE_IMX_USDHC);
        qdev_set_parent_bus(DEVICE(&s->esdhc[i]), sysbus_get_default());
        snprintf(name, NAME_SIZE, "sdhc%d", i + 1);
        object_property_add_child(obj, name, OBJECT(&s->esdhc[i]), NULL);
hw/arm/virt.c (fdt_add_psci_node moves to hw/arm/boot.c, so the local copy is removed):
@@ -244,66 +244,6 @@ static void create_fdt(VirtMachineState *vms)
    }
}

static void fdt_add_psci_node(const VirtMachineState *vms)
{
    uint32_t cpu_suspend_fn;
    uint32_t cpu_off_fn;
    uint32_t cpu_on_fn;
    uint32_t migrate_fn;
    void *fdt = vms->fdt;
    ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0));
    const char *psci_method;

    switch (vms->psci_conduit) {
    case QEMU_PSCI_CONDUIT_DISABLED:
        return;
    case QEMU_PSCI_CONDUIT_HVC:
        psci_method = "hvc";
        break;
    case QEMU_PSCI_CONDUIT_SMC:
        psci_method = "smc";
        break;
    default:
        g_assert_not_reached();
    }

    qemu_fdt_add_subnode(fdt, "/psci");
    if (armcpu->psci_version == 2) {
        const char comp[] = "arm,psci-0.2\0arm,psci";
        qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp));

        cpu_off_fn = QEMU_PSCI_0_2_FN_CPU_OFF;
        if (arm_feature(&armcpu->env, ARM_FEATURE_AARCH64)) {
            cpu_suspend_fn = QEMU_PSCI_0_2_FN64_CPU_SUSPEND;
            cpu_on_fn = QEMU_PSCI_0_2_FN64_CPU_ON;
            migrate_fn = QEMU_PSCI_0_2_FN64_MIGRATE;
        } else {
            cpu_suspend_fn = QEMU_PSCI_0_2_FN_CPU_SUSPEND;
            cpu_on_fn = QEMU_PSCI_0_2_FN_CPU_ON;
            migrate_fn = QEMU_PSCI_0_2_FN_MIGRATE;
        }
    } else {
        qemu_fdt_setprop_string(fdt, "/psci", "compatible", "arm,psci");

        cpu_suspend_fn = QEMU_PSCI_0_1_FN_CPU_SUSPEND;
        cpu_off_fn = QEMU_PSCI_0_1_FN_CPU_OFF;
        cpu_on_fn = QEMU_PSCI_0_1_FN_CPU_ON;
        migrate_fn = QEMU_PSCI_0_1_FN_MIGRATE;
    }

    /* We adopt the PSCI spec's nomenclature, and use 'conduit' to refer
     * to the instruction that should be used to invoke PSCI functions.
     * However, the device tree binding uses 'method' instead, so that is
     * what we should use here.
     */
    qemu_fdt_setprop_string(fdt, "/psci", "method", psci_method);

    qemu_fdt_setprop_cell(fdt, "/psci", "cpu_suspend", cpu_suspend_fn);
    qemu_fdt_setprop_cell(fdt, "/psci", "cpu_off", cpu_off_fn);
    qemu_fdt_setprop_cell(fdt, "/psci", "cpu_on", cpu_on_fn);
    qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn);
}

static void fdt_add_timer_nodes(const VirtMachineState *vms)
{
    /* On real hardware these interrupts are level-triggered.

@@ -1409,7 +1349,6 @@ static void machvirt_init(MachineState *machine)
    }
    fdt_add_timer_nodes(vms);
    fdt_add_cpu_nodes(vms);
-   fdt_add_psci_node(vms);

    memory_region_allocate_system_memory(ram, NULL, "mach-virt.ram",
                                         machine->ram_size);
hw/core/generic-loader.c:
@@ -105,7 +105,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp)
            error_setg(errp, "data can not be specified when setting a "
                       "program counter");
            return;
-        } else if (!s->cpu_num) {
+        } else if (s->cpu_num == CPU_NONE) {
            error_setg(errp, "cpu_num must be specified when setting a "
                       "program counter");
            return;
hw/intc/Makefile.objs:
@@ -6,7 +6,7 @@ common-obj-$(CONFIG_XILINX) += xilinx_intc.o
common-obj-$(CONFIG_XLNX_ZYNQMP) += xlnx-pmu-iomod-intc.o
common-obj-$(CONFIG_XLNX_ZYNQMP) += xlnx-zynqmp-ipi.o
common-obj-$(CONFIG_ETRAXFS) += etraxfs_pic.o
-common-obj-$(CONFIG_IMX) += imx_avic.o
+common-obj-$(CONFIG_IMX) += imx_avic.o imx_gpcv2.o
common-obj-$(CONFIG_LM32) += lm32_pic.o
common-obj-$(CONFIG_REALVIEW) += realview_gic.o
common-obj-$(CONFIG_SLAVIO) += slavio_intctl.o
hw/intc/armv7m_nvic.c:
@@ -503,8 +503,25 @@ static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
    }
}

-void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
+static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
+                                       bool derived)
{
+    /* Pend an exception, including possibly escalating it to HardFault.
+     *
+     * This function handles both "normal" pending of interrupts and
+     * exceptions, and also derived exceptions (ones which occur as
+     * a result of trying to take some other exception).
+     *
+     * If derived == true, the caller guarantees that we are part way through
+     * trying to take an exception (but have not yet called
+     * armv7m_nvic_acknowledge_irq() to make it active), and so:
+     *  - s->vectpending is the "original exception" we were trying to take
+     *  - irq is the "derived exception"
+     *  - nvic_exec_prio(s) gives the priority before exception entry
+     * Here we handle the prioritization logic which the pseudocode puts
+     * in the DerivedLateArrival() function.
+     */
+
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;

@@ -514,7 +531,44 @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

-    trace_nvic_set_pending(irq, secure, vec->enabled, vec->prio);
+    trace_nvic_set_pending(irq, secure, derived, vec->enabled, vec->prio);

+    if (derived) {
+        /* Derived exceptions are always synchronous. */
+        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);
+
+        if (irq == ARMV7M_EXCP_DEBUG &&
+            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
+            /* DebugMonitorFault, but its priority is lower than the
+             * preempted exception priority: just ignore it.
+             */
+            return;
+        }
+
+        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
+            /* If this is a terminal exception (one which means we cannot
+             * take the original exception, like a failure to read its
+             * vector table entry), then we must take the derived exception.
+             * If the derived exception can't take priority over the
+             * original exception, then we go into Lockup.
+             *
+             * For QEMU, we rely on the fact that a derived exception is
+             * terminal if and only if it's reported to us as HardFault,
+             * which saves having to have an extra argument is_terminal
+             * that we'd only use in one place.
+             */
+            cpu_abort(&s->cpu->parent_obj,
+                      "Lockup: can't take terminal derived exception "
+                      "(original exception priority %d)\n",
+                      s->vectpending_prio);
+        }
+        /* We now continue with the same code as for a normal pending
+         * exception, which will cause us to pend the derived exception.
+         * We'll then take either the original or the derived exception
+         * based on which is higher priority by the usual mechanism
+         * for selecting the highest priority pending interrupt.
+         */
+    }
+
    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be

@@ -585,25 +639,31 @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
        }
    }

+void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
+{
+    do_armv7m_nvic_set_pending(opaque, irq, secure, false);
+}
+
+void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
+{
+    do_armv7m_nvic_set_pending(opaque, irq, secure, true);
+}
+
/* Make pending IRQ active.  */
-bool armv7m_nvic_acknowledge_irq(void *opaque)
+void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    VecInfo *vec;
-    bool targets_secure;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    if (s->vectpending_is_s_banked) {
        vec = &s->sec_vectors[pending];
-        targets_secure = true;
    } else {
        vec = &s->vectors[pending];
-        targets_secure = !exc_is_banked(s->vectpending) &&
-            exc_targets_secure(s, s->vectpending);
    }

    assert(vec->enabled);

@@ -611,7 +671,7 @@ bool armv7m_nvic_acknowledge_irq(void *opaque)

    assert(s->vectpending_prio < running);

-    trace_nvic_acknowledge_irq(pending, s->vectpending_prio, targets_secure);
+    trace_nvic_acknowledge_irq(pending, s->vectpending_prio);

    vec->active = 1;
    vec->pending = 0;

@@ -619,8 +679,28 @@ bool armv7m_nvic_acknowledge_irq(void *opaque)
    write_v7m_exception(env, s->vectpending);

    nvic_irq_update(s);
}
-
-    return targets_secure;

+void armv7m_nvic_get_pending_irq_info(void *opaque,
+                                      int *pirq, bool *ptargets_secure)
+{
+    NVICState *s = (NVICState *)opaque;
+    const int pending = s->vectpending;
+    bool targets_secure;
+
+    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
+
+    if (s->vectpending_is_s_banked) {
+        targets_secure = true;
+    } else {
+        targets_secure = !exc_is_banked(pending) &&
+            exc_targets_secure(s, pending);
+    }
+
+    trace_nvic_get_pending_irq_info(pending, targets_secure);
+
+    *ptargets_secure = targets_secure;
+    *pirq = pending;
+}

int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
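The comment block in do_armv7m_nvic_set_pending() is the heart of this change. The following stand-alone sketch (illustrative only, not QEMU code; the function and type names are invented for the example) restates the three possible outcomes of the v8M DerivedLateArrival pseudocode that the handler above implements:

typedef enum {
    PEND_AND_PRIORITIZE,   /* pend the derived exception; normal selection picks a winner */
    IGNORE_DERIVED,        /* DebugMonitor with insufficient priority: drop it */
    LOCKUP                 /* terminal derived exception cannot preempt: lock up */
} DerivedOutcome;

/* derived_prio:   group priority of the derived exception
 * preempted_prio: execution priority before exception entry (nvic_exec_prio)
 * original_prio:  priority of the exception we were originally taking */
static DerivedOutcome derived_late_arrival(bool is_debug_monitor,
                                           bool is_terminal,
                                           int derived_prio,
                                           int preempted_prio,
                                           int original_prio)
{
    if (is_debug_monitor && derived_prio >= preempted_prio) {
        return IGNORE_DERIVED;
    }
    if (is_terminal && derived_prio >= original_prio) {
        return LOCKUP;
    }
    return PEND_AND_PRIORITIZE;
}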
hw/intc/imx_gpcv2.c (new file, 125 lines)
@@ -0,0 +1,125 @@
/*
 * Copyright (c) 2018, Impinj, Inc.
 *
 * i.MX7 GPCv2 block emulation code
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/intc/imx_gpcv2.h"
#include "qemu/log.h"

#define GPC_PU_PGC_SW_PUP_REQ       0x0f8
#define GPC_PU_PGC_SW_PDN_REQ       0x104

#define USB_HSIC_PHY_SW_Pxx_REQ     BIT(4)
#define USB_OTG2_PHY_SW_Pxx_REQ     BIT(3)
#define USB_OTG1_PHY_SW_Pxx_REQ     BIT(2)
#define PCIE_PHY_SW_Pxx_REQ         BIT(1)
#define MIPI_PHY_SW_Pxx_REQ         BIT(0)


static void imx_gpcv2_reset(DeviceState *dev)
{
    IMXGPCv2State *s = IMX_GPCV2(dev);

    memset(s->regs, 0, sizeof(s->regs));
}

static uint64_t imx_gpcv2_read(void *opaque, hwaddr offset,
                               unsigned size)
{
    IMXGPCv2State *s = opaque;

    return s->regs[offset / sizeof(uint32_t)];
}

static void imx_gpcv2_write(void *opaque, hwaddr offset,
                            uint64_t value, unsigned size)
{
    IMXGPCv2State *s = opaque;
    const size_t idx = offset / sizeof(uint32_t);

    s->regs[idx] = value;

    /*
     * Real HW will clear those bits once as a way to indicate that
     * power up request is complete
     */
    if (offset == GPC_PU_PGC_SW_PUP_REQ ||
        offset == GPC_PU_PGC_SW_PDN_REQ) {
        s->regs[idx] &= ~(USB_HSIC_PHY_SW_Pxx_REQ |
                          USB_OTG2_PHY_SW_Pxx_REQ |
                          USB_OTG1_PHY_SW_Pxx_REQ |
                          PCIE_PHY_SW_Pxx_REQ |
                          MIPI_PHY_SW_Pxx_REQ);
    }
}

static const struct MemoryRegionOps imx_gpcv2_ops = {
    .read = imx_gpcv2_read,
    .write = imx_gpcv2_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        /*
         * Our device would not work correctly if the guest was doing
         * unaligned access. This might not be a limitation on the real
         * device but in practice there is no reason for a guest to access
         * this device unaligned.
         */
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static void imx_gpcv2_init(Object *obj)
{
    SysBusDevice *sd = SYS_BUS_DEVICE(obj);
    IMXGPCv2State *s = IMX_GPCV2(obj);

    memory_region_init_io(&s->iomem,
                          obj,
                          &imx_gpcv2_ops,
                          s,
                          TYPE_IMX_GPCV2 ".iomem",
                          sizeof(s->regs));
    sysbus_init_mmio(sd, &s->iomem);
}

static const VMStateDescription vmstate_imx_gpcv2 = {
    .name = TYPE_IMX_GPCV2,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXGPCv2State, GPC_NUM),
        VMSTATE_END_OF_LIST()
    },
};

static void imx_gpcv2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = imx_gpcv2_reset;
    dc->vmsd  = &vmstate_imx_gpcv2;
    dc->desc  = "i.MX GPCv2 Module";
}

static const TypeInfo imx_gpcv2_info = {
    .name          = TYPE_IMX_GPCV2,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXGPCv2State),
    .instance_init = imx_gpcv2_init,
    .class_init    = imx_gpcv2_class_init,
};

static void imx_gpcv2_register_type(void)
{
    type_register_static(&imx_gpcv2_info);
}
type_init(imx_gpcv2_register_type)
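The write handler's trick of immediately clearing the request bits models the handshake a guest expects from the GPC: software sets a power-up/power-down request and polls until hardware clears it. A bare-metal guest-side view (illustrative only, not QEMU code; the GPC base address is an assumption for the i.MX7D memory map) is roughly:

#include <stdint.h>

#define GPC_BASE               0x303a0000u  /* assumed i.MX7D GPC base */
#define GPC_PU_PGC_SW_PUP_REQ  0x0f8u
#define MIPI_PHY_SW_PUP_REQ    (1u << 0)

static void gpc_power_up_mipi_phy(void)
{
    volatile uint32_t *req =
        (volatile uint32_t *)(GPC_BASE + GPC_PU_PGC_SW_PUP_REQ);

    *req |= MIPI_PHY_SW_PUP_REQ;            /* request power-up */
    while (*req & MIPI_PHY_SW_PUP_REQ) {
        /* hardware clears the bit when the request completes; the QEMU
         * model above clears it immediately, so this loop exits on the
         * first read */
    }
}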
hw/intc/trace-events:
@@ -177,10 +177,11 @@ nvic_set_prio(int irq, bool secure, uint8_t prio) "NVIC set irq %d secure-bank %
nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d"
nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d"
nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled"
-nvic_set_pending(int irq, bool secure, int en, int prio) "NVIC set pending irq %d secure-bank %d (enabled: %d priority %d)"
+nvic_set_pending(int irq, bool secure, bool derived, int en, int prio) "NVIC set pending irq %d secure-bank %d derived %d (enabled: %d priority %d)"
nvic_clear_pending(int irq, bool secure, int en, int prio) "NVIC clear pending irq %d secure-bank %d (enabled: %d priority %d)"
nvic_set_pending_level(int irq) "NVIC set pending: irq %d higher prio than vectpending: setting irq line to 1"
-nvic_acknowledge_irq(int irq, int prio, bool targets_secure) "NVIC acknowledge IRQ: %d now active (prio %d targets_secure %d)"
+nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)"
+nvic_get_pending_irq_info(int irq, bool secure) "NVIC next IRQ %d: targets_secure: %d"
nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)"
nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d"
nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
hw/misc/Makefile.objs:
@@ -33,6 +33,10 @@ obj-$(CONFIG_IMX) += imx31_ccm.o
obj-$(CONFIG_IMX) += imx25_ccm.o
obj-$(CONFIG_IMX) += imx6_ccm.o
obj-$(CONFIG_IMX) += imx6_src.o
+obj-$(CONFIG_IMX) += imx7_ccm.o
+obj-$(CONFIG_IMX) += imx2_wdt.o
+obj-$(CONFIG_IMX) += imx7_snvs.o
+obj-$(CONFIG_IMX) += imx7_gpr.o
obj-$(CONFIG_MILKYMIST) += milkymist-hpdmc.o
obj-$(CONFIG_MILKYMIST) += milkymist-pfpu.o
obj-$(CONFIG_MAINSTONE) += mst_fpga.o
hw/misc/imx2_wdt.c (new file, 89 lines)
@@ -0,0 +1,89 @@
/*
 * Copyright (c) 2018, Impinj, Inc.
 *
 * i.MX2 Watchdog IP block
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "sysemu/watchdog.h"

#include "hw/misc/imx2_wdt.h"

#define IMX2_WDT_WCR_WDA    BIT(5)      /* -> External Reset WDOG_B */
#define IMX2_WDT_WCR_SRS    BIT(4)      /* -> Software Reset Signal */

static uint64_t imx2_wdt_read(void *opaque, hwaddr addr,
                              unsigned int size)
{
    return 0;
}

static void imx2_wdt_write(void *opaque, hwaddr addr,
                           uint64_t value, unsigned int size)
{
    if (addr == IMX2_WDT_WCR &&
        (value & (IMX2_WDT_WCR_WDA | IMX2_WDT_WCR_SRS))) {
        watchdog_perform_action();
    }
}

static const MemoryRegionOps imx2_wdt_ops = {
    .read  = imx2_wdt_read,
    .write = imx2_wdt_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        /*
         * Our device would not work correctly if the guest was doing
         * unaligned access. This might not be a limitation on the
         * real device but in practice there is no reason for a guest
         * to access this device unaligned.
         */
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static void imx2_wdt_realize(DeviceState *dev, Error **errp)
{
    IMX2WdtState *s = IMX2_WDT(dev);

    memory_region_init_io(&s->mmio, OBJECT(dev),
                          &imx2_wdt_ops, s,
                          TYPE_IMX2_WDT".mmio",
                          IMX2_WDT_REG_NUM * sizeof(uint16_t));
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
}

static void imx2_wdt_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = imx2_wdt_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo imx2_wdt_info = {
    .name          = TYPE_IMX2_WDT,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMX2WdtState),
    .class_init    = imx2_wdt_class_init,
};

static WatchdogTimerModel model = {
    .wdt_name = "imx2-watchdog",
    .wdt_description = "i.MX2 Watchdog",
};

static void imx2_wdt_register_type(void)
{
    watchdog_add_model(&model);
    type_register_static(&imx2_wdt_info);
}
type_init(imx2_wdt_register_type)
hw/misc/imx7_ccm.c (new file, 277 lines)
@@ -0,0 +1,277 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Impinj, Inc.
|
||||
*
|
||||
* i.MX7 CCM, PMU and ANALOG IP blocks emulation code
|
||||
*
|
||||
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/log.h"
|
||||
|
||||
#include "hw/misc/imx7_ccm.h"
|
||||
|
||||
static void imx7_analog_reset(DeviceState *dev)
|
||||
{
|
||||
IMX7AnalogState *s = IMX7_ANALOG(dev);
|
||||
|
||||
memset(s->pmu, 0, sizeof(s->pmu));
|
||||
memset(s->analog, 0, sizeof(s->analog));
|
||||
|
||||
s->analog[ANALOG_PLL_ARM] = 0x00002042;
|
||||
s->analog[ANALOG_PLL_DDR] = 0x0060302c;
|
||||
s->analog[ANALOG_PLL_DDR_SS] = 0x00000000;
|
||||
s->analog[ANALOG_PLL_DDR_NUM] = 0x06aaac4d;
|
||||
s->analog[ANALOG_PLL_DDR_DENOM] = 0x100003ec;
|
||||
s->analog[ANALOG_PLL_480] = 0x00002000;
|
||||
s->analog[ANALOG_PLL_480A] = 0x52605a56;
|
||||
s->analog[ANALOG_PLL_480B] = 0x52525216;
|
||||
s->analog[ANALOG_PLL_ENET] = 0x00001fc0;
|
||||
s->analog[ANALOG_PLL_AUDIO] = 0x0001301b;
|
||||
s->analog[ANALOG_PLL_AUDIO_SS] = 0x00000000;
|
||||
s->analog[ANALOG_PLL_AUDIO_NUM] = 0x05f5e100;
|
||||
s->analog[ANALOG_PLL_AUDIO_DENOM] = 0x2964619c;
|
||||
s->analog[ANALOG_PLL_VIDEO] = 0x0008201b;
|
||||
s->analog[ANALOG_PLL_VIDEO_SS] = 0x00000000;
|
||||
s->analog[ANALOG_PLL_VIDEO_NUM] = 0x0000f699;
|
||||
s->analog[ANALOG_PLL_VIDEO_DENOM] = 0x000f4240;
|
||||
s->analog[ANALOG_PLL_MISC0] = 0x00000000;
|
||||
|
||||
/* all PLLs need to be locked */
|
||||
s->analog[ANALOG_PLL_ARM] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_DDR] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_480] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_480A] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_480B] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_ENET] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_AUDIO] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_VIDEO] |= ANALOG_PLL_LOCK;
|
||||
s->analog[ANALOG_PLL_MISC0] |= ANALOG_PLL_LOCK;
|
||||
|
||||
/*
|
||||
* Since I couldn't find any info about this in the reference
|
||||
* manual the value of this register is based strictly on matching
|
||||
* what Linux kernel expects it to be.
|
||||
*/
|
||||
s->analog[ANALOG_DIGPROG] = 0x720000;
|
||||
/*
|
||||
* Set revision to be 1.0 (Arbitrary choice, no particular
|
||||
* reason).
|
||||
*/
|
||||
s->analog[ANALOG_DIGPROG] |= 0x000010;
|
||||
}
|
||||
|
||||
static void imx7_ccm_reset(DeviceState *dev)
|
||||
{
|
||||
IMX7CCMState *s = IMX7_CCM(dev);
|
||||
|
||||
memset(s->ccm, 0, sizeof(s->ccm));
|
||||
}
|
||||
|
||||
#define CCM_INDEX(offset) (((offset) & ~(hwaddr)0xF) / sizeof(uint32_t))
|
||||
#define CCM_BITOP(offset) ((offset) & (hwaddr)0xF)
|
||||
|
||||
enum {
|
||||
CCM_BITOP_NONE = 0x00,
|
||||
CCM_BITOP_SET = 0x04,
|
||||
CCM_BITOP_CLR = 0x08,
|
||||
CCM_BITOP_TOG = 0x0C,
|
||||
};
|
||||
|
||||
static uint64_t imx7_set_clr_tog_read(void *opaque, hwaddr offset,
|
||||
unsigned size)
|
||||
{
|
||||
const uint32_t *mmio = opaque;
|
||||
|
||||
return mmio[CCM_INDEX(offset)];
|
||||
}
|
||||
|
||||
static void imx7_set_clr_tog_write(void *opaque, hwaddr offset,
|
||||
uint64_t value, unsigned size)
|
||||
{
|
||||
const uint8_t bitop = CCM_BITOP(offset);
|
||||
const uint32_t index = CCM_INDEX(offset);
|
||||
uint32_t *mmio = opaque;
|
||||
|
||||
switch (bitop) {
|
||||
case CCM_BITOP_NONE:
|
||||
mmio[index] = value;
|
||||
break;
|
||||
case CCM_BITOP_SET:
|
||||
mmio[index] |= value;
|
||||
break;
|
||||
case CCM_BITOP_CLR:
|
||||
mmio[index] &= ~value;
|
||||
break;
|
||||
case CCM_BITOP_TOG:
|
||||
mmio[index] ^= value;
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
static const struct MemoryRegionOps imx7_set_clr_tog_ops = {
|
||||
.read = imx7_set_clr_tog_read,
|
||||
.write = imx7_set_clr_tog_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.impl = {
|
||||
/*
|
||||
* Our device would not work correctly if the guest was doing
|
||||
* unaligned access. This might not be a limitation on the real
|
||||
* device but in practice there is no reason for a guest to access
|
||||
* this device unaligned.
|
||||
*/
|
||||
.min_access_size = 4,
|
||||
.max_access_size = 4,
|
||||
.unaligned = false,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct MemoryRegionOps imx7_digprog_ops = {
|
||||
.read = imx7_set_clr_tog_read,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.impl = {
|
||||
.min_access_size = 4,
|
||||
.max_access_size = 4,
|
||||
.unaligned = false,
|
||||
},
|
||||
};
|
||||
|
||||
static void imx7_ccm_init(Object *obj)
|
||||
{
|
||||
SysBusDevice *sd = SYS_BUS_DEVICE(obj);
|
||||
IMX7CCMState *s = IMX7_CCM(obj);
|
||||
|
||||
memory_region_init_io(&s->iomem,
|
||||
obj,
|
||||
&imx7_set_clr_tog_ops,
|
||||
s->ccm,
|
||||
TYPE_IMX7_CCM ".ccm",
|
||||
sizeof(s->ccm));
|
||||
|
||||
sysbus_init_mmio(sd, &s->iomem);
|
||||
}
|
||||
|
||||
static void imx7_analog_init(Object *obj)
|
||||
{
|
||||
SysBusDevice *sd = SYS_BUS_DEVICE(obj);
|
||||
IMX7AnalogState *s = IMX7_ANALOG(obj);
|
||||
|
||||
memory_region_init(&s->mmio.container, obj, TYPE_IMX7_ANALOG,
|
||||
0x10000);
|
||||
|
||||
memory_region_init_io(&s->mmio.analog,
|
||||
obj,
|
||||
&imx7_set_clr_tog_ops,
|
||||
s->analog,
|
||||
TYPE_IMX7_ANALOG,
|
||||
sizeof(s->analog));
|
||||
|
||||
memory_region_add_subregion(&s->mmio.container,
|
||||
0x60, &s->mmio.analog);
|
||||
|
||||
memory_region_init_io(&s->mmio.pmu,
|
||||
obj,
|
||||
&imx7_set_clr_tog_ops,
|
||||
s->pmu,
|
||||
TYPE_IMX7_ANALOG ".pmu",
|
||||
sizeof(s->pmu));
|
||||
|
||||
memory_region_add_subregion(&s->mmio.container,
|
||||
0x200, &s->mmio.pmu);
|
||||
|
||||
memory_region_init_io(&s->mmio.digprog,
|
||||
obj,
|
||||
&imx7_digprog_ops,
|
||||
&s->analog[ANALOG_DIGPROG],
|
||||
TYPE_IMX7_ANALOG ".digprog",
|
||||
sizeof(uint32_t));
|
||||
|
||||
memory_region_add_subregion_overlap(&s->mmio.container,
|
||||
0x800, &s->mmio.digprog, 10);
|
||||
|
||||
|
||||
sysbus_init_mmio(sd, &s->mmio.container);
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_imx7_ccm = {
|
||||
.name = TYPE_IMX7_CCM,
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT32_ARRAY(ccm, IMX7CCMState, CCM_MAX),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
};
|
||||
|
||||
static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
|
||||
{
|
||||
/*
|
||||
* This function is "consumed" by GPT emulation code, however on
|
||||
* i.MX7 each GPT block can have their own clock root. This means
|
||||
* that this functions needs somehow to know requester's identity
|
||||
* and the way to pass it: be it via additional IMXClk constants
|
||||
* or by adding another argument to this method needs to be
|
||||
* figured out
|
||||
*/
|
||||
qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Not implemented\n",
|
||||
TYPE_IMX7_CCM, __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void imx7_ccm_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
IMXCCMClass *ccm = IMX_CCM_CLASS(klass);
|
||||
|
||||
dc->reset = imx7_ccm_reset;
|
||||
dc->vmsd = &vmstate_imx7_ccm;
|
||||
dc->desc = "i.MX7 Clock Control Module";
|
||||
|
||||
ccm->get_clock_frequency = imx7_ccm_get_clock_frequency;
|
||||
}
|
||||
|
||||
static const TypeInfo imx7_ccm_info = {
|
||||
.name = TYPE_IMX7_CCM,
|
||||
.parent = TYPE_IMX_CCM,
|
||||
.instance_size = sizeof(IMX7CCMState),
|
||||
.instance_init = imx7_ccm_init,
|
||||
.class_init = imx7_ccm_class_init,
|
||||
};
|
||||
|
||||
static const VMStateDescription vmstate_imx7_analog = {
|
||||
.name = TYPE_IMX7_ANALOG,
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT32_ARRAY(analog, IMX7AnalogState, ANALOG_MAX),
|
||||
VMSTATE_UINT32_ARRAY(pmu, IMX7AnalogState, PMU_MAX),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
};
|
||||
|
||||
static void imx7_analog_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
dc->reset = imx7_analog_reset;
|
||||
dc->vmsd = &vmstate_imx7_analog;
|
||||
dc->desc = "i.MX7 Analog Module";
|
||||
}
|
||||
|
||||
static const TypeInfo imx7_analog_info = {
|
||||
.name = TYPE_IMX7_ANALOG,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(IMX7AnalogState),
|
||||
.instance_init = imx7_analog_init,
|
||||
.class_init = imx7_analog_class_init,
|
||||
};
|
||||
|
||||
static void imx7_ccm_register_type(void)
|
||||
{
|
||||
type_register_static(&imx7_ccm_info);
|
||||
type_register_static(&imx7_analog_info);
|
||||
}
|
||||
type_init(imx7_ccm_register_type)
|
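A note on the imx7_ccm.c block above: i.MX CCM/ANALOG registers follow the usual Freescale "set/clear/toggle" convention, where writing to REG+0x4, REG+0x8 or REG+0xC sets, clears or toggles bits of REG; that is what imx7_set_clr_tog_write() decodes with CCM_INDEX()/CCM_BITOP(). A minimal stand-alone restatement (illustrative only, not QEMU code):

#include <stdint.h>

enum { OP_NONE = 0x0, OP_SET = 0x4, OP_CLR = 0x8, OP_TOG = 0xC };

/* regs models a register file of 32-bit words; offset is the byte
 * offset written by the guest. */
static void sct_write(uint32_t *regs, uint32_t offset, uint32_t value)
{
    uint32_t *reg = &regs[(offset & ~0xFu) / sizeof(uint32_t)];

    switch (offset & 0xFu) {
    case OP_NONE: *reg  = value;  break;
    case OP_SET:  *reg |= value;  break;
    case OP_CLR:  *reg &= ~value; break;
    case OP_TOG:  *reg ^= value;  break;
    }
}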
hw/misc/imx7_gpr.c (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Impinj, Inc.
|
||||
*
|
||||
* i.MX7 GPR IP block emulation code
|
||||
*
|
||||
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
* Bare minimum emulation code needed to support being able to shut
|
||||
* down linux guest gracefully.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/misc/imx7_gpr.h"
|
||||
#include "qemu/log.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
|
||||
#include "trace.h"
|
||||
|
||||
enum IMX7GPRRegisters {
|
||||
IOMUXC_GPR0 = 0x00,
|
||||
IOMUXC_GPR1 = 0x04,
|
||||
IOMUXC_GPR2 = 0x08,
|
||||
IOMUXC_GPR3 = 0x0c,
|
||||
IOMUXC_GPR4 = 0x10,
|
||||
IOMUXC_GPR5 = 0x14,
|
||||
IOMUXC_GPR6 = 0x18,
|
||||
IOMUXC_GPR7 = 0x1c,
|
||||
IOMUXC_GPR8 = 0x20,
|
||||
IOMUXC_GPR9 = 0x24,
|
||||
IOMUXC_GPR10 = 0x28,
|
||||
IOMUXC_GPR11 = 0x2c,
|
||||
IOMUXC_GPR12 = 0x30,
|
||||
IOMUXC_GPR13 = 0x34,
|
||||
IOMUXC_GPR14 = 0x38,
|
||||
IOMUXC_GPR15 = 0x3c,
|
||||
IOMUXC_GPR16 = 0x40,
|
||||
IOMUXC_GPR17 = 0x44,
|
||||
IOMUXC_GPR18 = 0x48,
|
||||
IOMUXC_GPR19 = 0x4c,
|
||||
IOMUXC_GPR20 = 0x50,
|
||||
IOMUXC_GPR21 = 0x54,
|
||||
IOMUXC_GPR22 = 0x58,
|
||||
};
|
||||
|
||||
#define IMX7D_GPR1_IRQ_MASK BIT(12)
|
||||
#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK BIT(13)
|
||||
#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK BIT(14)
|
||||
#define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13)
|
||||
#define IMX7D_GPR1_ENET1_CLK_DIR_MASK BIT(17)
|
||||
#define IMX7D_GPR1_ENET2_CLK_DIR_MASK BIT(18)
|
||||
#define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17)
|
||||
|
||||
#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI BIT(4)
|
||||
#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5)
|
||||
#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31)
|
||||
|
||||
|
||||
static uint64_t imx7_gpr_read(void *opaque, hwaddr offset, unsigned size)
|
||||
{
|
||||
trace_imx7_gpr_read(offset);
|
||||
|
||||
if (offset == IOMUXC_GPR22) {
|
||||
return IMX7D_GPR22_PCIE_PHY_PLL_LOCKED;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void imx7_gpr_write(void *opaque, hwaddr offset,
|
||||
uint64_t v, unsigned size)
|
||||
{
|
||||
trace_imx7_gpr_write(offset, v);
|
||||
}
|
||||
|
||||
static const struct MemoryRegionOps imx7_gpr_ops = {
|
||||
.read = imx7_gpr_read,
|
||||
.write = imx7_gpr_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.impl = {
|
||||
/*
|
||||
* Our device would not work correctly if the guest was doing
|
||||
* unaligned access. This might not be a limitation on the
|
||||
* real device but in practice there is no reason for a guest
|
||||
* to access this device unaligned.
|
||||
*/
|
||||
.min_access_size = 4,
|
||||
.max_access_size = 4,
|
||||
.unaligned = false,
|
||||
},
|
||||
};
|
||||
|
||||
static void imx7_gpr_init(Object *obj)
|
||||
{
|
||||
SysBusDevice *sd = SYS_BUS_DEVICE(obj);
|
||||
IMX7GPRState *s = IMX7_GPR(obj);
|
||||
|
||||
memory_region_init_io(&s->mmio, obj, &imx7_gpr_ops, s,
|
||||
TYPE_IMX7_GPR, 64 * 1024);
|
||||
sysbus_init_mmio(sd, &s->mmio);
|
||||
}
|
||||
|
||||
static void imx7_gpr_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
dc->desc = "i.MX7 General Purpose Registers Module";
|
||||
}
|
||||
|
||||
static const TypeInfo imx7_gpr_info = {
|
||||
.name = TYPE_IMX7_GPR,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(IMX7GPRState),
|
||||
.instance_init = imx7_gpr_init,
|
||||
.class_init = imx7_gpr_class_init,
|
||||
};
|
||||
|
||||
static void imx7_gpr_register_type(void)
|
||||
{
|
||||
type_register_static(&imx7_gpr_info);
|
||||
}
|
||||
type_init(imx7_gpr_register_type)
|
hw/misc/imx7_snvs.c (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
* IMX7 Secure Non-Volatile Storage
|
||||
*
|
||||
* Copyright (c) 2018, Impinj, Inc.
|
||||
*
|
||||
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
* Bare minimum emulation code needed to support being able to shut
|
||||
* down linux guest gracefully.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/misc/imx7_snvs.h"
|
||||
#include "qemu/log.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
|
||||
static uint64_t imx7_snvs_read(void *opaque, hwaddr offset, unsigned size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void imx7_snvs_write(void *opaque, hwaddr offset,
|
||||
uint64_t v, unsigned size)
|
||||
{
|
||||
const uint32_t value = v;
|
||||
const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN;
|
||||
|
||||
if (offset == SNVS_LPCR && ((value & mask) == mask)) {
|
||||
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct MemoryRegionOps imx7_snvs_ops = {
|
||||
.read = imx7_snvs_read,
|
||||
.write = imx7_snvs_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.impl = {
|
||||
/*
|
||||
* Our device would not work correctly if the guest was doing
|
||||
* unaligned access. This might not be a limitation on the real
|
||||
* device but in practice there is no reason for a guest to access
|
||||
* this device unaligned.
|
||||
*/
|
||||
.min_access_size = 4,
|
||||
.max_access_size = 4,
|
||||
.unaligned = false,
|
||||
},
|
||||
};
|
||||
|
||||
static void imx7_snvs_init(Object *obj)
|
||||
{
|
||||
SysBusDevice *sd = SYS_BUS_DEVICE(obj);
|
||||
IMX7SNVSState *s = IMX7_SNVS(obj);
|
||||
|
||||
memory_region_init_io(&s->mmio, obj, &imx7_snvs_ops, s,
|
||||
TYPE_IMX7_SNVS, 0x1000);
|
||||
|
||||
sysbus_init_mmio(sd, &s->mmio);
|
||||
}
|
||||
|
||||
static void imx7_snvs_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
||||
dc->desc = "i.MX7 Secure Non-Volatile Storage Module";
|
||||
}
|
||||
|
||||
static const TypeInfo imx7_snvs_info = {
|
||||
.name = TYPE_IMX7_SNVS,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(IMX7SNVSState),
|
||||
.instance_init = imx7_snvs_init,
|
||||
.class_init = imx7_snvs_class_init,
|
||||
};
|
||||
|
||||
static void imx7_snvs_register_type(void)
|
||||
{
|
||||
type_register_static(&imx7_snvs_info);
|
||||
}
|
||||
type_init(imx7_snvs_register_type)
|
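The SNVS model above exists mainly so a Linux guest can power itself off; from the guest's side the whole sequence is a single register write that imx7_snvs_write() recognizes. A rough bare-metal equivalent (illustrative only, not QEMU code; the base address, LPCR offset and bit positions are assumptions based on i.MX7D documentation):

#include <stdint.h>

#define SNVS_BASE        0x30370000u   /* assumed i.MX7D SNVS base */
#define SNVS_LPCR        0x38u         /* assumed LPCR offset */
#define SNVS_LPCR_TOP    (1u << 6)
#define SNVS_LPCR_DP_EN  (1u << 5)

static void snvs_poweroff(void)
{
    volatile uint32_t *lpcr = (volatile uint32_t *)(SNVS_BASE + SNVS_LPCR);

    /* Setting both bits is what the model checks for before requesting
     * a guest shutdown. */
    *lpcr |= SNVS_LPCR_TOP | SNVS_LPCR_DP_EN;
}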
hw/misc/trace-events:
@@ -66,3 +66,7 @@ mps2_scc_cfg_read(unsigned function, unsigned device, uint32_t value) "MPS2 SCC
msf2_sysreg_write(uint64_t offset, uint32_t val, uint32_t prev) "msf2-sysreg write: addr 0x%08" HWADDR_PRIx " data 0x%" PRIx32 " prev 0x%" PRIx32
msf2_sysreg_read(uint64_t offset, uint32_t val) "msf2-sysreg read: addr 0x%08" HWADDR_PRIx " data 0x%08" PRIx32
msf2_sysreg_write_pll_status(void) "Invalid write to read only PLL status register"
+
+#hw/misc/imx7_gpr.c
+imx7_gpr_read(uint64_t offset) "addr 0x%08" HWADDR_PRIx
+imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" HWADDR_PRIx "value 0x%08" HWADDR_PRIx
include/hw/sd/sdhci-internal.h:
@@ -84,12 +84,18 @@

/* R/W Host control Register 0x0 */
#define SDHC_HOSTCTL                   0x28
#define SDHC_CTRL_LED                  0x01
+#define SDHC_CTRL_DMA_CHECK_MASK       0x18
+#define SDHC_CTRL_SDMA                 0x00
+#define SDHC_CTRL_ADMA1_32             0x08
+#define SDHC_CTRL_ADMA2_32             0x10
+#define SDHC_CTRL_ADMA2_64             0x18
+#define SDHC_DMA_TYPE(x)               ((x) & SDHC_CTRL_DMA_CHECK_MASK)
#define SDHC_CTRL_4BITBUS              0x02
#define SDHC_CTRL_8BITBUS              0x20
#define SDHC_CTRL_CDTEST_INS           0x40
#define SDHC_CTRL_CDTEST_EN            0x80


/* R/W Power Control Register 0x0 */
#define SDHC_PWRCON                    0x29

@@ -226,4 +232,21 @@ enum {
    sdhc_gap_write = 2   /* SDHC stopped at block gap during write operation */
};

extern const VMStateDescription sdhci_vmstate;


+#define ESDHC_MIX_CTRL                 0x48
+#define ESDHC_VENDOR_SPEC              0xc0
+#define ESDHC_DLL_CTRL                 0x60
+
+#define ESDHC_TUNING_CTRL              0xcc
+#define ESDHC_TUNE_CTRL_STATUS         0x68
+#define ESDHC_WTMK_LVL                 0x44
+
+/* Undocumented register used by guests working around erratum ERR004536 */
+#define ESDHC_UNDOCUMENTED_REG27       0x6c
+
+#define ESDHC_CTRL_4BITBUS             (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS             (0x2 << 1)
+
#endif
hw/sd/sdhci.c (230 lines changed)
@@ -244,7 +244,8 @@ static void sdhci_send_command(SDHCIState *s)
|
||||
}
|
||||
}
|
||||
|
||||
if ((s->norintstsen & SDHC_NISEN_TRSCMP) &&
|
||||
if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
|
||||
(s->norintstsen & SDHC_NISEN_TRSCMP) &&
|
||||
(s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) {
|
||||
s->norintsts |= SDHC_NIS_TRSCMP;
|
||||
}
|
||||
@ -1189,6 +1190,8 @@ static void sdhci_initfn(SDHCIState *s)
|
||||
|
||||
s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
|
||||
s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s);
|
||||
|
||||
s->io_ops = &sdhci_mmio_ops;
|
||||
}
|
||||
|
||||
static void sdhci_uninitfn(SDHCIState *s)
|
||||
@ -1396,6 +1399,10 @@ static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp)
|
||||
}
|
||||
|
||||
sysbus_init_irq(sbd, &s->irq);
|
||||
|
||||
memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci",
|
||||
SDHC_REGISTERS_MAP_SIZE);
|
||||
|
||||
sysbus_init_mmio(sbd, &s->iomem);
|
||||
}
|
||||
|
||||
@ -1447,11 +1454,232 @@ static const TypeInfo sdhci_bus_info = {
|
||||
.class_init = sdhci_bus_class_init,
|
||||
};
|
||||
|
||||
static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size)
|
||||
{
|
||||
SDHCIState *s = SYSBUS_SDHCI(opaque);
|
||||
uint32_t ret;
|
||||
uint16_t hostctl;
|
||||
|
||||
switch (offset) {
|
||||
default:
|
||||
return sdhci_read(opaque, offset, size);
|
||||
|
||||
case SDHC_HOSTCTL:
|
||||
/*
|
||||
* For a detailed explanation on the following bit
|
||||
* manipulation code see comments in a similar part of
|
||||
* usdhc_write()
|
||||
*/
|
||||
hostctl = SDHC_DMA_TYPE(s->hostctl) << (8 - 3);
|
||||
|
||||
if (s->hostctl & SDHC_CTRL_8BITBUS) {
|
||||
hostctl |= ESDHC_CTRL_8BITBUS;
|
||||
}
|
||||
|
||||
if (s->hostctl & SDHC_CTRL_4BITBUS) {
|
||||
hostctl |= ESDHC_CTRL_4BITBUS;
|
||||
}
|
||||
|
||||
ret = hostctl;
|
||||
ret |= (uint32_t)s->blkgap << 16;
|
||||
ret |= (uint32_t)s->wakcon << 24;
|
||||
|
||||
break;
|
||||
|
||||
case ESDHC_DLL_CTRL:
|
||||
case ESDHC_TUNE_CTRL_STATUS:
|
||||
case ESDHC_UNDOCUMENTED_REG27:
|
||||
case ESDHC_TUNING_CTRL:
|
||||
case ESDHC_VENDOR_SPEC:
|
||||
case ESDHC_MIX_CTRL:
|
||||
case ESDHC_WTMK_LVL:
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
|
||||
{
|
||||
SDHCIState *s = SYSBUS_SDHCI(opaque);
|
||||
uint8_t hostctl;
|
||||
uint32_t value = (uint32_t)val;
|
||||
|
||||
switch (offset) {
|
||||
case ESDHC_DLL_CTRL:
|
||||
case ESDHC_TUNE_CTRL_STATUS:
|
||||
case ESDHC_UNDOCUMENTED_REG27:
|
||||
case ESDHC_TUNING_CTRL:
|
||||
case ESDHC_WTMK_LVL:
|
||||
case ESDHC_VENDOR_SPEC:
|
||||
break;
|
||||
|
||||
case SDHC_HOSTCTL:
|
||||
/*
|
||||
* Here's What ESDHCI has at offset 0x28 (SDHC_HOSTCTL)
|
||||
*
|
||||
* 7 6 5 4 3 2 1 0
|
||||
* |-----------+--------+--------+-----------+----------+---------|
|
||||
* | Card | Card | Endian | DATA3 | Data | Led |
|
||||
* | Detect | Detect | Mode | as Card | Transfer | Control |
|
||||
* | Signal | Test | | Detection | Width | |
|
||||
* | Selection | Level | | Pin | | |
|
||||
* |-----------+--------+--------+-----------+----------+---------|
|
||||
*
|
||||
* and 0x29
|
||||
*
|
||||
* 15 10 9 8
|
||||
* |----------+------|
|
||||
* | Reserved | DMA |
|
||||
* | | Sel. |
|
||||
* | | |
|
||||
* |----------+------|
|
||||
*
|
||||
* and here's what SDCHI spec expects those offsets to be:
|
||||
*
|
||||
* 0x28 (Host Control Register)
|
||||
*
|
||||
* 7 6 5 4 3 2 1 0
|
||||
* |--------+--------+----------+------+--------+----------+---------|
|
||||
* | Card | Card | Extended | DMA | High | Data | LED |
|
||||
* | Detect | Detect | Data | Sel. | Speed | Transfer | Control |
|
||||
* | Signal | Test | Transfer | | Enable | Width | |
|
||||
* | Sel. | Level | Width | | | | |
|
||||
* |--------+--------+----------+------+--------+----------+---------|
|
||||
*
|
||||
* and 0x29 (Power Control Register)
|
||||
*
|
||||
* |----------------------------------|
|
||||
* | Power Control Register |
|
||||
* | |
|
||||
* | Description omitted, |
|
||||
* | since it has no analog in ESDHCI |
|
||||
* | |
|
||||
* |----------------------------------|
|
||||
*
|
||||
* Since offsets 0x2A and 0x2B should be compatible between
|
||||
* both IP specs we only need to reconcile least 16-bit of the
|
||||
* word we've been given.
|
||||
*/
|
||||
|
||||
/*
|
||||
* First, save bits 7 6 and 0 since they are identical
|
||||
*/
|
||||
hostctl = value & (SDHC_CTRL_LED |
|
||||
SDHC_CTRL_CDTEST_INS |
|
||||
SDHC_CTRL_CDTEST_EN);
|
||||
/*
|
||||
* Second, split "Data Transfer Width" from bits 2 and 1 in to
|
||||
* bits 5 and 1
|
||||
*/
|
||||
if (value & ESDHC_CTRL_8BITBUS) {
|
||||
hostctl |= SDHC_CTRL_8BITBUS;
|
||||
}
|
||||
|
||||
if (value & ESDHC_CTRL_4BITBUS) {
|
||||
hostctl |= ESDHC_CTRL_4BITBUS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Third, move DMA select from bits 9 and 8 to bits 4 and 3
|
||||
*/
|
||||
hostctl |= SDHC_DMA_TYPE(value >> (8 - 3));
|
||||
|
||||
/*
|
||||
* Now place the corrected value into low 16-bit of the value
|
||||
* we are going to give standard SDHCI write function
|
||||
*
|
||||
* NOTE: This transformation should be the inverse of what can
|
||||
* be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux
|
||||
* kernel
|
||||
*/
|
||||
value &= ~UINT16_MAX;
|
||||
value |= hostctl;
|
||||
value |= (uint16_t)s->pwrcon << 8;
|
||||
|
||||
sdhci_write(opaque, offset, value, size);
|
||||
break;
|
||||
|
||||
case ESDHC_MIX_CTRL:
|
||||
/*
|
||||
* So, when SD/MMC stack in Linux tries to write to "Transfer
|
||||
* Mode Register", ESDHC i.MX quirk code will translate it
|
||||
* into a write to ESDHC_MIX_CTRL, so we do the opposite in
|
||||
* order to get where we started
|
||||
*
|
||||
* Note that Auto CMD23 Enable bit is located in a wrong place
|
||||
* on i.MX, but since it is not used by QEMU we do not care.
|
||||
*
|
||||
* We don't want to call sdhci_write(.., SDHC_TRNMOD, ...)
|
||||
* here becuase it will result in a call to
|
||||
* sdhci_send_command(s) which we don't want.
|
||||
*
|
||||
*/
|
||||
s->trnmod = value & UINT16_MAX;
|
||||
break;
|
||||
case SDHC_TRNMOD:
|
||||
/*
|
||||
* Similar to above, but this time a write to "Command
|
||||
* Register" will be translated into a 4-byte write to
|
||||
* "Transfer Mode register" where lower 16-bit of value would
|
||||
* be set to zero. So what we do is fill those bits with
|
||||
* cached value from s->trnmod and let the SDHCI
|
||||
* infrastructure handle the rest
|
||||
*/
|
||||
sdhci_write(opaque, offset, val | s->trnmod, size);
|
||||
break;
|
||||
case SDHC_BLKSIZE:
|
||||
/*
|
||||
* ESDHCI does not implement "Host SDMA Buffer Boundary", and
|
||||
* Linux driver will try to zero this field out which will
|
||||
* break the rest of SDHCI emulation.
|
||||
*
|
||||
* Linux defaults to maximum possible setting (512K boundary)
|
||||
* and it seems to be the only option that i.MX IP implements,
|
||||
* so we artificially set it to that value.
|
||||
*/
|
||||
val |= 0x7 << 12;
|
||||
/* FALLTHROUGH */
|
||||
default:
|
||||
sdhci_write(opaque, offset, val, size);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static const MemoryRegionOps usdhc_mmio_ops = {
|
||||
.read = usdhc_read,
|
||||
.write = usdhc_write,
|
||||
.valid = {
|
||||
.min_access_size = 1,
|
||||
.max_access_size = 4,
|
||||
.unaligned = false
|
||||
},
|
||||
.endianness = DEVICE_LITTLE_ENDIAN,
|
||||
};
|
||||
|
||||
static void imx_usdhc_init(Object *obj)
|
||||
{
|
||||
SDHCIState *s = SYSBUS_SDHCI(obj);
|
||||
|
||||
s->io_ops = &usdhc_mmio_ops;
|
||||
s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ;
|
||||
}
|
||||
|
||||
static const TypeInfo imx_usdhc_info = {
|
||||
.name = TYPE_IMX_USDHC,
|
||||
.parent = TYPE_SYSBUS_SDHCI,
|
||||
.instance_init = imx_usdhc_init,
|
||||
};
|
||||
|
||||
static void sdhci_register_types(void)
|
||||
{
|
||||
type_register_static(&sdhci_pci_info);
|
||||
type_register_static(&sdhci_sysbus_info);
|
||||
type_register_static(&sdhci_bus_info);
|
||||
type_register_static(&imx_usdhc_info);
|
||||
}
|
||||
|
||||
type_init(sdhci_register_types)
|
||||
|
@ -113,6 +113,17 @@ static const IMXClk imx6_gpt_clocks[] = {
|
||||
CLK_HIGH, /* 111 reference clock */
|
||||
};
|
||||
|
||||
static const IMXClk imx7_gpt_clocks[] = {
|
||||
CLK_NONE, /* 000 No clock source */
|
||||
CLK_IPG, /* 001 ipg_clk, 532MHz*/
|
||||
CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */
|
||||
CLK_EXT, /* 011 External clock */
|
||||
CLK_32k, /* 100 ipg_clk_32k */
|
||||
CLK_HIGH, /* 101 reference clock */
|
||||
CLK_NONE, /* 110 not defined */
|
||||
CLK_NONE, /* 111 not defined */
|
||||
};
|
||||
|
||||
static void imx_gpt_set_freq(IMXGPTState *s)
|
||||
{
|
||||
uint32_t clksrc = extract32(s->cr, GPT_CR_CLKSRC_SHIFT, 3);
|
||||
@ -512,6 +523,13 @@ static void imx6_gpt_init(Object *obj)
|
||||
s->clocks = imx6_gpt_clocks;
|
||||
}
|
||||
|
||||
static void imx7_gpt_init(Object *obj)
|
||||
{
|
||||
IMXGPTState *s = IMX_GPT(obj);
|
||||
|
||||
s->clocks = imx7_gpt_clocks;
|
||||
}
|
||||
|
||||
static const TypeInfo imx25_gpt_info = {
|
||||
.name = TYPE_IMX25_GPT,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
@ -532,11 +550,18 @@ static const TypeInfo imx6_gpt_info = {
|
||||
.instance_init = imx6_gpt_init,
|
||||
};
|
||||
|
||||
static const TypeInfo imx7_gpt_info = {
|
||||
.name = TYPE_IMX7_GPT,
|
||||
.parent = TYPE_IMX25_GPT,
|
||||
.instance_init = imx7_gpt_init,
|
||||
};
|
||||
|
||||
static void imx_gpt_register_types(void)
|
||||
{
|
||||
type_register_static(&imx25_gpt_info);
|
||||
type_register_static(&imx31_gpt_info);
|
||||
type_register_static(&imx6_gpt_info);
|
||||
type_register_static(&imx7_gpt_info);
|
||||
}
|
||||
|
||||
type_init(imx_gpt_register_types)
|
||||
|
@ -12,6 +12,7 @@ common-obj-$(CONFIG_USB_XHCI_NEC) += hcd-xhci-nec.o
|
||||
common-obj-$(CONFIG_USB_MUSB) += hcd-musb.o
|
||||
|
||||
obj-$(CONFIG_TUSB6010) += tusb6010.o
|
||||
obj-$(CONFIG_IMX) += chipidea.o
|
||||
|
||||
# emulated usb devices
|
||||
common-obj-$(CONFIG_USB) += dev-hub.o
|
||||
|
hw/usb/chipidea.c (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Impinj, Inc.
|
||||
*
|
||||
* Chipidea USB block emulation code
|
||||
*
|
||||
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/usb/hcd-ehci.h"
|
||||
#include "hw/usb/chipidea.h"
|
||||
#include "qemu/log.h"
|
||||
|
||||
enum {
|
||||
CHIPIDEA_USBx_DCIVERSION = 0x000,
|
||||
CHIPIDEA_USBx_DCCPARAMS = 0x004,
|
||||
CHIPIDEA_USBx_DCCPARAMS_HC = BIT(8),
|
||||
};
|
||||
|
||||
static uint64_t chipidea_read(void *opaque, hwaddr offset,
|
||||
unsigned size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void chipidea_write(void *opaque, hwaddr offset,
|
||||
uint64_t value, unsigned size)
|
||||
{
|
||||
}
|
||||
|
||||
static const struct MemoryRegionOps chipidea_ops = {
|
||||
.read = chipidea_read,
|
||||
    .write = chipidea_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        /*
         * Our device would not work correctly if the guest was doing
         * unaligned access. This might not be a limitation on the
         * real device but in practice there is no reason for a guest
         * to access this device unaligned.
         */
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static uint64_t chipidea_dc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    switch (offset) {
    case CHIPIDEA_USBx_DCIVERSION:
        return 0x1;
    case CHIPIDEA_USBx_DCCPARAMS:
        /*
         * Real hardware (at least i.MX7) will also report the
         * controller as "Device Capable" (and 8 supported endpoints),
         * but there doesn't seem to be much point in doing so, since
         * we don't emulate that part.
         */
        return CHIPIDEA_USBx_DCCPARAMS_HC;
    }

    return 0;
}

static void chipidea_dc_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
}

static const struct MemoryRegionOps chipidea_dc_ops = {
    .read = chipidea_dc_read,
    .write = chipidea_dc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        /*
         * Our device would not work correctly if the guest was doing
         * unaligned access. This might not be a limitation on the real
         * device but in practice there is no reason for a guest to access
         * this device unaligned.
         */
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static void chipidea_init(Object *obj)
{
    EHCIState *ehci = &SYS_BUS_EHCI(obj)->ehci;
    ChipideaState *ci = CHIPIDEA(obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(ci->iomem); i++) {
        const struct {
            const char *name;
            hwaddr offset;
            uint64_t size;
            const struct MemoryRegionOps *ops;
        } regions[ARRAY_SIZE(ci->iomem)] = {
            /*
             * Registers located between offsets 0x000 and 0xFC
             */
            {
                .name   = TYPE_CHIPIDEA ".misc",
                .offset = 0x000,
                .size   = 0x100,
                .ops    = &chipidea_ops,
            },
            /*
             * Registers located between offsets 0x1A4 and 0x1DC
             */
            {
                .name   = TYPE_CHIPIDEA ".endpoints",
                .offset = 0x1A4,
                .size   = 0x1DC - 0x1A4 + 4,
                .ops    = &chipidea_ops,
            },
            /*
             * USB_x_DCIVERSION and USB_x_DCCPARAMS
             */
            {
                .name   = TYPE_CHIPIDEA ".dc",
                .offset = 0x120,
                .size   = 8,
                .ops    = &chipidea_dc_ops,
            },
        };

        memory_region_init_io(&ci->iomem[i],
                              obj,
                              regions[i].ops,
                              ci,
                              regions[i].name,
                              regions[i].size);

        memory_region_add_subregion(&ehci->mem,
                                    regions[i].offset,
                                    &ci->iomem[i]);
    }
}

static void chipidea_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass);

    /*
     * Offsets used were taken from i.MX7Dual Applications Processor
     * Reference Manual, Rev 0.1, p. 3177, Table 11-59
     */
    sec->capsbase  = 0x100;
    sec->opregbase = 0x140;
    sec->portnr    = 1;

    set_bit(DEVICE_CATEGORY_USB, dc->categories);
    dc->desc = "Chipidea USB Module";
}

static const TypeInfo chipidea_info = {
    .name          = TYPE_CHIPIDEA,
    .parent        = TYPE_SYS_BUS_EHCI,
    .instance_size = sizeof(ChipideaState),
    .instance_init = chipidea_init,
    .class_init    = chipidea_class_init,
};

static void chipidea_register_type(void)
{
    type_register_static(&chipidea_info);
}
type_init(chipidea_register_type)
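/*
 * Hedged usage sketch, not part of the patch: how a board or SoC model
 * could instantiate the controller registered above.  The MMIO address is
 * a placeholder, not taken from this series; check the target SoC's memory
 * map (e.g. the i.MX7D manual referenced in chipidea_class_init()) before
 * reusing it.
 */
static void example_board_add_chipidea(qemu_irq usb_irq)
{
    /* One sysbus device: the EHCI capability/operational registers plus
     * the three Chipidea-specific windows carved out by chipidea_init(). */
    sysbus_create_simple(TYPE_CHIPIDEA, 0x30b10000 /* placeholder */, usb_irq);
}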
include/hw/intc/imx_gpcv2.h (new file)
@ -0,0 +1,22 @@
#ifndef IMX_GPCV2_H
#define IMX_GPCV2_H

#include "hw/sysbus.h"

enum IMXGPCv2Registers {
    GPC_NUM = 0xE00 / sizeof(uint32_t),
};

typedef struct IMXGPCv2State {
    /*< private >*/
    SysBusDevice parent_obj;

    /*< public >*/
    MemoryRegion iomem;
    uint32_t regs[GPC_NUM];
} IMXGPCv2State;

#define TYPE_IMX_GPCV2 "imx-gpcv2"
#define IMX_GPCV2(obj) OBJECT_CHECK(IMXGPCv2State, (obj), TYPE_IMX_GPCV2)

#endif /* IMX_GPCV2_H */
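/*
 * Hedged sketch, not part of the patch: the GPCv2 model backs the whole
 * 0xE00-byte register window with the regs[] array above, so an MMIO read
 * callback would typically index it by word offset, along these lines
 * (the real implementation lives in hw/intc/imx_gpcv2.c, not shown here):
 */
static uint64_t example_gpcv2_read(void *opaque, hwaddr offset, unsigned size)
{
    IMXGPCv2State *s = opaque;

    /* offset is below 0xE00, so this stays inside regs[GPC_NUM] */
    return s->regs[offset / sizeof(uint32_t)];
}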
include/hw/misc/imx2_wdt.h (new file)
@ -0,0 +1,33 @@
/*
 * Copyright (c) 2017, Impinj, Inc.
 *
 * i.MX2 Watchdog IP block
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef IMX2_WDT_H
#define IMX2_WDT_H

#include "hw/sysbus.h"

#define TYPE_IMX2_WDT "imx2.wdt"
#define IMX2_WDT(obj) OBJECT_CHECK(IMX2WdtState, (obj), TYPE_IMX2_WDT)

enum IMX2WdtRegisters {
    IMX2_WDT_WCR     = 0x0000,
    IMX2_WDT_REG_NUM = 0x0008 / sizeof(uint16_t) + 1,
};


typedef struct IMX2WdtState {
    /* <private> */
    SysBusDevice parent_obj;

    MemoryRegion mmio;
} IMX2WdtState;

#endif /* IMX2_WDT_H */
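/*
 * Hedged illustration, not part of the patch: the REG_NUM arithmetic above
 * (0x0008 / sizeof(uint16_t) + 1 == 5) covers the five 16-bit watchdog
 * registers of this IP block.  The names and offsets below are taken from
 * the i.MX reference manuals, not from this header.
 */
enum {
    EXAMPLE_IMX2_WDT_WCR  = 0x0, /* control */
    EXAMPLE_IMX2_WDT_WSR  = 0x2, /* service: write 0x5555 then 0xAAAA to kick */
    EXAMPLE_IMX2_WDT_WRSR = 0x4, /* reset status */
    EXAMPLE_IMX2_WDT_WICR = 0x6, /* interrupt control */
    EXAMPLE_IMX2_WDT_WMCR = 0x8, /* misc control */
};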
include/hw/misc/imx7_ccm.h (new file)
@ -0,0 +1,139 @@
/*
 * Copyright (c) 2017, Impinj, Inc.
 *
 * i.MX7 CCM, PMU and ANALOG IP blocks emulation code
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef IMX7_CCM_H
#define IMX7_CCM_H

#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"

enum IMX7AnalogRegisters {
    ANALOG_PLL_ARM,
    ANALOG_PLL_ARM_SET,
    ANALOG_PLL_ARM_CLR,
    ANALOG_PLL_ARM_TOG,
    ANALOG_PLL_DDR,
    ANALOG_PLL_DDR_SET,
    ANALOG_PLL_DDR_CLR,
    ANALOG_PLL_DDR_TOG,
    ANALOG_PLL_DDR_SS,
    ANALOG_PLL_DDR_SS_SET,
    ANALOG_PLL_DDR_SS_CLR,
    ANALOG_PLL_DDR_SS_TOG,
    ANALOG_PLL_DDR_NUM,
    ANALOG_PLL_DDR_NUM_SET,
    ANALOG_PLL_DDR_NUM_CLR,
    ANALOG_PLL_DDR_NUM_TOG,
    ANALOG_PLL_DDR_DENOM,
    ANALOG_PLL_DDR_DENOM_SET,
    ANALOG_PLL_DDR_DENOM_CLR,
    ANALOG_PLL_DDR_DENOM_TOG,
    ANALOG_PLL_480,
    ANALOG_PLL_480_SET,
    ANALOG_PLL_480_CLR,
    ANALOG_PLL_480_TOG,
    ANALOG_PLL_480A,
    ANALOG_PLL_480A_SET,
    ANALOG_PLL_480A_CLR,
    ANALOG_PLL_480A_TOG,
    ANALOG_PLL_480B,
    ANALOG_PLL_480B_SET,
    ANALOG_PLL_480B_CLR,
    ANALOG_PLL_480B_TOG,
    ANALOG_PLL_ENET,
    ANALOG_PLL_ENET_SET,
    ANALOG_PLL_ENET_CLR,
    ANALOG_PLL_ENET_TOG,
    ANALOG_PLL_AUDIO,
    ANALOG_PLL_AUDIO_SET,
    ANALOG_PLL_AUDIO_CLR,
    ANALOG_PLL_AUDIO_TOG,
    ANALOG_PLL_AUDIO_SS,
    ANALOG_PLL_AUDIO_SS_SET,
    ANALOG_PLL_AUDIO_SS_CLR,
    ANALOG_PLL_AUDIO_SS_TOG,
    ANALOG_PLL_AUDIO_NUM,
    ANALOG_PLL_AUDIO_NUM_SET,
    ANALOG_PLL_AUDIO_NUM_CLR,
    ANALOG_PLL_AUDIO_NUM_TOG,
    ANALOG_PLL_AUDIO_DENOM,
    ANALOG_PLL_AUDIO_DENOM_SET,
    ANALOG_PLL_AUDIO_DENOM_CLR,
    ANALOG_PLL_AUDIO_DENOM_TOG,
    ANALOG_PLL_VIDEO,
    ANALOG_PLL_VIDEO_SET,
    ANALOG_PLL_VIDEO_CLR,
    ANALOG_PLL_VIDEO_TOG,
    ANALOG_PLL_VIDEO_SS,
    ANALOG_PLL_VIDEO_SS_SET,
    ANALOG_PLL_VIDEO_SS_CLR,
    ANALOG_PLL_VIDEO_SS_TOG,
    ANALOG_PLL_VIDEO_NUM,
    ANALOG_PLL_VIDEO_NUM_SET,
    ANALOG_PLL_VIDEO_NUM_CLR,
    ANALOG_PLL_VIDEO_NUM_TOG,
    ANALOG_PLL_VIDEO_DENOM,
    ANALOG_PLL_VIDEO_DENOM_SET,
    ANALOG_PLL_VIDEO_DENOM_CLR,
    ANALOG_PLL_VIDEO_DENOM_TOG,
    ANALOG_PLL_MISC0,
    ANALOG_PLL_MISC0_SET,
    ANALOG_PLL_MISC0_CLR,
    ANALOG_PLL_MISC0_TOG,

    ANALOG_DIGPROG = 0x800 / sizeof(uint32_t),
    ANALOG_MAX,

    ANALOG_PLL_LOCK = BIT(31)
};

enum IMX7CCMRegisters {
    CCM_MAX = 0xBE00 / sizeof(uint32_t) + 1,
};

enum IMX7PMURegisters {
    PMU_MAX = 0x140 / sizeof(uint32_t),
};

#define TYPE_IMX7_CCM "imx7.ccm"
#define IMX7_CCM(obj) OBJECT_CHECK(IMX7CCMState, (obj), TYPE_IMX7_CCM)

typedef struct IMX7CCMState {
    /* <private> */
    IMXCCMState parent_obj;

    /* <public> */
    MemoryRegion iomem;

    uint32_t ccm[CCM_MAX];
} IMX7CCMState;


#define TYPE_IMX7_ANALOG "imx7.analog"
#define IMX7_ANALOG(obj) OBJECT_CHECK(IMX7AnalogState, (obj), TYPE_IMX7_ANALOG)

typedef struct IMX7AnalogState {
    /* <private> */
    IMXCCMState parent_obj;

    /* <public> */
    struct {
        MemoryRegion container;
        MemoryRegion analog;
        MemoryRegion digprog;
        MemoryRegion pmu;
    } mmio;

    uint32_t analog[ANALOG_MAX];
    uint32_t pmu[PMU_MAX];
} IMX7AnalogState;

#endif /* IMX7_CCM_H */
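/*
 * Hedged sketch, not part of the patch: the strict _SET/_CLR/_TOG ordering
 * in the enum above mirrors the hardware convention that every ANALOG
 * register occupies four consecutive words, where writes to +0x4, +0x8 and
 * +0xC set, clear or toggle bits of the base register.  A write handler
 * built on that convention could look roughly like this:
 */
static void example_analog_write(uint32_t *analog, hwaddr offset,
                                 uint32_t value)
{
    uint32_t idx  = offset / sizeof(uint32_t);
    uint32_t base = idx & ~3u;              /* ANALOG_PLL_xxx      */

    switch (idx & 3) {
    case 0:
        analog[base] = value;               /* plain write         */
        break;
    case 1:
        analog[base] |= value;              /* ANALOG_PLL_xxx_SET  */
        break;
    case 2:
        analog[base] &= ~value;             /* ANALOG_PLL_xxx_CLR  */
        break;
    case 3:
        analog[base] ^= value;              /* ANALOG_PLL_xxx_TOG  */
        break;
    }
}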
include/hw/misc/imx7_gpr.h (new file)
@ -0,0 +1,28 @@
/*
 * Copyright (c) 2017, Impinj, Inc.
 *
 * i.MX7 GPR IP block emulation code
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef IMX7_GPR_H
#define IMX7_GPR_H

#include "qemu/bitops.h"
#include "hw/sysbus.h"

#define TYPE_IMX7_GPR "imx7.gpr"
#define IMX7_GPR(obj) OBJECT_CHECK(IMX7GPRState, (obj), TYPE_IMX7_GPR)

typedef struct IMX7GPRState {
    /* <private> */
    SysBusDevice parent_obj;

    MemoryRegion mmio;
} IMX7GPRState;

#endif /* IMX7_GPR_H */
include/hw/misc/imx7_snvs.h (new file)
@ -0,0 +1,35 @@
/*
 * Copyright (c) 2017, Impinj, Inc.
 *
 * i.MX7 SNVS block emulation code
 *
 * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef IMX7_SNVS_H
#define IMX7_SNVS_H

#include "qemu/bitops.h"
#include "hw/sysbus.h"


enum IMX7SNVSRegisters {
    SNVS_LPCR = 0x38,
    SNVS_LPCR_TOP   = BIT(6),
    SNVS_LPCR_DP_EN = BIT(5)
};

#define TYPE_IMX7_SNVS "imx7.snvs"
#define IMX7_SNVS(obj) OBJECT_CHECK(IMX7SNVSState, (obj), TYPE_IMX7_SNVS)

typedef struct IMX7SNVSState {
    /* <private> */
    SysBusDevice parent_obj;

    MemoryRegion mmio;
} IMX7SNVSState;

#endif /* IMX7_SNVS_H */
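/*
 * Hedged guest-side example, not part of the patch: i.MX7 firmware requests
 * power-off by setting TOP and DP_EN in SNVS_LPCR, which is the behaviour
 * this series models.  The base address below is an assumption taken from
 * the i.MX7D memory map, not from this header.
 */
#include <stdint.h>

#define EXAMPLE_IMX7_SNVS_BASE 0x30370000u  /* assumed i.MX7D SNVS base */

static void example_imx7_poweroff(void)
{
    volatile uint32_t *lpcr =
        (volatile uint32_t *)(EXAMPLE_IMX7_SNVS_BASE + SNVS_LPCR);

    *lpcr |= SNVS_LPCR_TOP | SNVS_LPCR_DP_EN;
}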
@ -44,6 +44,7 @@ typedef struct SDHCIState {
    AddressSpace sysbus_dma_as;
    AddressSpace *dma_as;
    MemoryRegion *dma_mr;
    const MemoryRegionOps *io_ops;

    QEMUTimer *insert_timer; /* timer for 'changing' sd card. */
    QEMUTimer *transfer_timer;
@ -91,8 +92,18 @@ typedef struct SDHCIState {

    /* Configurable properties */
    bool pending_insert_quirk; /* Quirk for Raspberry Pi card insert int */
    uint32_t quirks;
} SDHCIState;

/*
 * Controller does not provide transfer-complete interrupt when not
 * busy.
 *
 * NOTE: This definition is taken out of Linux kernel and so the
 * original bit number is preserved
 */
#define SDHCI_QUIRK_NO_BUSY_IRQ BIT(14)

#define TYPE_PCI_SDHCI "sdhci-pci"
#define PCI_SDHCI(obj) OBJECT_CHECK(SDHCIState, (obj), TYPE_PCI_SDHCI)

@ -100,4 +111,6 @@ typedef struct SDHCIState {
#define SYSBUS_SDHCI(obj) \
    OBJECT_CHECK(SDHCIState, (obj), TYPE_SYSBUS_SDHCI)

#define TYPE_IMX_USDHC "imx-usdhc"

#endif /* SDHCI_H */
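/*
 * Hedged sketch, not part of the patch: the quirks word above is meant to be
 * set once by a controller subtype (here the i.MX uSDHC) and then tested by
 * the common SDHCI code, roughly along these lines.  The function names are
 * illustrative only.
 */
static void example_usdhc_instance_init(SDHCIState *s)
{
    s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ;
}

static bool example_want_busy_irq(SDHCIState *s)
{
    /* Skip the transfer-complete interrupt for non-busy transfers when the
     * quirk is set, matching the comment above. */
    return !(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ);
}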
@ -77,6 +77,7 @@
#define TYPE_IMX25_GPT "imx25.gpt"
#define TYPE_IMX31_GPT "imx31.gpt"
#define TYPE_IMX6_GPT "imx6.gpt"
#define TYPE_IMX7_GPT "imx7.gpt"

#define TYPE_IMX_GPT TYPE_IMX25_GPT

include/hw/usb/chipidea.h (new file)
@ -0,0 +1,16 @@
#ifndef CHIPIDEA_H
#define CHIPIDEA_H

#include "hw/usb/hcd-ehci.h"

typedef struct ChipideaState {
    /*< private >*/
    EHCISysBusState parent_obj;

    MemoryRegion iomem[3];
} ChipideaState;

#define TYPE_CHIPIDEA "usb-chipidea"
#define CHIPIDEA(obj) OBJECT_CHECK(ChipideaState, (obj), TYPE_CHIPIDEA)

#endif /* CHIPIDEA_H */
@ -512,6 +512,21 @@ enum {
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
};

#define ELF_HWCAP get_elf_hwcap()
@ -532,6 +547,10 @@ static uint32_t get_elf_hwcap(void)
    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
    GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
    GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
    GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
#undef GET_FEATURE

    return hwcaps;
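/*
 * Hedged guest-side example, not part of the patch: a program run under
 * qemu's user-mode emulation can observe the bits above through AT_HWCAP.
 * The literal bit positions are copied from the enum; glibc also exposes
 * them as HWCAP_* macros.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    printf("sha3:%d sm3:%d sm4:%d sha512:%d sve:%d\n",
           !!(hwcap & (1ul << 17)), !!(hwcap & (1ul << 18)),
           !!(hwcap & (1ul << 19)), !!(hwcap & (1ul << 21)),
           !!(hwcap & (1ul << 22)));
    return 0;
}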
target/arm/cpu.h
@ -153,6 +153,49 @@ typedef struct {
|
||||
uint32_t base_mask;
|
||||
} TCR;
|
||||
|
||||
/* Define a maximum sized vector register.
|
||||
* For 32-bit, this is a 128-bit NEON/AdvSIMD register.
|
||||
* For 64-bit, this is a 2048-bit SVE register.
|
||||
*
|
||||
* Note that the mapping between S, D, and Q views of the register bank
|
||||
* differs between AArch64 and AArch32.
|
||||
* In AArch32:
|
||||
* Qn = regs[n].d[1]:regs[n].d[0]
|
||||
* Dn = regs[n / 2].d[n & 1]
|
||||
* Sn = regs[n / 4].d[n % 4 / 2],
|
||||
* bits 31..0 for even n, and bits 63..32 for odd n
|
||||
* (and regs[16] to regs[31] are inaccessible)
|
||||
* In AArch64:
|
||||
* Zn = regs[n].d[*]
|
||||
* Qn = regs[n].d[1]:regs[n].d[0]
|
||||
* Dn = regs[n].d[0]
|
||||
* Sn = regs[n].d[0] bits 31..0
|
||||
*
|
||||
* This corresponds to the architecturally defined mapping between
|
||||
* the two execution states, and means we do not need to explicitly
|
||||
* map these registers when changing states.
|
||||
*
|
||||
* Align the data for use with TCG host vector operations.
|
||||
*/
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
# define ARM_MAX_VQ 16
|
||||
#else
|
||||
# define ARM_MAX_VQ 1
|
||||
#endif
|
||||
|
||||
typedef struct ARMVectorReg {
|
||||
uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
|
||||
} ARMVectorReg;
|
||||
|
||||
/* In AArch32 mode, predicate registers do not exist at all. */
|
||||
#ifdef TARGET_AARCH64
|
||||
typedef struct ARMPredicateReg {
|
||||
uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
|
||||
} ARMPredicateReg;
|
||||
#endif
|
||||
|
||||
|
||||
typedef struct CPUARMState {
|
||||
/* Regs for current mode. */
|
||||
uint32_t regs[16];
|
||||
@ -477,22 +520,12 @@ typedef struct CPUARMState {
|
||||
|
||||
/* VFP coprocessor state. */
|
||||
struct {
|
||||
/* VFP/Neon register state. Note that the mapping between S, D and Q
|
||||
* views of the register bank differs between AArch64 and AArch32:
|
||||
* In AArch32:
|
||||
* Qn = regs[2n+1]:regs[2n]
|
||||
* Dn = regs[n]
|
||||
* Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
|
||||
* (and regs[32] to regs[63] are inaccessible)
|
||||
* In AArch64:
|
||||
* Qn = regs[2n+1]:regs[2n]
|
||||
* Dn = regs[2n]
|
||||
* Sn = regs[2n] bits 31..0
|
||||
* This corresponds to the architecturally defined mapping between
|
||||
* the two execution states, and means we do not need to explicitly
|
||||
* map these registers when changing states.
|
||||
*/
|
||||
uint64_t regs[64] QEMU_ALIGNED(16);
|
||||
ARMVectorReg zregs[32];
|
||||
|
||||
#ifdef TARGET_AARCH64
|
||||
/* Store FFR as pregs[16] to make it easier to treat as any other. */
|
||||
ARMPredicateReg pregs[17];
|
||||
#endif
|
||||
|
||||
uint32_t xregs[16];
|
||||
/* We store these fpcsr fields separately for convenience. */
|
||||
@ -516,6 +549,9 @@ typedef struct CPUARMState {
|
||||
*/
|
||||
float_status fp_status;
|
||||
float_status standard_fp_status;
|
||||
|
||||
/* ZCR_EL[1-3] */
|
||||
uint64_t zcr_el[4];
|
||||
} vfp;
|
||||
uint64_t exclusive_addr;
|
||||
uint64_t exclusive_val;
|
||||
@ -890,6 +926,8 @@ void pmccntr_sync(CPUARMState *env);
|
||||
#define CPTR_TCPAC (1U << 31)
|
||||
#define CPTR_TTA (1U << 20)
|
||||
#define CPTR_TFP (1U << 10)
|
||||
#define CPTR_TZ (1U << 8) /* CPTR_EL2 */
|
||||
#define CPTR_EZ (1U << 8) /* CPTR_EL3 */
|
||||
|
||||
#define MDCR_EPMAD (1U << 21)
|
||||
#define MDCR_EDAD (1U << 20)
|
||||
@ -1341,6 +1379,10 @@ enum arm_features {
|
||||
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
|
||||
ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
|
||||
ARM_FEATURE_SVE, /* has Scalable Vector Extension */
|
||||
ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
|
||||
ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
|
||||
ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
|
||||
ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
|
||||
};
|
||||
|
||||
static inline int arm_feature(CPUARMState *env, int feature)
|
||||
@ -1505,6 +1547,34 @@ static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
|
||||
* of architecturally banked exceptions.
|
||||
*/
|
||||
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
|
||||
/**
|
||||
* armv7m_nvic_set_pending_derived: mark this derived exception as pending
|
||||
* @opaque: the NVIC
|
||||
* @irq: the exception number to mark pending
|
||||
* @secure: false for non-banked exceptions or for the nonsecure
|
||||
* version of a banked exception, true for the secure version of a banked
|
||||
* exception.
|
||||
*
|
||||
* Similar to armv7m_nvic_set_pending(), but specifically for derived
|
||||
* exceptions (exceptions generated in the course of trying to take
|
||||
* a different exception).
|
||||
*/
|
||||
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
|
||||
/**
|
||||
* armv7m_nvic_get_pending_irq_info: return highest priority pending
|
||||
* exception, and whether it targets Secure state
|
||||
* @opaque: the NVIC
|
||||
* @pirq: set to pending exception number
|
||||
* @ptargets_secure: set to whether pending exception targets Secure
|
||||
*
|
||||
* This function writes the number of the highest priority pending
|
||||
* exception (the one which would be made active by
|
||||
* armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
|
||||
* to true if the current highest priority pending exception should
|
||||
* be taken to Secure state, false for NS.
|
||||
*/
|
||||
void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
|
||||
bool *ptargets_secure);
|
||||
/**
|
||||
* armv7m_nvic_acknowledge_irq: make highest priority pending exception active
|
||||
* @opaque: the NVIC
|
||||
@ -1512,10 +1582,8 @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
|
||||
* Move the current highest priority pending exception from the pending
|
||||
* state to the active state, and update v7m.exception to indicate that
|
||||
* it is the exception currently being handled.
|
||||
*
|
||||
* Returns: true if exception should be taken to Secure state, false for NS
|
||||
*/
|
||||
bool armv7m_nvic_acknowledge_irq(void *opaque);
|
||||
void armv7m_nvic_acknowledge_irq(void *opaque);
|
||||
/**
|
||||
* armv7m_nvic_complete_irq: complete specified interrupt or exception
|
||||
* @opaque: the NVIC
|
||||
@ -2610,6 +2678,10 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
|
||||
#define ARM_TBFLAG_TBI0_MASK (0x1ull << ARM_TBFLAG_TBI0_SHIFT)
|
||||
#define ARM_TBFLAG_TBI1_SHIFT 1 /* TBI1 for EL0/1 */
|
||||
#define ARM_TBFLAG_TBI1_MASK (0x1ull << ARM_TBFLAG_TBI1_SHIFT)
|
||||
#define ARM_TBFLAG_SVEEXC_EL_SHIFT 2
|
||||
#define ARM_TBFLAG_SVEEXC_EL_MASK (0x3 << ARM_TBFLAG_SVEEXC_EL_SHIFT)
|
||||
#define ARM_TBFLAG_ZCR_LEN_SHIFT 4
|
||||
#define ARM_TBFLAG_ZCR_LEN_MASK (0xf << ARM_TBFLAG_ZCR_LEN_SHIFT)
|
||||
|
||||
/* some convenience accessor macros */
|
||||
#define ARM_TBFLAG_AARCH64_STATE(F) \
|
||||
@ -2646,6 +2718,10 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
|
||||
(((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
|
||||
#define ARM_TBFLAG_TBI1(F) \
|
||||
(((F) & ARM_TBFLAG_TBI1_MASK) >> ARM_TBFLAG_TBI1_SHIFT)
|
||||
#define ARM_TBFLAG_SVEEXC_EL(F) \
|
||||
(((F) & ARM_TBFLAG_SVEEXC_EL_MASK) >> ARM_TBFLAG_SVEEXC_EL_SHIFT)
|
||||
#define ARM_TBFLAG_ZCR_LEN(F) \
|
||||
(((F) & ARM_TBFLAG_ZCR_LEN_MASK) >> ARM_TBFLAG_ZCR_LEN_SHIFT)
|
||||
|
||||
static inline bool bswap_code(bool sctlr_b)
|
||||
{
|
||||
@ -2769,7 +2845,7 @@ static inline void *arm_get_el_change_hook_opaque(ARMCPU *cpu)
|
||||
*/
|
||||
static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
|
||||
{
|
||||
return &env->vfp.regs[regno];
|
||||
return &env->vfp.zregs[regno >> 1].d[regno & 1];
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2778,7 +2854,7 @@ static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
|
||||
*/
|
||||
static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
|
||||
{
|
||||
return &env->vfp.regs[2 * regno];
|
||||
return &env->vfp.zregs[regno].d[0];
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2787,7 +2863,7 @@ static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
|
||||
*/
|
||||
static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
|
||||
{
|
||||
return &env->vfp.regs[2 * regno];
|
||||
return &env->vfp.zregs[regno].d[0];
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -224,6 +224,10 @@ static void aarch64_any_initfn(Object *obj)
    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
    set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
    set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
    set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
    set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
    set_feature(&cpu->env, ARM_FEATURE_CRC);
    cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
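/*
 * Hedged sketch, not part of the patch: the translator gates each of the new
 * instruction groups on the feature bits enabled above, raising UNDEF when a
 * bit is absent.  The real checks live in target/arm/translate-a64.c; the
 * function below only illustrates the shape of such a check.
 */
static void example_gate_sm4(DisasContext *s)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM4)) {
        unallocated_encoding(s);
        return;
    }
    /* ...emit the SM4E/SM4EKEY helper call here... */
}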
@ -1,7 +1,7 @@
/*
 * crypto_helper.c - emulate v8 Crypto Extensions instructions
 *
 * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 * Copyright (C) 2013 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
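/*
 * Hedged, self-contained reference, not part of the patch: the SHA512SU0 and
 * SHA512SU1 helpers added in the hunk below jointly compute the FIPS 180-4
 * message schedule  W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16],
 * with sigma0(x) = ROTR(x,1) ^ ROTR(x,8) ^ (x >> 7) and
 * sigma1(x) = ROTR(x,19) ^ ROTR(x,61) ^ (x >> 6).  rotr64() below stands in
 * for QEMU's ror64(); the caller is assumed to pass t >= 16.
 */
#include <stdint.h>

static uint64_t rotr64(uint64_t x, unsigned n)
{
    return (x >> n) | (x << (64 - n));
}

static uint64_t example_sha512_schedule(const uint64_t w[16], unsigned t)
{
    uint64_t w15 = w[(t - 15) % 16];
    uint64_t w2  = w[(t - 2) % 16];
    uint64_t s0  = rotr64(w15, 1) ^ rotr64(w15, 8) ^ (w15 >> 7);
    uint64_t s1  = rotr64(w2, 19) ^ rotr64(w2, 61) ^ (w2 >> 6);

    return w[t % 16] + s0 + w[(t - 7) % 16] + s1;   /* new W[t] */
}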
@ -419,3 +419,278 @@ void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm)
|
||||
rd[0] = d.l[0];
|
||||
rd[1] = d.l[1];
|
||||
}
|
||||
|
||||
/*
|
||||
* The SHA-512 logical functions (same as above but using 64-bit operands)
|
||||
*/
|
||||
|
||||
static uint64_t cho512(uint64_t x, uint64_t y, uint64_t z)
|
||||
{
|
||||
return (x & (y ^ z)) ^ z;
|
||||
}
|
||||
|
||||
static uint64_t maj512(uint64_t x, uint64_t y, uint64_t z)
|
||||
{
|
||||
return (x & y) | ((x | y) & z);
|
||||
}
|
||||
|
||||
static uint64_t S0_512(uint64_t x)
|
||||
{
|
||||
return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
|
||||
}
|
||||
|
||||
static uint64_t S1_512(uint64_t x)
|
||||
{
|
||||
return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
|
||||
}
|
||||
|
||||
static uint64_t s0_512(uint64_t x)
|
||||
{
|
||||
return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
|
||||
}
|
||||
|
||||
static uint64_t s1_512(uint64_t x)
|
||||
{
|
||||
return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
|
||||
}
|
||||
|
||||
void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
uint64_t d0 = rd[0];
|
||||
uint64_t d1 = rd[1];
|
||||
|
||||
d1 += S1_512(rm[1]) + cho512(rm[1], rn[0], rn[1]);
|
||||
d0 += S1_512(d1 + rm[0]) + cho512(d1 + rm[0], rm[1], rn[0]);
|
||||
|
||||
rd[0] = d0;
|
||||
rd[1] = d1;
|
||||
}
|
||||
|
||||
void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
uint64_t d0 = rd[0];
|
||||
uint64_t d1 = rd[1];
|
||||
|
||||
d1 += S0_512(rm[0]) + maj512(rn[0], rm[1], rm[0]);
|
||||
d0 += S0_512(d1) + maj512(d1, rm[0], rm[1]);
|
||||
|
||||
rd[0] = d0;
|
||||
rd[1] = d1;
|
||||
}
|
||||
|
||||
void HELPER(crypto_sha512su0)(void *vd, void *vn)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t d0 = rd[0];
|
||||
uint64_t d1 = rd[1];
|
||||
|
||||
d0 += s0_512(rd[1]);
|
||||
d1 += s0_512(rn[0]);
|
||||
|
||||
rd[0] = d0;
|
||||
rd[1] = d1;
|
||||
}
|
||||
|
||||
void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
|
||||
rd[0] += s1_512(rn[0]) + rm[0];
|
||||
rd[1] += s1_512(rn[1]) + rm[1];
|
||||
}
|
||||
|
||||
void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
|
||||
union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
|
||||
union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
|
||||
uint32_t t;
|
||||
|
||||
t = CR_ST_WORD(d, 0) ^ CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 1), 17);
|
||||
CR_ST_WORD(d, 0) = t ^ ror32(t, 17) ^ ror32(t, 9);
|
||||
|
||||
t = CR_ST_WORD(d, 1) ^ CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 2), 17);
|
||||
CR_ST_WORD(d, 1) = t ^ ror32(t, 17) ^ ror32(t, 9);
|
||||
|
||||
t = CR_ST_WORD(d, 2) ^ CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 3), 17);
|
||||
CR_ST_WORD(d, 2) = t ^ ror32(t, 17) ^ ror32(t, 9);
|
||||
|
||||
t = CR_ST_WORD(d, 3) ^ CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 0), 17);
|
||||
CR_ST_WORD(d, 3) = t ^ ror32(t, 17) ^ ror32(t, 9);
|
||||
|
||||
rd[0] = d.l[0];
|
||||
rd[1] = d.l[1];
|
||||
}
|
||||
|
||||
void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
|
||||
union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
|
||||
union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
|
||||
uint32_t t = CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 0), 25);
|
||||
|
||||
CR_ST_WORD(d, 0) ^= t;
|
||||
CR_ST_WORD(d, 1) ^= CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 1), 25);
|
||||
CR_ST_WORD(d, 2) ^= CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 2), 25);
|
||||
CR_ST_WORD(d, 3) ^= CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(m, 3), 25) ^
|
||||
ror32(t, 17) ^ ror32(t, 2) ^ ror32(t, 26);
|
||||
|
||||
rd[0] = d.l[0];
|
||||
rd[1] = d.l[1];
|
||||
}
|
||||
|
||||
void HELPER(crypto_sm3tt)(void *vd, void *vn, void *vm, uint32_t imm2,
|
||||
uint32_t opcode)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
|
||||
union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
|
||||
union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
|
||||
uint32_t t;
|
||||
|
||||
assert(imm2 < 4);
|
||||
|
||||
if (opcode == 0 || opcode == 2) {
|
||||
/* SM3TT1A, SM3TT2A */
|
||||
t = par(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1));
|
||||
} else if (opcode == 1) {
|
||||
/* SM3TT1B */
|
||||
t = maj(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1));
|
||||
} else if (opcode == 3) {
|
||||
/* SM3TT2B */
|
||||
t = cho(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1));
|
||||
} else {
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
t += CR_ST_WORD(d, 0) + CR_ST_WORD(m, imm2);
|
||||
|
||||
CR_ST_WORD(d, 0) = CR_ST_WORD(d, 1);
|
||||
|
||||
if (opcode < 2) {
|
||||
/* SM3TT1A, SM3TT1B */
|
||||
t += CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 3), 20);
|
||||
|
||||
CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 23);
|
||||
} else {
|
||||
/* SM3TT2A, SM3TT2B */
|
||||
t += CR_ST_WORD(n, 3);
|
||||
t ^= rol32(t, 9) ^ rol32(t, 17);
|
||||
|
||||
CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 13);
|
||||
}
|
||||
|
||||
CR_ST_WORD(d, 2) = CR_ST_WORD(d, 3);
|
||||
CR_ST_WORD(d, 3) = t;
|
||||
|
||||
rd[0] = d.l[0];
|
||||
rd[1] = d.l[1];
|
||||
}
|
||||
|
||||
static uint8_t const sm4_sbox[] = {
|
||||
0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
|
||||
0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
|
||||
0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
|
||||
0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
|
||||
0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
|
||||
0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
|
||||
0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
|
||||
0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
|
||||
0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
|
||||
0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
|
||||
0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
|
||||
0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
|
||||
0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
|
||||
0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
|
||||
0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
|
||||
0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
|
||||
0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
|
||||
0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
|
||||
0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
|
||||
0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
|
||||
0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
|
||||
0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
|
||||
0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
|
||||
0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
|
||||
0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
|
||||
0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
|
||||
0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
|
||||
0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
|
||||
0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
|
||||
0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
|
||||
0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
|
||||
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
|
||||
};
|
||||
|
||||
void HELPER(crypto_sm4e)(void *vd, void *vn)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
|
||||
union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
|
||||
uint32_t t, i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
t = CR_ST_WORD(d, (i + 1) % 4) ^
|
||||
CR_ST_WORD(d, (i + 2) % 4) ^
|
||||
CR_ST_WORD(d, (i + 3) % 4) ^
|
||||
CR_ST_WORD(n, i);
|
||||
|
||||
t = sm4_sbox[t & 0xff] |
|
||||
sm4_sbox[(t >> 8) & 0xff] << 8 |
|
||||
sm4_sbox[(t >> 16) & 0xff] << 16 |
|
||||
sm4_sbox[(t >> 24) & 0xff] << 24;
|
||||
|
||||
CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^
|
||||
rol32(t, 24);
|
||||
}
|
||||
|
||||
rd[0] = d.l[0];
|
||||
rd[1] = d.l[1];
|
||||
}
|
||||
|
||||
void HELPER(crypto_sm4ekey)(void *vd, void *vn, void* vm)
|
||||
{
|
||||
uint64_t *rd = vd;
|
||||
uint64_t *rn = vn;
|
||||
uint64_t *rm = vm;
|
||||
union CRYPTO_STATE d;
|
||||
union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
|
||||
union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
|
||||
uint32_t t, i;
|
||||
|
||||
d = n;
|
||||
for (i = 0; i < 4; i++) {
|
||||
t = CR_ST_WORD(d, (i + 1) % 4) ^
|
||||
CR_ST_WORD(d, (i + 2) % 4) ^
|
||||
CR_ST_WORD(d, (i + 3) % 4) ^
|
||||
CR_ST_WORD(m, i);
|
||||
|
||||
t = sm4_sbox[t & 0xff] |
|
||||
sm4_sbox[(t >> 8) & 0xff] << 8 |
|
||||
sm4_sbox[(t >> 16) & 0xff] << 16 |
|
||||
sm4_sbox[(t >> 24) & 0xff] << 24;
|
||||
|
||||
CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23);
|
||||
}
|
||||
|
||||
rd[0] = d.l[0];
|
||||
rd[1] = d.l[1];
|
||||
}
|
||||
|
@ -4266,6 +4266,125 @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
|
||||
REGINFO_SENTINEL
|
||||
};
|
||||
|
||||
/* Return the exception level to which SVE-disabled exceptions should
|
||||
* be taken, or 0 if SVE is enabled.
|
||||
*/
|
||||
static int sve_exception_el(CPUARMState *env)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
unsigned current_el = arm_current_el(env);
|
||||
|
||||
/* The CPACR.ZEN controls traps to EL1:
|
||||
* 0, 2 : trap EL0 and EL1 accesses
|
||||
* 1 : trap only EL0 accesses
|
||||
* 3 : trap no accesses
|
||||
*/
|
||||
switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
|
||||
default:
|
||||
if (current_el <= 1) {
|
||||
/* Trap to PL1, which might be EL1 or EL3 */
|
||||
if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
|
||||
return 3;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (current_el == 0) {
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Similarly for CPACR.FPEN, after having checked ZEN. */
|
||||
switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
|
||||
default:
|
||||
if (current_el <= 1) {
|
||||
if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
|
||||
return 3;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
if (current_el == 0) {
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
break;
|
||||
}
|
||||
|
||||
/* CPTR_EL2. Check both TZ and TFP. */
|
||||
if (current_el <= 2
|
||||
&& (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
|
||||
&& !arm_is_secure_below_el3(env)) {
|
||||
return 2;
|
||||
}
|
||||
|
||||
/* CPTR_EL3. Check both EZ and TFP. */
|
||||
if (!(env->cp15.cptr_el[3] & CPTR_EZ)
|
||||
|| (env->cp15.cptr_el[3] & CPTR_TFP)) {
|
||||
return 3;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static CPAccessResult zcr_access(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
bool isread)
|
||||
{
|
||||
switch (sve_exception_el(env)) {
|
||||
case 3:
|
||||
return CP_ACCESS_TRAP_EL3;
|
||||
case 2:
|
||||
return CP_ACCESS_TRAP_EL2;
|
||||
case 1:
|
||||
return CP_ACCESS_TRAP;
|
||||
}
|
||||
return CP_ACCESS_OK;
|
||||
}
|
||||
|
||||
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
uint64_t value)
|
||||
{
|
||||
/* Bits other than [3:0] are RAZ/WI. */
|
||||
raw_write(env, ri, value & 0xf);
|
||||
}
|
||||
|
||||
static const ARMCPRegInfo zcr_el1_reginfo = {
|
||||
.name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
|
||||
.access = PL1_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT,
|
||||
.fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
|
||||
.writefn = zcr_write, .raw_writefn = raw_write
|
||||
};
|
||||
|
||||
static const ARMCPRegInfo zcr_el2_reginfo = {
|
||||
.name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
|
||||
.access = PL2_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT,
|
||||
.fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
|
||||
.writefn = zcr_write, .raw_writefn = raw_write
|
||||
};
|
||||
|
||||
static const ARMCPRegInfo zcr_no_el2_reginfo = {
|
||||
.name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
|
||||
.access = PL2_RW, .type = ARM_CP_64BIT,
|
||||
.readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
|
||||
};
|
||||
|
||||
static const ARMCPRegInfo zcr_el3_reginfo = {
|
||||
.name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
|
||||
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
|
||||
.access = PL3_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT,
|
||||
.fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
|
||||
.writefn = zcr_write, .raw_writefn = raw_write
|
||||
};
|
||||
|
||||
void hw_watchpoint_update(ARMCPU *cpu, int n)
|
||||
{
|
||||
CPUARMState *env = &cpu->env;
|
||||
@ -5332,6 +5451,18 @@ void register_cp_regs_for_features(ARMCPU *cpu)
|
||||
}
|
||||
define_one_arm_cp_reg(cpu, &sctlr);
|
||||
}
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_SVE)) {
|
||||
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
|
||||
if (arm_feature(env, ARM_FEATURE_EL2)) {
|
||||
define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
|
||||
} else {
|
||||
define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
|
||||
}
|
||||
if (arm_feature(env, ARM_FEATURE_EL3)) {
|
||||
define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
|
||||
@ -6161,12 +6292,127 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
|
||||
return target_el;
|
||||
}
|
||||
|
||||
static void v7m_push(CPUARMState *env, uint32_t val)
|
||||
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
|
||||
ARMMMUIdx mmu_idx, bool ignfault)
|
||||
{
|
||||
CPUState *cs = CPU(arm_env_get_cpu(env));
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUARMState *env = &cpu->env;
|
||||
MemTxAttrs attrs = {};
|
||||
MemTxResult txres;
|
||||
target_ulong page_size;
|
||||
hwaddr physaddr;
|
||||
int prot;
|
||||
ARMMMUFaultInfo fi;
|
||||
bool secure = mmu_idx & ARM_MMU_IDX_M_S;
|
||||
int exc;
|
||||
bool exc_secure;
|
||||
|
||||
env->regs[13] -= 4;
|
||||
stl_phys(cs->as, env->regs[13], val);
|
||||
if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
|
||||
&attrs, &prot, &page_size, &fi, NULL)) {
|
||||
/* MPU/SAU lookup failed */
|
||||
if (fi.type == ARMFault_QEMU_SFault) {
|
||||
qemu_log_mask(CPU_LOG_INT,
|
||||
"...SecureFault with SFSR.AUVIOL during stacking\n");
|
||||
env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
|
||||
env->v7m.sfar = addr;
|
||||
exc = ARMV7M_EXCP_SECURE;
|
||||
exc_secure = false;
|
||||
} else {
|
||||
qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
|
||||
env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
|
||||
exc = ARMV7M_EXCP_MEM;
|
||||
exc_secure = secure;
|
||||
}
|
||||
goto pend_fault;
|
||||
}
|
||||
address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
|
||||
attrs, &txres);
|
||||
if (txres != MEMTX_OK) {
|
||||
/* BusFault trying to write the data */
|
||||
qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
|
||||
env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
|
||||
exc = ARMV7M_EXCP_BUS;
|
||||
exc_secure = false;
|
||||
goto pend_fault;
|
||||
}
|
||||
return true;
|
||||
|
||||
pend_fault:
|
||||
/* By pending the exception at this point we are making
|
||||
* the IMPDEF choice "overridden exceptions pended" (see the
|
||||
* MergeExcInfo() pseudocode). The other choice would be to not
|
||||
* pend them now and then make a choice about which to throw away
|
||||
* later if we have two derived exceptions.
|
||||
* The only case when we must not pend the exception but instead
|
||||
* throw it away is if we are doing the push of the callee registers
|
||||
* and we've already generated a derived exception. Even in this
|
||||
* case we will still update the fault status registers.
|
||||
*/
|
||||
if (!ignfault) {
|
||||
armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
|
||||
ARMMMUIdx mmu_idx)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUARMState *env = &cpu->env;
|
||||
MemTxAttrs attrs = {};
|
||||
MemTxResult txres;
|
||||
target_ulong page_size;
|
||||
hwaddr physaddr;
|
||||
int prot;
|
||||
ARMMMUFaultInfo fi;
|
||||
bool secure = mmu_idx & ARM_MMU_IDX_M_S;
|
||||
int exc;
|
||||
bool exc_secure;
|
||||
uint32_t value;
|
||||
|
||||
if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
|
||||
&attrs, &prot, &page_size, &fi, NULL)) {
|
||||
/* MPU/SAU lookup failed */
|
||||
if (fi.type == ARMFault_QEMU_SFault) {
|
||||
qemu_log_mask(CPU_LOG_INT,
|
||||
"...SecureFault with SFSR.AUVIOL during unstack\n");
|
||||
env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
|
||||
env->v7m.sfar = addr;
|
||||
exc = ARMV7M_EXCP_SECURE;
|
||||
exc_secure = false;
|
||||
} else {
|
||||
qemu_log_mask(CPU_LOG_INT,
|
||||
"...MemManageFault with CFSR.MUNSTKERR\n");
|
||||
env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
|
||||
exc = ARMV7M_EXCP_MEM;
|
||||
exc_secure = secure;
|
||||
}
|
||||
goto pend_fault;
|
||||
}
|
||||
|
||||
value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
|
||||
attrs, &txres);
|
||||
if (txres != MEMTX_OK) {
|
||||
/* BusFault trying to read the data */
|
||||
qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
|
||||
env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
|
||||
exc = ARMV7M_EXCP_BUS;
|
||||
exc_secure = false;
|
||||
goto pend_fault;
|
||||
}
|
||||
|
||||
*dest = value;
|
||||
return true;
|
||||
|
||||
pend_fault:
|
||||
/* By pending the exception at this point we are making
|
||||
* the IMPDEF choice "overridden exceptions pended" (see the
|
||||
* MergeExcInfo() pseudocode). The other choice would be to not
|
||||
* pend them now and then make a choice about which to throw away
|
||||
* later if we have two derived exceptions.
|
||||
*/
|
||||
armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Return true if we're using the process stack pointer (not the MSP) */
|
||||
@ -6395,65 +6641,126 @@ static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure)
|
||||
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
|
||||
uint32_t *pvec)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUARMState *env = &cpu->env;
|
||||
MemTxResult result;
|
||||
hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4;
|
||||
uint32_t addr;
|
||||
uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
|
||||
uint32_t vector_entry;
|
||||
MemTxAttrs attrs = {};
|
||||
ARMMMUIdx mmu_idx;
|
||||
bool exc_secure;
|
||||
|
||||
addr = address_space_ldl(cs->as, vec,
|
||||
MEMTXATTRS_UNSPECIFIED, &result);
|
||||
if (result != MEMTX_OK) {
|
||||
/* Architecturally this should cause a HardFault setting HSFR.VECTTBL,
|
||||
* which would then be immediately followed by our failing to load
|
||||
* the entry vector for that HardFault, which is a Lockup case.
|
||||
* Since we don't model Lockup, we just report this guest error
|
||||
* via cpu_abort().
|
||||
*/
|
||||
cpu_abort(cs, "Failed to read from %s exception vector table "
|
||||
"entry %08x\n", targets_secure ? "secure" : "nonsecure",
|
||||
(unsigned)vec);
|
||||
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
|
||||
|
||||
/* We don't do a get_phys_addr() here because the rules for vector
|
||||
* loads are special: they always use the default memory map, and
|
||||
* the default memory map permits reads from all addresses.
|
||||
* Since there's no easy way to pass through to pmsav8_mpu_lookup()
|
||||
* that we want this special case which would always say "yes",
|
||||
* we just do the SAU lookup here followed by a direct physical load.
|
||||
*/
|
||||
attrs.secure = targets_secure;
|
||||
attrs.user = false;
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
|
||||
V8M_SAttributes sattrs = {};
|
||||
|
||||
v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
|
||||
if (sattrs.ns) {
|
||||
attrs.secure = false;
|
||||
} else if (!targets_secure) {
|
||||
/* NS access to S memory */
|
||||
goto load_fail;
|
||||
}
|
||||
}
|
||||
return addr;
|
||||
|
||||
vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
|
||||
attrs, &result);
|
||||
if (result != MEMTX_OK) {
|
||||
goto load_fail;
|
||||
}
|
||||
*pvec = vector_entry;
|
||||
return true;
|
||||
|
||||
load_fail:
|
||||
/* All vector table fetch fails are reported as HardFault, with
|
||||
* HFSR.VECTTBL and .FORCED set. (FORCED is set because
|
||||
* technically the underlying exception is a MemManage or BusFault
|
||||
* that is escalated to HardFault.) This is a terminal exception,
|
||||
* so we will either take the HardFault immediately or else enter
|
||||
* lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
|
||||
*/
|
||||
exc_secure = targets_secure ||
|
||||
!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
|
||||
env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
|
||||
armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
|
||||
return false;
|
||||
}
|
||||
|
||||
static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain)
|
||||
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
|
||||
bool ignore_faults)
|
||||
{
|
||||
/* For v8M, push the callee-saves register part of the stack frame.
|
||||
* Compare the v8M pseudocode PushCalleeStack().
|
||||
* In the tailchaining case this may not be the current stack.
|
||||
*/
|
||||
CPUARMState *env = &cpu->env;
|
||||
CPUState *cs = CPU(cpu);
|
||||
uint32_t *frame_sp_p;
|
||||
uint32_t frameptr;
|
||||
ARMMMUIdx mmu_idx;
|
||||
bool stacked_ok;
|
||||
|
||||
if (dotailchain) {
|
||||
frame_sp_p = get_v7m_sp_ptr(env, true,
|
||||
lr & R_V7M_EXCRET_MODE_MASK,
|
||||
bool mode = lr & R_V7M_EXCRET_MODE_MASK;
|
||||
bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
|
||||
!mode;
|
||||
|
||||
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
|
||||
frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
|
||||
lr & R_V7M_EXCRET_SPSEL_MASK);
|
||||
} else {
|
||||
mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
|
||||
frame_sp_p = &env->regs[13];
|
||||
}
|
||||
|
||||
frameptr = *frame_sp_p - 0x28;
|
||||
|
||||
stl_phys(cs->as, frameptr, 0xfefa125b);
|
||||
stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
|
||||
stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
|
||||
stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
|
||||
stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
|
||||
stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
|
||||
stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
|
||||
stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
|
||||
stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
|
||||
/* Write as much of the stack frame as we can. A write failure may
|
||||
* cause us to pend a derived exception.
|
||||
*/
|
||||
stacked_ok =
|
||||
v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
|
||||
ignore_faults) &&
|
||||
v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
|
||||
ignore_faults);
|
||||
|
||||
/* Update SP regardless of whether any of the stack accesses failed.
|
||||
* When we implement v8M stack limit checking then this attempt to
|
||||
* update SP might also fail and result in a derived exception.
|
||||
*/
|
||||
*frame_sp_p = frameptr;
|
||||
|
||||
return !stacked_ok;
|
||||
}
|
||||
|
||||
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
|
||||
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
|
||||
bool ignore_stackfaults)
|
||||
{
|
||||
/* Do the "take the exception" parts of exception entry,
|
||||
* but not the pushing of state to the stack. This is
|
||||
@ -6462,8 +6769,10 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
|
||||
CPUARMState *env = &cpu->env;
|
||||
uint32_t addr;
|
||||
bool targets_secure;
|
||||
int exc;
|
||||
bool push_failed = false;
|
||||
|
||||
targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
|
||||
armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
|
||||
|
||||
if (arm_feature(env, ARM_FEATURE_V8)) {
|
||||
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
|
||||
@ -6489,7 +6798,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
|
||||
*/
|
||||
if (lr & R_V7M_EXCRET_DCRS_MASK &&
|
||||
!(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
|
||||
v7m_push_callee_stack(cpu, lr, dotailchain);
|
||||
push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
|
||||
ignore_stackfaults);
|
||||
}
|
||||
lr |= R_V7M_EXCRET_DCRS_MASK;
|
||||
}
|
||||
@ -6531,6 +6841,27 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
|
||||
}
|
||||
}
|
||||
|
||||
if (push_failed && !ignore_stackfaults) {
|
||||
/* Derived exception on callee-saves register stacking:
|
||||
* we might now want to take a different exception which
|
||||
* targets a different security state, so try again from the top.
|
||||
*/
|
||||
v7m_exception_taken(cpu, lr, true, true);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
|
||||
/* Vector load failed: derived exception */
|
||||
v7m_exception_taken(cpu, lr, true, true);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Now we've done everything that might cause a derived exception
|
||||
* we can go ahead and activate whichever exception we're going to
|
||||
* take (which might now be the derived exception).
|
||||
*/
|
||||
armv7m_nvic_acknowledge_irq(env->nvic);
|
||||
|
||||
/* Switch to target security state -- must do this before writing SPSEL */
|
||||
switch_v7m_security_state(env, targets_secure);
|
||||
write_v7m_control_spsel(env, 0);
|
||||
@ -6538,34 +6869,55 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
|
||||
/* Clear IT bits */
|
||||
env->condexec_bits = 0;
|
||||
env->regs[14] = lr;
|
||||
addr = arm_v7m_load_vector(cpu, targets_secure);
|
||||
env->regs[15] = addr & 0xfffffffe;
|
||||
env->thumb = addr & 1;
|
||||
}
|
||||
|
||||
static void v7m_push_stack(ARMCPU *cpu)
|
||||
static bool v7m_push_stack(ARMCPU *cpu)
|
||||
{
|
||||
/* Do the "set up stack frame" part of exception entry,
|
||||
* similar to pseudocode PushStack().
|
||||
* Return true if we generate a derived exception (and so
|
||||
* should ignore further stack faults trying to process
|
||||
* that derived exception.)
|
||||
*/
|
||||
bool stacked_ok;
|
||||
CPUARMState *env = &cpu->env;
|
||||
uint32_t xpsr = xpsr_read(env);
|
||||
uint32_t frameptr = env->regs[13];
|
||||
ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
|
||||
|
||||
/* Align stack pointer if the guest wants that */
|
||||
if ((env->regs[13] & 4) &&
|
||||
if ((frameptr & 4) &&
|
||||
(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
|
||||
env->regs[13] -= 4;
|
||||
frameptr -= 4;
|
||||
xpsr |= XPSR_SPREALIGN;
|
||||
}
|
||||
/* Switch to the handler mode. */
|
||||
v7m_push(env, xpsr);
|
||||
v7m_push(env, env->regs[15]);
|
||||
v7m_push(env, env->regs[14]);
|
||||
v7m_push(env, env->regs[12]);
|
||||
v7m_push(env, env->regs[3]);
|
||||
v7m_push(env, env->regs[2]);
|
||||
v7m_push(env, env->regs[1]);
|
||||
v7m_push(env, env->regs[0]);
|
||||
|
||||
frameptr -= 0x20;
|
||||
|
||||
/* Write as much of the stack frame as we can. If we fail a stack
|
||||
* write this will result in a derived exception being pended
|
||||
* (which may be taken in preference to the one we started with
|
||||
* if it has higher priority).
|
||||
*/
|
||||
stacked_ok =
|
||||
v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
|
||||
v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
|
||||
|
||||
/* Update SP regardless of whether any of the stack accesses failed.
|
||||
* When we implement v8M stack limit checking then this attempt to
|
||||
* update SP might also fail and result in a derived exception.
|
||||
*/
|
||||
env->regs[13] = frameptr;
|
||||
|
||||
return !stacked_ok;
|
||||
}
|
||||
|
||||
static void do_v7m_exception_exit(ARMCPU *cpu)
|
||||
@ -6711,7 +7063,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
|
||||
if (sfault) {
|
||||
env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
|
||||
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
|
||||
v7m_exception_taken(cpu, excret, true);
|
||||
v7m_exception_taken(cpu, excret, true, false);
|
||||
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
|
||||
"stackframe: failed EXC_RETURN.ES validity check\n");
|
||||
return;
|
||||
@ -6723,7 +7075,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
|
||||
*/
|
||||
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
|
||||
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
|
||||
v7m_exception_taken(cpu, excret, true);
|
||||
v7m_exception_taken(cpu, excret, true, false);
|
||||
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
|
||||
"stackframe: failed exception return integrity check\n");
|
||||
return;
|
||||
@ -6752,6 +7104,11 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
|
||||
!return_to_handler,
|
||||
return_to_sp_process);
|
||||
uint32_t frameptr = *frame_sp_p;
|
||||
bool pop_ok = true;
|
||||
ARMMMUIdx mmu_idx;
|
||||
|
||||
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
|
||||
!return_to_handler);
|
||||
|
||||
if (!QEMU_IS_ALIGNED(frameptr, 8) &&
|
||||
arm_feature(env, ARM_FEATURE_V8)) {
|
||||
@ -6771,36 +7128,45 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
|
||||
/* Take a SecureFault on the current stack */
|
||||
env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
|
||||
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
|
||||
v7m_exception_taken(cpu, excret, true);
|
||||
v7m_exception_taken(cpu, excret, true, false);
|
||||
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
|
||||
"stackframe: failed exception return integrity "
|
||||
"signature check\n");
|
||||
return;
|
||||
}
|
||||
|
||||
env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
|
||||
env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
|
||||
env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
|
||||
env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
|
||||
env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
|
||||
env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
|
||||
env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
|
||||
env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);
|
||||
pop_ok =
|
||||
v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
|
||||
v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
|
||||
v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
|
||||
v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
|
||||
v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
|
||||
                       v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                       v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                       v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers.  TODO: make these accesses use the correct
         * attributes and address space (S/NS, priv/unpriv) and handle
         * memory transaction failures.
         */
        env->regs[0] = ldl_phys(cs->as, frameptr);
        env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
        env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
        env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
        env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
        env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
        env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
@@ -6819,8 +7185,6 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
            }
        }

        xpsr = ldl_phys(cs->as, frameptr + 0x1c);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* For v8M we have to check whether the xPSR exception field
         * matches the EXCRET value for return to handler/thread
@@ -6836,7 +7200,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            v7m_exception_taken(cpu, excret, true);
            v7m_exception_taken(cpu, excret, true, false);
            qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                          "stackframe: failed exception return integrity "
                          "check\n");
@@ -6869,11 +7233,13 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        v7m_push_stack(cpu);
        v7m_exception_taken(cpu, excret, false);
        ignore_stackfaults = v7m_push_stack(cpu);
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        return;
@@ -7114,6 +7480,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

@@ -7288,8 +7655,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false);
    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}

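In outline, every exception-entry path touched above now follows the same shape: perform the stacking step, record whether it faulted, and forward that flag when the exception is actually taken. The fragment below only restates the calls already visible in this diff; the comment describes the flag's role as I read the surrounding changes, not additional QEMU code.

    bool ignore_stackfaults;

    /*
     * Assumed contract from this series: v7m_push_stack() returns true when
     * the register stacking itself faulted, and v7m_exception_taken() then
     * avoids escalating further stacking faults while entering the derived
     * exception.
     */
    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
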
@@ -11692,14 +12059,37 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        int sve_el = sve_exception_el(env);
        uint32_t zcr_len;

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;

        /* If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            int current_el = arm_current_el(env);

            zcr_len = env->vfp.zcr_el[current_el <= 1 ? 1 : current_el];
            zcr_len &= 0xf;
            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
            }
            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
            }
        }
        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
@@ -11742,7 +12132,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
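The zcr_len computation above can be read as: take ZCR_ELx.LEN for the current exception level (EL0 uses EL1's register), then clamp it by the LEN field of every higher implemented EL; the disabled-SVE-but-enabled-FP case reports length 0 separately. A minimal standalone sketch of that reduction follows; the function and parameter names are illustrative, not the QEMU API.

    #include <stdint.h>

    static uint32_t min_u32(uint32_t a, uint32_t b)
    {
        return a < b ? a : b;
    }

    /*
     * Illustrative only: compute the 4-bit effective vector-length field from
     * per-EL ZCR values, mirroring the clamping in the hunk above.  zcr[1..3]
     * stand in for ZCR_EL1..ZCR_EL3; have_el2/have_el3 say whether those
     * exception levels are implemented.
     */
    static uint32_t effective_zcr_len(unsigned current_el, const uint64_t zcr[4],
                                      int have_el2, int have_el3)
    {
        uint32_t len = zcr[current_el <= 1 ? 1 : current_el] & 0xf;

        if (current_el < 2 && have_el2) {
            len = min_u32(len, zcr[2] & 0xf);
        }
        if (current_el < 3 && have_el3) {
            len = min_u32(len, zcr[3] & 0xf);
        }
        return len;
    }
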
@@ -534,6 +534,18 @@ DEF_HELPER_FLAGS_3(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
DEF_HELPER_FLAGS_2(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr)
DEF_HELPER_FLAGS_3(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)

DEF_HELPER_FLAGS_3(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
DEF_HELPER_FLAGS_3(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
DEF_HELPER_FLAGS_2(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr)
DEF_HELPER_FLAGS_3(crypto_sha512su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)

DEF_HELPER_FLAGS_5(crypto_sm3tt, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32)
DEF_HELPER_FLAGS_3(crypto_sm3partw1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
DEF_HELPER_FLAGS_3(crypto_sm3partw2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)

DEF_HELPER_FLAGS_2(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr)
DEF_HELPER_FLAGS_3(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)

DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
DEF_HELPER_2(dc_zva, void, env, i64)
@@ -234,6 +234,10 @@ static inline const char *gicv3_class_name(void)
        exit(1);
#endif
    } else {
        if (kvm_enabled()) {
            error_report("Userspace GICv3 is not supported with KVM");
            exit(1);
        }
        return "arm-gicv3";
    }
}
@@ -50,7 +50,40 @@ static const VMStateDescription vmstate_vfp = {
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vfp.regs, ARMCPU, 64),
        /* For compatibility, store Qn out of Zn here.  */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
@@ -89,6 +122,56 @@ static const VMStateDescription vmstate_iwmmxt = {
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_SVE);
}

/* The first two words of each Zreg is stored in VFP state.  */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
@@ -553,6 +636,9 @@ const VMStateDescription vmstate_arm_cpu = {
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        NULL
    }
};

@@ -525,8 +525,8 @@ static inline int vec_reg_offset(DisasContext *s, int regno,
{
    int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.regs[2n] is
     * still the low half and vfp.regs[2n+1] the high half
    /* This is complicated slightly because vfp.zregs[n].d[0] is
     * still the low half and vfp.zregs[n].d[1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
@@ -536,7 +536,7 @@ static inline int vec_reg_offset(DisasContext *s, int regno,
#else
    offs += element * (1 << size);
#endif
    offs += offsetof(CPUARMState, vfp.regs[regno * 2]);
    offs += offsetof(CPUARMState, vfp.zregs[regno]);
    assert_fp_access_checked(s);
    return offs;
}
@@ -545,7 +545,7 @@ static inline int vec_reg_offset(DisasContext *s, int regno,
static inline int vec_full_reg_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.regs[regno * 2]);
    return offsetof(CPUARMState, vfp.zregs[regno]);
}

/* Return a newly allocated pointer to the vector register.  */
@@ -11587,6 +11587,341 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto three-reg SHA512
 *  31                   21 20  16 15 14 13 12 11 10  9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = ARM_FEATURE_V8_SHA3;
            genfn = NULL;
            break;
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = ARM_FEATURE_V8_SM4;
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}

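For reference, the RAX1 case above (the genfn == NULL path) XORs each 64-bit lane of the first operand with the second operand rotated left by one. A minimal scalar sketch of that semantics, independent of the TCG plumbing; the function name is chosen here for illustration only.

    #include <stdint.h>

    /* Illustrative only: Vd.D[i] = Vn.D[i] ^ ROL64(Vm.D[i], 1) */
    static uint64_t rax1_lane(uint64_t n, uint64_t m)
    {
        return n ^ ((m << 1) | (m >> 63));
    }
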
/* Crypto two-reg SHA512
 *  31                                     12 11 10  9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    int feature;
    CryptoTwoOpFn *genfn;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = ARM_FEATURE_V8_SHA512;
        genfn = gen_helper_crypto_sha512su0;
        break;
    case 1: /* SM4E */
        feature = ARM_FEATURE_V8_SM4;
        genfn = gen_helper_crypto_sm4e;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}

/* Crypto four-register
 *  31               23 22 21 20  16 15 14    10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = ARM_FEATURE_V8_SHA3;
        break;
    case 2: /* SM3SS1 */
        feature = ARM_FEATURE_V8_SM3;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}

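As a plain-C restatement of the arithmetic in the op0 == 2 branch above: SM3SS1 rotates word 3 of Rn left by 12 (written above as a rotate right by 20), adds words 3 of Rm and Ra, rotates the sum left by 7 (rotate right by 25), and stores the result in word 3 of Rd with the other words zeroed. A minimal sketch, with the helper names invented here purely for illustration.

    #include <stdint.h>

    static uint32_t rol32(uint32_t x, unsigned r)
    {
        r &= 31;
        return r ? (x << r) | (x >> (32 - r)) : x;
    }

    /* Illustrative only: the value SM3SS1 places in Vd.S[3] */
    static uint32_t sm3ss1_word(uint32_t n3, uint32_t m3, uint32_t a3)
    {
        return rol32(rol32(n3, 12) + m3 + a3, 7);
    }
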
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}

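The XAR loop above XORs the two 64-bit lanes of Rn and Rm and rotates each result right by imm6. A hedged scalar equivalent, with the helper name chosen here for illustration:

    #include <stdint.h>

    /* Illustrative only: Vd.D[i] = ROR64(Vn.D[i] ^ Vm.D[i], imm6) */
    static uint64_t xar_lane(uint64_t n, uint64_t m, unsigned imm6)
    {
        uint64_t x = n ^ m;
        unsigned r = imm6 & 63;

        return r ? (x >> r) | (x << (64 - r)) : x;
    }
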
/* Crypto three-reg imm2
 *  31                   21 20  16 15 14 13 12 11 10  9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    TCGv_i32 tcg_imm2, tcg_opcode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
    tcg_imm2 = tcg_const_i32(imm2);
    tcg_opcode = tcg_const_i32(opcode);

    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
                            tcg_opcode);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
    tcg_temp_free_i32(tcg_imm2);
    tcg_temp_free_i32(tcg_opcode);
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
@@ -11616,6 +11951,11 @@ static const AArch64DecodeTable data_proc_simd[] = {
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x00000000, 0x00000000, NULL }
};

@@ -11718,6 +12058,8 @@ static int aarch64_tr_init_disas_context(DisasContextBase *dcbase,
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;

@@ -1512,13 +1512,12 @@ static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
    }
}

static inline long
vfp_reg_offset (int dp, int reg)
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.regs[reg]);
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.regs[reg >> 1]);
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
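The rewritten vfp_reg_offset() above maps the old flat vfp.regs[] indices onto the SVE-sized zregs[] array: double-precision register Dn now lives in zregs[n >> 1].d[n & 1], and single-precision Sn selects a 32-bit half of zregs[n >> 2].d[(n >> 1) & 1]. A standalone sketch of that index arithmetic, with structure and field handling simplified relative to the surrounding code:

    /* Which zreg and which 64-bit lane hold double-precision register Dn */
    static void dreg_location(unsigned dn, unsigned *zreg, unsigned *lane)
    {
        *zreg = dn >> 1;
        *lane = dn & 1;
    }

    /* Which zreg, 64-bit lane and 32-bit word hold single-precision Sn */
    static void sreg_location(unsigned sn, unsigned *zreg, unsigned *lane,
                              unsigned *word)
    {
        *zreg = sn >> 2;
        *lane = (sn >> 1) & 1;
        *word = sn & 1;   /* odd Sn sits in the upper word of the lane */
    }
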
@@ -9926,6 +9925,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                tcg_temp_free_i32(addr);
                tcg_temp_free_i32(op);
                store_reg(s, rd, ttresp);
                break;
            }
            goto illegal_op;
        }

@@ -29,6 +29,8 @@ typedef struct DisasContext {
    bool tbi1;         /* TBI1 for EL0/1, not used for EL2/3 */
    bool ns;           /* Use non-secure CPREG bank on access */
    int fp_excp_el;    /* FP exception EL or 0 if enabled */
    int sve_excp_el;   /* SVE exception EL or 0 if enabled */
    int sve_len;       /* SVE vector length in bytes */
    /* Flag indicating that exceptions from secure mode are routed to EL3. */
    bool secure_routed_to_el3;
    bool vfp_enabled;  /* FP enabled via FPSCR.EN */