Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180622' into staging

target-arm queue:
 * hw/intc/arm_gicv3: fix wrong values when reading IPRIORITYR
 * target/arm: fix read of freed memory in kvm_arm_machine_init_done()
 * virt: support up to 512 CPUs
 * virt: support 256MB ECAM PCI region (for more PCI devices)
 * xlnx-zynqmp: Use Cortex-R5F, not Cortex-R5
 * mps2-tz: Implement and use the TrustZone Memory Protection Controller
 * target/arm: enforce alignment checking for v6M cores
 * xen: Don't use memory_region_init_ram_nomigrate() in pci_assign_dev_load_option_rom()
 * vl.c: Don't zero-initialize statics for serial_hds

# gpg: Signature made Fri 22 Jun 2018 13:56:00 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20180622: (28 commits)
  xen: Don't use memory_region_init_ram_nomigrate() in pci_assign_dev_load_option_rom()
  vl.c: Don't zero-initialize statics for serial_hds
  target/arm: Strict alignment for ARMv6-M and ARMv8-M Baseline
  target/arm: Introduce ARM_FEATURE_M_MAIN
  hw/arm/mps2-tz.c: Instantiate MPCs
  hw/arm/iotkit: Wire up MPC interrupt lines
  hw/arm/iotkit: Instantiate MPC
  hw/misc/iotkit-secctl.c: Implement SECMPCINTSTATUS
  hw/misc/tz_mpc.c: Honour the BLK_LUT settings in translate
  hw/misc/tz-mpc.c: Implement correct blocked-access behaviour
  hw/misc/tz-mpc.c: Implement registers
  hw/misc/tz-mpc.c: Implement the Arm TrustZone Memory Protection Controller
  xlnx-zynqmp: Swap Cortex-R5 for Cortex-R5F
  target-arm: Add the Cortex-R5F
  hw/arm/virt: Increase max_cpus to 512
  hw/arm/virt: Use 256MB ECAM region by default
  hw/arm/virt: Add virt-3.0 machine type
  hw/arm/virt: Add a new 256MB ECAM region
  hw/arm/virt: Register two redistributor regions when necessary
  hw/arm/virt-acpi-build: Advertise one or two GICR structures
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2018-06-22 16:03:31 +01:00
commit 5fce312200
48 changed files with 1250 additions and 111 deletions


@ -457,6 +457,8 @@ F: hw/char/cmsdk-apb-uart.c
F: include/hw/char/cmsdk-apb-uart.h
F: hw/misc/tz-ppc.c
F: include/hw/misc/tz-ppc.h
F: hw/misc/tz-mpc.c
F: include/hw/misc/tz-mpc.h
ARM cores
M: Peter Maydell <peter.maydell@linaro.org>


@ -108,6 +108,7 @@ CONFIG_CMSDK_APB_UART=y
CONFIG_MPS2_FPGAIO=y
CONFIG_MPS2_SCC=y
CONFIG_TZ_MPC=y
CONFIG_TZ_PPC=y
CONFIG_IOTKIT=y
CONFIG_IOTKIT_SECCTL=y


@ -130,6 +130,19 @@ static void iotkit_init(Object *obj)
TYPE_TZ_PPC);
init_sysbus_child(obj, "apb-ppc1", &s->apb_ppc1, sizeof(s->apb_ppc1),
TYPE_TZ_PPC);
init_sysbus_child(obj, "mpc", &s->mpc, sizeof(s->mpc), TYPE_TZ_MPC);
object_initialize(&s->mpc_irq_orgate, sizeof(s->mpc_irq_orgate),
TYPE_OR_IRQ);
object_property_add_child(obj, "mpc-irq-orgate",
OBJECT(&s->mpc_irq_orgate), &error_abort);
for (i = 0; i < ARRAY_SIZE(s->mpc_irq_splitter); i++) {
char *name = g_strdup_printf("mpc-irq-splitter-%d", i);
SplitIRQ *splitter = &s->mpc_irq_splitter[i];
object_initialize(splitter, sizeof(*splitter), TYPE_SPLIT_IRQ);
object_property_add_child(obj, name, OBJECT(splitter), &error_abort);
g_free(name);
}
init_sysbus_child(obj, "timer0", &s->timer0, sizeof(s->timer0),
TYPE_CMSDK_APB_TIMER);
init_sysbus_child(obj, "timer1", &s->timer1, sizeof(s->timer1),
@ -162,6 +175,12 @@ static void iotkit_exp_irq(void *opaque, int n, int level)
qemu_set_irq(s->exp_irqs[n], level);
}
static void iotkit_mpcexp_status(void *opaque, int n, int level)
{
IoTKit *s = IOTKIT(opaque);
qemu_set_irq(s->mpcexp_status_in[n], level);
}
static void iotkit_realize(DeviceState *dev, Error **errp)
{
IoTKit *s = IOTKIT(dev);
@ -266,15 +285,6 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
*/
make_alias(s, &s->alias3, "alias 3", 0x50000000, 0x10000000, 0x40000000);
/* This RAM should be behind a Memory Protection Controller, but we
* don't implement that yet.
*/
memory_region_init_ram(&s->sram0, NULL, "iotkit.sram0", 0x00008000, &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->container, 0x20000000, &s->sram0);
/* Security controller */
object_property_set_bool(OBJECT(&s->secctl), true, "realized", &err);
@ -310,6 +320,48 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
qdev_connect_gpio_out_named(dev_secctl, "sec_resp_cfg", 0,
qdev_get_gpio_in(dev_splitter, 0));
/* This RAM lives behind the Memory Protection Controller */
memory_region_init_ram(&s->sram0, NULL, "iotkit.sram0", 0x00008000, &err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_link(OBJECT(&s->mpc), OBJECT(&s->sram0),
"downstream", &err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_bool(OBJECT(&s->mpc), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
/* Map the upstream end of the MPC into the right place... */
memory_region_add_subregion(&s->container, 0x20000000,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mpc),
1));
/* ...and its register interface */
memory_region_add_subregion(&s->container, 0x50083000,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mpc),
0));
/* We must OR together lines from the MPC splitters to go to the NVIC */
object_property_set_int(OBJECT(&s->mpc_irq_orgate),
IOTS_NUM_EXP_MPC + IOTS_NUM_MPC, "num-lines", &err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_bool(OBJECT(&s->mpc_irq_orgate), true,
"realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
qdev_connect_gpio_out(DEVICE(&s->mpc_irq_orgate), 0,
qdev_get_gpio_in(DEVICE(&s->armv7m), 9));
/* Devices behind APB PPC0:
* 0x40000000: timer0
* 0x40001000: timer1
@ -473,8 +525,6 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
create_unimplemented_device("NS watchdog", 0x40081000, 0x1000);
create_unimplemented_device("S watchdog", 0x50081000, 0x1000);
create_unimplemented_device("SRAM0 MPC", 0x50083000, 0x1000);
for (i = 0; i < ARRAY_SIZE(s->ppc_irq_splitter); i++) {
Object *splitter = OBJECT(&s->ppc_irq_splitter[i]);
@ -520,6 +570,46 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
g_free(gpioname);
}
/* Wire up the splitters for the MPC IRQs */
for (i = 0; i < IOTS_NUM_EXP_MPC + IOTS_NUM_MPC; i++) {
SplitIRQ *splitter = &s->mpc_irq_splitter[i];
DeviceState *dev_splitter = DEVICE(splitter);
object_property_set_int(OBJECT(splitter), 2, "num-lines", &err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_bool(OBJECT(splitter), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
if (i < IOTS_NUM_EXP_MPC) {
/* Splitter input is from GPIO input line */
s->mpcexp_status_in[i] = qdev_get_gpio_in(dev_splitter, 0);
qdev_connect_gpio_out(dev_splitter, 0,
qdev_get_gpio_in_named(dev_secctl,
"mpcexp_status", i));
} else {
/* Splitter input is from our own MPC */
qdev_connect_gpio_out_named(DEVICE(&s->mpc), "irq", 0,
qdev_get_gpio_in(dev_splitter, 0));
qdev_connect_gpio_out(dev_splitter, 0,
qdev_get_gpio_in_named(dev_secctl,
"mpc_status", 0));
}
qdev_connect_gpio_out(dev_splitter, 1,
qdev_get_gpio_in(DEVICE(&s->mpc_irq_orgate), i));
}
/* Create GPIO inputs which will pass the line state for our
* mpcexp_irq inputs to the correct splitter devices.
*/
qdev_init_gpio_in_named(dev, iotkit_mpcexp_status, "mpcexp_status",
IOTS_NUM_EXP_MPC);
iotkit_forward_sec_resp_cfg(s);
system_clock_scale = NANOSECONDS_PER_SECOND / s->mainclk_frq;


@ -44,6 +44,7 @@
#include "hw/timer/cmsdk-apb-timer.h"
#include "hw/misc/mps2-scc.h"
#include "hw/misc/mps2-fpgaio.h"
#include "hw/misc/tz-mpc.h"
#include "hw/arm/iotkit.h"
#include "hw/devices.h"
#include "net/net.h"
@ -64,13 +65,12 @@ typedef struct {
IoTKit iotkit;
MemoryRegion psram;
MemoryRegion ssram1;
MemoryRegion ssram[3];
MemoryRegion ssram1_m;
MemoryRegion ssram23;
MPS2SCC scc;
MPS2FPGAIO fpgaio;
TZPPC ppc[5];
UnimplementedDeviceState ssram_mpc[3];
TZMPC ssram_mpc[3];
UnimplementedDeviceState spi[5];
UnimplementedDeviceState i2c[4];
UnimplementedDeviceState i2s_audio;
@ -96,16 +96,6 @@ typedef struct {
/* Main SYSCLK frequency in Hz */
#define SYSCLK_FRQ 20000000
/* Initialize the auxiliary RAM region @mr and map it into
* the memory map at @base.
*/
static void make_ram(MemoryRegion *mr, const char *name,
hwaddr base, hwaddr size)
{
memory_region_init_ram(mr, NULL, name, size, &error_fatal);
memory_region_add_subregion(get_system_memory(), base, mr);
}
/* Create an alias of an entire original MemoryRegion @orig
* located at @base in the memory map.
*/
@ -245,6 +235,44 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
return sysbus_mmio_get_region(s, 0);
}
static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size)
{
TZMPC *mpc = opaque;
int i = mpc - &mms->ssram_mpc[0];
MemoryRegion *ssram = &mms->ssram[i];
MemoryRegion *upstream;
char *mpcname = g_strdup_printf("%s-mpc", name);
static uint32_t ramsize[] = { 0x00400000, 0x00200000, 0x00200000 };
static uint32_t rambase[] = { 0x00000000, 0x28000000, 0x28200000 };
memory_region_init_ram(ssram, NULL, name, ramsize[i], &error_fatal);
init_sysbus_child(OBJECT(mms), mpcname, mpc,
sizeof(mms->ssram_mpc[0]), TYPE_TZ_MPC);
object_property_set_link(OBJECT(mpc), OBJECT(ssram),
"downstream", &error_fatal);
object_property_set_bool(OBJECT(mpc), true, "realized", &error_fatal);
/* Map the upstream end of the MPC into system memory */
upstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1);
memory_region_add_subregion(get_system_memory(), rambase[i], upstream);
/* and connect its interrupt to the IoTKit */
qdev_connect_gpio_out_named(DEVICE(mpc), "irq", 0,
qdev_get_gpio_in_named(DEVICE(&mms->iotkit),
"mpcexp_status", i));
/* The first SSRAM is a special case as it has an alias; accesses to
* the alias region at 0x00400000 must also go to the MPC upstream.
*/
if (i == 0) {
make_ram_alias(&mms->ssram1_m, "mps.ssram1_m", upstream, 0x00400000);
}
g_free(mpcname);
/* Return the register interface MR for our caller to map behind the PPC */
return sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 0);
}
static void mps2tz_common_init(MachineState *machine)
{
MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine);
@ -306,14 +334,6 @@ static void mps2tz_common_init(MachineState *machine)
NULL, "mps.ram", 0x01000000);
memory_region_add_subregion(system_memory, 0x80000000, &mms->psram);
/* The SSRAM memories should all be behind Memory Protection Controllers,
* but we don't implement that yet.
*/
make_ram(&mms->ssram1, "mps.ssram1", 0x00000000, 0x00400000);
make_ram_alias(&mms->ssram1_m, "mps.ssram1_m", &mms->ssram1, 0x00400000);
make_ram(&mms->ssram23, "mps.ssram23", 0x28000000, 0x00400000);
/* The overflow IRQs for all UARTs are ORed together.
* Tx, Rx and "combined" IRQs are sent to the NVIC separately.
* Create the OR gate for this.
@ -343,12 +363,9 @@ static void mps2tz_common_init(MachineState *machine)
const PPCInfo ppcs[] = { {
.name = "apb_ppcexp0",
.ports = {
{ "ssram-mpc0", make_unimp_dev, &mms->ssram_mpc[0],
0x58007000, 0x1000 },
{ "ssram-mpc1", make_unimp_dev, &mms->ssram_mpc[1],
0x58008000, 0x1000 },
{ "ssram-mpc2", make_unimp_dev, &mms->ssram_mpc[2],
0x58009000, 0x1000 },
{ "ssram-0", make_mpc, &mms->ssram_mpc[0], 0x58007000, 0x1000 },
{ "ssram-1", make_mpc, &mms->ssram_mpc[1], 0x58008000, 0x1000 },
{ "ssram-2", make_mpc, &mms->ssram_mpc[2], 0x58009000, 0x1000 },
},
}, {
.name = "apb_ppcexp1",


@ -150,16 +150,17 @@ static void acpi_dsdt_add_virtio(Aml *scope,
}
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
uint32_t irq, bool use_highmem)
uint32_t irq, bool use_highmem, bool highmem_ecam)
{
int ecam_id = VIRT_ECAM_ID(highmem_ecam);
Aml *method, *crs, *ifctx, *UUID, *ifctx1, *elsectx, *buf;
int i, bus_no;
hwaddr base_mmio = memmap[VIRT_PCIE_MMIO].base;
hwaddr size_mmio = memmap[VIRT_PCIE_MMIO].size;
hwaddr base_pio = memmap[VIRT_PCIE_PIO].base;
hwaddr size_pio = memmap[VIRT_PCIE_PIO].size;
hwaddr base_ecam = memmap[VIRT_PCIE_ECAM].base;
hwaddr size_ecam = memmap[VIRT_PCIE_ECAM].size;
hwaddr base_ecam = memmap[ecam_id].base;
hwaddr size_ecam = memmap[ecam_id].size;
int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
Aml *dev = aml_device("%s", "PCI0");
@ -173,7 +174,7 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
/* Declare the PCI Routing Table. */
Aml *rt_pkg = aml_package(nr_pcie_buses * PCI_NUM_PINS);
Aml *rt_pkg = aml_varpackage(nr_pcie_buses * PCI_NUM_PINS);
for (bus_no = 0; bus_no < nr_pcie_buses; bus_no++) {
for (i = 0; i < PCI_NUM_PINS; i++) {
int gsi = (i + bus_no) % PCI_NUM_PINS;
@ -316,7 +317,10 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
Aml *dev_res0 = aml_device("%s", "RES0");
aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(base_ecam, size_ecam, AML_READ_WRITE));
aml_append(crs,
aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, base_ecam,
base_ecam + size_ecam - 1, 0x0000, size_ecam));
aml_append(dev_res0, aml_name_decl("_CRS", crs));
aml_append(dev, dev_res0);
aml_append(scope, dev);
@ -573,16 +577,17 @@ build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
AcpiTableMcfg *mcfg;
const MemMapEntry *memmap = vms->memmap;
int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);
int mcfg_start = table_data->len;
mcfg = acpi_data_push(table_data, len);
mcfg->allocation[0].address = cpu_to_le64(memmap[VIRT_PCIE_ECAM].base);
mcfg->allocation[0].address = cpu_to_le64(memmap[ecam_id].base);
/* Only a single allocation so no need to play with segments */
mcfg->allocation[0].pci_segment = cpu_to_le16(0);
mcfg->allocation[0].start_bus_number = 0;
mcfg->allocation[0].end_bus_number = (memmap[VIRT_PCIE_ECAM].size
mcfg->allocation[0].end_bus_number = (memmap[ecam_id].size
/ PCIE_MMCFG_SIZE_MIN) - 1;
build_header(linker, table_data, (void *)(table_data->data + mcfg_start),
@ -670,6 +675,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
if (vms->gic_version == 3) {
AcpiMadtGenericTranslator *gic_its;
int nb_redist_regions = virt_gicv3_redist_region_count(vms);
AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
sizeof *gicr);
@ -678,6 +684,14 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);
if (nb_redist_regions == 2) {
gicr = acpi_data_push(table_data, sizeof(*gicr));
gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
gicr->length = sizeof(*gicr);
gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST2].base);
gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST2].size);
}
if (its_class_name() && !vmc->no_its) {
gic_its = acpi_data_push(table_data, sizeof *gic_its);
gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
@ -757,7 +771,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
(irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
vms->highmem);
vms->highmem, vms->highmem_ecam);
acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
(irqmap[VIRT_GPIO] + ARM_SPI_BASE));
acpi_dsdt_add_power_button(scope);


@ -149,6 +149,9 @@ static const MemMapEntry a15memmap[] = {
[VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
[VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
[VIRT_MEM] = { 0x40000000, RAMLIMIT_BYTES },
/* Additional 64 MB redist region (can contain up to 512 redistributors) */
[VIRT_GIC_REDIST2] = { 0x4000000000ULL, 0x4000000 },
[VIRT_PCIE_ECAM_HIGH] = { 0x4010000000ULL, 0x10000000 },
/* Second PCIe window, 512GB wide at the 512GB boundary */
[VIRT_PCIE_MMIO_HIGH] = { 0x8000000000ULL, 0x8000000000ULL },
};
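As a standalone sketch (not from the patch) of what the larger ECAM window buys: ECAM reserves 1 MiB of config space per bus (PCIE_MMCFG_SIZE_MIN), so the two window sizes above translate directly into usable PCI bus counts.

#include <stdio.h>
#include <stdint.h>

#define PCIE_MMCFG_SIZE_MIN 0x100000ULL /* 1 MiB of ECAM config space per bus */

int main(void)
{
    uint64_t legacy_ecam = 0x01000000ULL; /* VIRT_PCIE_ECAM: 16 MiB */
    uint64_t high_ecam   = 0x10000000ULL; /* VIRT_PCIE_ECAM_HIGH: 256 MiB */

    /* nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN, as create_pcie() computes */
    printf("legacy ECAM: %llu buses\n",
           (unsigned long long)(legacy_ecam / PCIE_MMCFG_SIZE_MIN)); /* 16 */
    printf("high ECAM:   %llu buses\n",
           (unsigned long long)(high_ecam / PCIE_MMCFG_SIZE_MIN));   /* 256 */
    return 0;
}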
@ -402,13 +405,30 @@ static void fdt_add_gic_node(VirtMachineState *vms)
qemu_fdt_setprop_cell(vms->fdt, "/intc", "#size-cells", 0x2);
qemu_fdt_setprop(vms->fdt, "/intc", "ranges", NULL, 0);
if (vms->gic_version == 3) {
int nb_redist_regions = virt_gicv3_redist_region_count(vms);
qemu_fdt_setprop_string(vms->fdt, "/intc", "compatible",
"arm,gic-v3");
qemu_fdt_setprop_cell(vms->fdt, "/intc",
"#redistributor-regions", nb_redist_regions);
if (nb_redist_regions == 1) {
qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
2, vms->memmap[VIRT_GIC_DIST].base,
2, vms->memmap[VIRT_GIC_DIST].size,
2, vms->memmap[VIRT_GIC_REDIST].base,
2, vms->memmap[VIRT_GIC_REDIST].size);
} else {
qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
2, vms->memmap[VIRT_GIC_DIST].base,
2, vms->memmap[VIRT_GIC_DIST].size,
2, vms->memmap[VIRT_GIC_REDIST].base,
2, vms->memmap[VIRT_GIC_REDIST].size,
2, vms->memmap[VIRT_GIC_REDIST2].base,
2, vms->memmap[VIRT_GIC_REDIST2].size);
}
if (vms->virt) {
qemu_fdt_setprop_cells(vms->fdt, "/intc", "interrupts",
GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ,
@ -510,6 +530,7 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
SysBusDevice *gicbusdev;
const char *gictype;
int type = vms->gic_version, i;
uint32_t nb_redist_regions = 0;
gictype = (type == 3) ? gicv3_class_name() : gic_class_name();
@ -523,11 +544,34 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
if (!kvm_irqchip_in_kernel()) {
qdev_prop_set_bit(gicdev, "has-security-extensions", vms->secure);
}
if (type == 3) {
uint32_t redist0_capacity =
vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);
nb_redist_regions = virt_gicv3_redist_region_count(vms);
qdev_prop_set_uint32(gicdev, "len-redist-region-count",
nb_redist_regions);
qdev_prop_set_uint32(gicdev, "redist-region-count[0]", redist0_count);
if (nb_redist_regions == 2) {
uint32_t redist1_capacity =
vms->memmap[VIRT_GIC_REDIST2].size / GICV3_REDIST_SIZE;
qdev_prop_set_uint32(gicdev, "redist-region-count[1]",
MIN(smp_cpus - redist0_count, redist1_capacity));
}
}
qdev_init_nofail(gicdev);
gicbusdev = SYS_BUS_DEVICE(gicdev);
sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base);
if (type == 3) {
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
if (nb_redist_regions == 2) {
sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_REDIST2].base);
}
} else {
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
}
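To make the two-region split concrete, a small sketch with assumed numbers: the pre-existing VIRT_GIC_REDIST window (not shown in this hunk) is assumed to hold 123 redistributors, and VIRT_GIC_REDIST2 above holds 0x4000000 / GICV3_REDIST_SIZE = 512.

#include <stdio.h>

#define GICV3_REDIST_SIZE 0x20000 /* 128 KiB of MMIO per redistributor */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned redist0_capacity = 123;                           /* assumed VIRT_GIC_REDIST capacity */
    unsigned redist1_capacity = 0x4000000 / GICV3_REDIST_SIZE; /* 512, from VIRT_GIC_REDIST2 */
    unsigned smp_cpus = 512;

    unsigned redist0_count = MIN(smp_cpus, redist0_capacity);
    unsigned redist1_count = MIN(smp_cpus - redist0_count, redist1_capacity);

    /* 512 CPUs -> region 0 gets 123 redistributors, region 1 gets the other 389 */
    printf("region 0: %u, region 1: %u\n", redist0_count, redist1_count);
    return 0;
}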
@ -1001,10 +1045,9 @@ static void create_pcie(VirtMachineState *vms, qemu_irq *pic)
hwaddr size_mmio_high = vms->memmap[VIRT_PCIE_MMIO_HIGH].size;
hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base;
hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size;
hwaddr base_ecam = vms->memmap[VIRT_PCIE_ECAM].base;
hwaddr size_ecam = vms->memmap[VIRT_PCIE_ECAM].size;
hwaddr base_ecam, size_ecam;
hwaddr base = base_mmio;
int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
int nr_pcie_buses;
int irq = vms->irqmap[VIRT_PCIE];
MemoryRegion *mmio_alias;
MemoryRegion *mmio_reg;
@ -1012,12 +1055,16 @@ static void create_pcie(VirtMachineState *vms, qemu_irq *pic)
MemoryRegion *ecam_reg;
DeviceState *dev;
char *nodename;
int i;
int i, ecam_id;
PCIHostState *pci;
dev = qdev_create(NULL, TYPE_GPEX_HOST);
qdev_init_nofail(dev);
ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
base_ecam = vms->memmap[ecam_id].base;
size_ecam = vms->memmap[ecam_id].size;
nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
/* Map only the first size_ecam bytes of ECAM space */
ecam_alias = g_new0(MemoryRegion, 1);
ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
@ -1271,6 +1318,7 @@ static void machvirt_init(MachineState *machine)
int n, virt_max_cpus;
MemoryRegion *ram = g_new(MemoryRegion, 1);
bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0);
bool aarch64 = true;
/* We can probe only here because during property set
* KVM is not available yet
@ -1322,7 +1370,8 @@ static void machvirt_init(MachineState *machine)
* many redistributors we can fit into the memory map.
*/
if (vms->gic_version == 3) {
virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / GICV3_REDIST_SIZE;
} else {
virt_max_cpus = GIC_NCPU;
}
@ -1385,6 +1434,8 @@ static void machvirt_init(MachineState *machine)
numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
&error_fatal);
aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL);
if (!vms->secure) {
object_property_set_bool(cpuobj, false, "has_el3", NULL);
}
@ -1443,6 +1494,8 @@ static void machvirt_init(MachineState *machine)
create_uart(vms, pic, VIRT_SECURE_UART, secure_sysmem, serial_hd(1));
}
vms->highmem_ecam &= vms->highmem && (!firmware_loaded || aarch64);
create_rtc(vms, pic);
create_pcie(vms, pic);
@ -1653,11 +1706,11 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
mc->init = machvirt_init;
/* Start max_cpus at the maximum QEMU supports. We'll further restrict
* it later in machvirt_init, where we have more information about the
/* Start with max_cpus set to 512, which is the maximum supported by KVM.
* The value may be reduced later when we have more information about the
* configuration of the particular instance.
*/
mc->max_cpus = 255;
mc->max_cpus = 512;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_CALXEDA_XGMAC);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
@ -1697,7 +1750,7 @@ type_init(machvirt_machine_init);
#define VIRT_COMPAT_2_12 \
HW_COMPAT_2_12
static void virt_2_12_instance_init(Object *obj)
static void virt_3_0_instance_init(Object *obj)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
@ -1740,6 +1793,8 @@ static void virt_2_12_instance_init(Object *obj)
"Set GIC version. "
"Valid values are 2, 3 and host", NULL);
vms->highmem_ecam = !vmc->no_highmem_ecam;
if (vmc->no_its) {
vms->its = false;
} else {
@ -1765,11 +1820,26 @@ static void virt_2_12_instance_init(Object *obj)
vms->irqmap = a15irqmap;
}
static void virt_machine_3_0_options(MachineClass *mc)
{
}
DEFINE_VIRT_MACHINE_AS_LATEST(3, 0)
static void virt_2_12_instance_init(Object *obj)
{
virt_3_0_instance_init(obj);
}
static void virt_machine_2_12_options(MachineClass *mc)
{
VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
virt_machine_3_0_options(mc);
SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_12);
vmc->no_highmem_ecam = true;
mc->max_cpus = 255;
}
DEFINE_VIRT_MACHINE_AS_LATEST(2, 12)
DEFINE_VIRT_MACHINE(2, 12)
#define VIRT_COMPAT_2_11 \
HW_COMPAT_2_11


@ -208,7 +208,7 @@ static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->desc = "Xilinx ZynqMP ZCU102 board with 4xA53s and 2xR5s based on " \
mc->desc = "Xilinx ZynqMP ZCU102 board with 4xA53s and 2xR5Fs based on " \
"the value of smp";
mc->init = xlnx_zcu102_init;
mc->block_default_type = IF_IDE;


@ -134,7 +134,7 @@ static void xlnx_zynqmp_create_rpu(XlnxZynqMPState *s, const char *boot_cpu,
char *name;
object_initialize(&s->rpu_cpu[i], sizeof(s->rpu_cpu[i]),
"cortex-r5-" TYPE_ARM_CPU);
"cortex-r5f-" TYPE_ARM_CPU);
object_property_add_child(OBJECT(s), "rpu-cpu[*]",
OBJECT(&s->rpu_cpu[i]), &error_abort);


@ -558,7 +558,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
| KVM_VGIC_V2_ADDR_TYPE_DIST,
KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V2_ADDR_TYPE_DIST,
s->dev_fd);
s->dev_fd, 0);
/* CPU interface for current core. Unlike arm_gic, we don't
* provide the "interface for core #N" memory regions, because
* cores with a VGIC don't have those.
@ -568,7 +568,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
| KVM_VGIC_V2_ADDR_TYPE_CPU,
KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V2_ADDR_TYPE_CPU,
s->dev_fd);
s->dev_fd, 0);
if (kvm_has_gsi_routing()) {
/* set up irq routing */


@ -373,7 +373,17 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);
if (s->nb_redist_regions != 1) {
error_setg(errp, "VGICv3 redist region number(%d) not equal to 1",
s->nb_redist_regions);
return;
}
gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
gicv3_init_cpuif(s);
}


@ -247,11 +247,22 @@ static const VMStateDescription vmstate_gicv3 = {
};
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
const MemoryRegionOps *ops)
const MemoryRegionOps *ops, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(s);
int rdist_capacity = 0;
int i;
for (i = 0; i < s->nb_redist_regions; i++) {
rdist_capacity += s->redist_region_count[i];
}
if (rdist_capacity < s->num_cpu) {
error_setg(errp, "Capacity of the redist regions(%d) "
"is less than number of vcpus(%d)",
rdist_capacity, s->num_cpu);
return;
}
/* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
* GPIO array layout is thus:
* [0..N-1] spi
@ -277,11 +288,18 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
"gicv3_dist", 0x10000);
memory_region_init_io(&s->iomem_redist, OBJECT(s), ops ? &ops[1] : NULL, s,
"gicv3_redist", 0x20000 * s->num_cpu);
sysbus_init_mmio(sbd, &s->iomem_dist);
sysbus_init_mmio(sbd, &s->iomem_redist);
s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions);
for (i = 0; i < s->nb_redist_regions; i++) {
char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
memory_region_init_io(&s->iomem_redist[i], OBJECT(s),
ops ? &ops[1] : NULL, s, name,
s->redist_region_count[i] * GICV3_REDIST_SIZE);
sysbus_init_mmio(sbd, &s->iomem_redist[i]);
g_free(name);
}
}
static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
@ -363,6 +381,13 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
}
}
static void arm_gicv3_finalize(Object *obj)
{
GICv3State *s = ARM_GICV3_COMMON(obj);
g_free(s->redist_region_count);
}
static void arm_gicv3_common_reset(DeviceState *dev)
{
GICv3State *s = ARM_GICV3_COMMON(dev);
@ -467,6 +492,8 @@ static Property arm_gicv3_common_properties[] = {
DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
redist_region_count, qdev_prop_uint32, uint32_t),
DEFINE_PROP_END_OF_LIST(),
};
@ -488,6 +515,7 @@ static const TypeInfo arm_gicv3_common_type = {
.instance_size = sizeof(GICv3State),
.class_size = sizeof(ARMGICv3CommonClass),
.class_init = arm_gicv3_common_class_init,
.instance_finalize = arm_gicv3_finalize,
.abstract = true,
.interfaces = (InterfaceInfo []) {
{ TYPE_ARM_LINUX_BOOT_IF },


@ -441,7 +441,8 @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
int i, irq = offset - GICD_IPRIORITYR;
uint32_t value = 0;
for (i = irq + 3; i >= irq; i--, value <<= 8) {
for (i = irq + 3; i >= irq; i--) {
value <<= 8;
value |= gicd_read_ipriorityr(s, attrs, i);
}
*data = value;
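The change above matters because in the old loop the "value <<= 8" in the for-statement also runs after the last byte has been ORed in, so the assembled word comes back shifted one byte too far. A standalone sketch with made-up priority bytes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t prio[4] = { 0x10, 0x20, 0x30, 0x40 }; /* priorities for irq .. irq+3 */
    uint32_t old_val = 0, new_val = 0;
    int i;

    /* old loop: the shift runs after every iteration, including the final one,
     * so the top byte is lost and bits [7:0] read back as zero */
    for (i = 3; i >= 0; i--, old_val <<= 8) {
        old_val |= prio[i];
    }

    /* fixed loop: shift before OR, so prio[3] lands in bits [31:24] and so on */
    for (i = 3; i >= 0; i--) {
        new_val <<= 8;
        new_val |= prio[i];
    }

    printf("old: 0x%08x  fixed: 0x%08x\n", old_val, new_val);
    /* prints  old: 0x30201000  fixed: 0x40302010 */
    return 0;
}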


@ -103,7 +103,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
/* register the base address */
kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd);
KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0);
gicv3_its_init_mmio(s, NULL);


@ -767,6 +767,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
{
GICv3State *s = KVM_ARM_GICV3(dev);
KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
bool multiple_redist_region_allowed;
Error *local_err = NULL;
int i;
@ -784,7 +785,11 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
return;
}
gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);
gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
for (i = 0; i < s->num_cpu; i++) {
ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
@ -799,6 +804,18 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
return;
}
multiple_redist_region_allowed =
kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);
if (!multiple_redist_region_allowed && s->nb_redist_regions > 1) {
error_setg(errp, "Multiple VGICv3 redistributor regions are not "
"supported by this host kernel");
error_append_hint(errp, "A maximum of %d VCPUs can be used",
s->redist_region_count[0]);
return;
}
kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0, &s->num_irq, true, &error_abort);
@ -807,9 +824,28 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort);
kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);
KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd, 0);
if (!multiple_redist_region_allowed) {
kvm_arm_register_device(&s->iomem_redist[0], -1,
KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd, 0);
} else {
/* we register regions in reverse order as "devices" are inserted at
* the head of a QSLIST and the list is then popped from the head
* onwards by kvm_arm_machine_init_done()
*/
for (i = s->nb_redist_regions - 1; i >= 0; i--) {
/* Address mask made of the rdist region index and count */
uint64_t addr_ormask =
i | ((uint64_t)s->redist_region_count[i] << 52);
kvm_arm_register_device(&s->iomem_redist[i], -1,
KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
s->dev_fd, addr_ormask);
}
}
if (kvm_has_gsi_routing()) {
/* set up irq routing */


@ -192,7 +192,8 @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
int i, irq = offset - GICR_IPRIORITYR;
uint32_t value = 0;
for (i = irq + 3; i >= irq; i--, value <<= 8) {
for (i = irq + 3; i >= irq; i--) {
value <<= 8;
value |= gicr_read_ipriorityr(cs, attrs, i);
}
*data = value;


@ -62,6 +62,7 @@ obj-$(CONFIG_MIPS_ITU) += mips_itu.o
obj-$(CONFIG_MPS2_FPGAIO) += mps2-fpgaio.o
obj-$(CONFIG_MPS2_SCC) += mps2-scc.o
obj-$(CONFIG_TZ_MPC) += tz-mpc.o
obj-$(CONFIG_TZ_PPC) += tz-ppc.o
obj-$(CONFIG_IOTKIT_SECCTL) += iotkit-secctl.o


@ -139,6 +139,9 @@ static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr,
case A_NSCCFG:
r = s->nsccfg;
break;
case A_SECMPCINTSTATUS:
r = s->mpcintstatus;
break;
case A_SECPPCINTSTAT:
r = s->secppcintstat;
break;
@ -186,7 +189,6 @@ static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr,
case A_APBSPPPCEXP3:
r = s->apbexp[offset_to_ppc_idx(offset)].sp;
break;
case A_SECMPCINTSTATUS:
case A_SECMSCINTSTAT:
case A_SECMSCINTEN:
case A_NSMSCEXP:
@ -572,6 +574,20 @@ static void iotkit_secctl_reset(DeviceState *dev)
foreach_ppc(s, iotkit_secctl_reset_ppc);
}
static void iotkit_secctl_mpc_status(void *opaque, int n, int level)
{
IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
s->mpcintstatus = deposit32(s->mpcintstatus, 0, 1, !!level);
}
static void iotkit_secctl_mpcexp_status(void *opaque, int n, int level)
{
IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
s->mpcintstatus = deposit32(s->mpcintstatus, n + 16, 1, !!level);
}
static void iotkit_secctl_ppc_irqstatus(void *opaque, int n, int level)
{
IoTKitSecCtlPPC *ppc = opaque;
@ -640,6 +656,10 @@ static void iotkit_secctl_init(Object *obj)
qdev_init_gpio_out_named(dev, &s->sec_resp_cfg, "sec_resp_cfg", 1);
qdev_init_gpio_out_named(dev, &s->nsc_cfg_irq, "nsc_cfg", 1);
qdev_init_gpio_in_named(dev, iotkit_secctl_mpc_status, "mpc_status", 1);
qdev_init_gpio_in_named(dev, iotkit_secctl_mpcexp_status,
"mpcexp_status", IOTS_NUM_EXP_MPC);
memory_region_init_io(&s->s_regs, obj, &iotkit_secctl_s_ops,
s, "iotkit-secctl-s-regs", 0x1000);
memory_region_init_io(&s->ns_regs, obj, &iotkit_secctl_ns_ops,
@ -660,6 +680,16 @@ static const VMStateDescription iotkit_secctl_ppc_vmstate = {
}
};
static const VMStateDescription iotkit_secctl_mpcintstatus_vmstate = {
.name = "iotkit-secctl-mpcintstatus",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(mpcintstatus, IoTKitSecCtl),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription iotkit_secctl_vmstate = {
.name = "iotkit-secctl",
.version_id = 1,
@ -677,7 +707,11 @@ static const VMStateDescription iotkit_secctl_vmstate = {
VMSTATE_STRUCT_ARRAY(ahbexp, IoTKitSecCtl, IOTS_NUM_AHB_EXP_PPC, 1,
iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC),
VMSTATE_END_OF_LIST()
}
},
.subsections = (const VMStateDescription*[]) {
&iotkit_secctl_mpcintstatus_vmstate,
NULL
},
};
static void iotkit_secctl_class_init(ObjectClass *klass, void *data)


@ -84,6 +84,14 @@ mos6522_set_sr_int(void) "set sr_int"
mos6522_write(uint64_t addr, uint64_t val) "reg=0x%"PRIx64 " val=0x%"PRIx64
mos6522_read(uint64_t addr, unsigned val) "reg=0x%"PRIx64 " val=0x%x"
# hw/misc/tz-mpc.c
tz_mpc_reg_read(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs read: offset 0x%x data 0x%" PRIx64 " size %u"
tz_mpc_reg_write(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs write: offset 0x%x data 0x%" PRIx64 " size %u"
tz_mpc_mem_blocked_read(uint64_t addr, unsigned size, bool secure) "TZ MPC blocked read: offset 0x%" PRIx64 " size %u secure %d"
tz_mpc_mem_blocked_write(uint64_t addr, uint64_t data, unsigned size, bool secure) "TZ MPC blocked write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
tz_mpc_translate(uint64_t addr, int flags, const char *idx, const char *res) "TZ MPC translate: addr 0x%" PRIx64 " flags 0x%x iommu_idx %s: %s"
tz_mpc_iommu_notify(uint64_t addr) "TZ MPC iommu: notifying UNMAP/MAP for 0x%" PRIx64
# hw/misc/tz-ppc.c
tz_ppc_reset(void) "TZ PPC: reset"
tz_ppc_cfg_nonsec(int n, int level) "TZ PPC: cfg_nonsec[%d] = %d"

hw/misc/tz-mpc.c (new file, 628 lines)

@ -0,0 +1,628 @@
/*
* ARM AHB5 TrustZone Memory Protection Controller emulation
*
* Copyright (c) 2018 Linaro Limited
* Written by Peter Maydell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or
* (at your option) any later version.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "hw/registerfields.h"
#include "hw/misc/tz-mpc.h"
/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
* non-secure transactions.
*/
enum {
IOMMU_IDX_S,
IOMMU_IDX_NS,
IOMMU_NUM_INDEXES,
};
/* Config registers */
REG32(CTRL, 0x00)
FIELD(CTRL, SEC_RESP, 4, 1)
FIELD(CTRL, AUTOINC, 8, 1)
FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
FIELD(INT_INFO2, HMASTER, 0, 16)
FIELD(INT_INFO2, HNONSEC, 16, 1)
FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)
static const uint8_t tz_mpc_idregs[] = {
0x04, 0x00, 0x00, 0x00,
0x60, 0xb8, 0x1b, 0x00,
0x0d, 0xf0, 0x05, 0xb1,
};
static void tz_mpc_irq_update(TZMPC *s)
{
qemu_set_irq(s->irq, s->int_stat && s->int_en);
}
static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
uint32_t oldlut, uint32_t newlut)
{
/* Called when the LUT word at lutidx has changed from oldlut to newlut;
* must call the IOMMU notifiers for the changed blocks.
*/
IOMMUTLBEntry entry = {
.addr_mask = s->blocksize - 1,
};
hwaddr addr = lutidx * s->blocksize * 32;
int i;
for (i = 0; i < 32; i++, addr += s->blocksize) {
bool block_is_ns;
if (!((oldlut ^ newlut) & (1 << i))) {
continue;
}
/* This changes the mappings for both the S and the NS space,
* so we need to do four notifies: an UNMAP then a MAP for each.
*/
block_is_ns = newlut & (1 << i);
trace_tz_mpc_iommu_notify(addr);
entry.iova = addr;
entry.translated_addr = addr;
entry.perm = IOMMU_NONE;
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
entry.perm = IOMMU_RW;
if (block_is_ns) {
entry.target_as = &s->blocked_io_as;
} else {
entry.target_as = &s->downstream_as;
}
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
if (block_is_ns) {
entry.target_as = &s->downstream_as;
} else {
entry.target_as = &s->blocked_io_as;
}
memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
}
}
static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
/* Auto-increment BLK_IDX if necessary */
if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
s->blk_idx++;
s->blk_idx %= s->blk_max;
}
}
static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
uint64_t *pdata,
unsigned size, MemTxAttrs attrs)
{
TZMPC *s = TZ_MPC(opaque);
uint64_t r;
uint32_t offset = addr & ~0x3;
if (!attrs.secure && offset < A_PIDR4) {
/* NS accesses can only see the ID registers */
qemu_log_mask(LOG_GUEST_ERROR,
"TZ MPC register read: NS access to offset 0x%x\n",
offset);
r = 0;
goto read_out;
}
switch (offset) {
case A_CTRL:
r = s->ctrl;
break;
case A_BLK_MAX:
r = s->blk_max;
break;
case A_BLK_CFG:
/* We are never in "init in progress state", so this just indicates
* the block size. s->blocksize == (1 << BLK_CFG + 5), so
* BLK_CFG == ctz32(s->blocksize) - 5
*/
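/* For example, a 4 KiB block size would read back as ctz32(0x1000) - 5 == 7,
 * since 1 << (7 + 5) == 0x1000; the real size here is whatever realize chose.
 */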
r = ctz32(s->blocksize) - 5;
break;
case A_BLK_IDX:
r = s->blk_idx;
break;
case A_BLK_LUT:
r = s->blk_lut[s->blk_idx];
tz_mpc_autoinc_idx(s, size);
break;
case A_INT_STAT:
r = s->int_stat;
break;
case A_INT_EN:
r = s->int_en;
break;
case A_INT_INFO1:
r = s->int_info1;
break;
case A_INT_INFO2:
r = s->int_info2;
break;
case A_PIDR4:
case A_PIDR5:
case A_PIDR6:
case A_PIDR7:
case A_PIDR0:
case A_PIDR1:
case A_PIDR2:
case A_PIDR3:
case A_CIDR0:
case A_CIDR1:
case A_CIDR2:
case A_CIDR3:
r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
break;
case A_INT_CLEAR:
case A_INT_SET:
qemu_log_mask(LOG_GUEST_ERROR,
"TZ MPC register read: write-only offset 0x%x\n",
offset);
r = 0;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"TZ MPC register read: bad offset 0x%x\n", offset);
r = 0;
break;
}
if (size != 4) {
/* None of our registers are read-sensitive (except BLK_LUT,
* which can special case the "size not 4" case), so just
* pull the right bytes out of the word read result.
*/
r = extract32(r, (addr & 3) * 8, size * 8);
}
read_out:
trace_tz_mpc_reg_read(addr, r, size);
*pdata = r;
return MEMTX_OK;
}
static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
uint64_t value,
unsigned size, MemTxAttrs attrs)
{
TZMPC *s = TZ_MPC(opaque);
uint32_t offset = addr & ~0x3;
trace_tz_mpc_reg_write(addr, value, size);
if (!attrs.secure && offset < A_PIDR4) {
/* NS accesses can only see the ID registers */
qemu_log_mask(LOG_GUEST_ERROR,
"TZ MPC register write: NS access to offset 0x%x\n",
offset);
return MEMTX_OK;
}
if (size != 4) {
/* Expand the byte or halfword write to a full word size.
* In most cases we can do this with zeroes; the exceptions
* are CTRL, BLK_IDX and BLK_LUT.
*/
uint32_t oldval;
switch (offset) {
case A_CTRL:
oldval = s->ctrl;
break;
case A_BLK_IDX:
oldval = s->blk_idx;
break;
case A_BLK_LUT:
oldval = s->blk_lut[s->blk_idx];
break;
default:
oldval = 0;
break;
}
value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
}
if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
(offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
/* Lockdown mode makes these three registers read-only, and
* the only way out of it is to reset the device.
*/
qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
"while MPC is in lockdown mode\n", offset);
return MEMTX_OK;
}
switch (offset) {
case A_CTRL:
/* We don't implement the 'data gating' feature so all other bits
* are reserved and we make them RAZ/WI.
*/
s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
R_CTRL_AUTOINC_MASK |
R_CTRL_LOCKDOWN_MASK);
break;
case A_BLK_IDX:
s->blk_idx = value % s->blk_max;
break;
case A_BLK_LUT:
tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
s->blk_lut[s->blk_idx] = value;
tz_mpc_autoinc_idx(s, size);
break;
case A_INT_CLEAR:
if (value & R_INT_CLEAR_IRQ_MASK) {
s->int_stat = 0;
tz_mpc_irq_update(s);
}
break;
case A_INT_EN:
s->int_en = value & R_INT_EN_IRQ_MASK;
tz_mpc_irq_update(s);
break;
case A_INT_SET:
if (value & R_INT_SET_IRQ_MASK) {
s->int_stat = R_INT_STAT_IRQ_MASK;
tz_mpc_irq_update(s);
}
break;
case A_PIDR4:
case A_PIDR5:
case A_PIDR6:
case A_PIDR7:
case A_PIDR0:
case A_PIDR1:
case A_PIDR2:
case A_PIDR3:
case A_CIDR0:
case A_CIDR1:
case A_CIDR2:
case A_CIDR3:
qemu_log_mask(LOG_GUEST_ERROR,
"TZ MPC register write: read-only offset 0x%x\n", offset);
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"TZ MPC register write: bad offset 0x%x\n", offset);
break;
}
return MEMTX_OK;
}
static const MemoryRegionOps tz_mpc_reg_ops = {
.read_with_attrs = tz_mpc_reg_read,
.write_with_attrs = tz_mpc_reg_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid.min_access_size = 1,
.valid.max_access_size = 4,
.impl.min_access_size = 1,
.impl.max_access_size = 4,
};
static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
/* Return the cfg_ns bit from the LUT for the specified address */
hwaddr blknum = addr / s->blocksize;
hwaddr blkword = blknum / 32;
uint32_t blkbit = 1U << (blknum % 32);
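/* e.g. with 4 KiB blocks (illustrative numbers only), address 0x23000 is
 * block 0x23, i.e. LUT word 1, bit 3 */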
/* This would imply the address was larger than the size we
* defined this memory region to be, so it can't happen.
*/
assert(blkword < s->blk_max);
return s->blk_lut[blkword] & blkbit;
}
static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
/* Handle a blocked transaction: raise IRQ, capture info, etc */
if (!s->int_stat) {
/* First blocked transfer: capture information into INT_INFO1 and
* INT_INFO2. Subsequent transfers are still blocked but don't
* capture information until the guest clears the interrupt.
*/
s->int_info1 = addr;
s->int_info2 = 0;
s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
attrs.requester_id & 0xffff);
s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
~attrs.secure);
s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
tz_mpc_cfg_ns(s, addr));
s->int_stat |= R_INT_STAT_IRQ_MASK;
tz_mpc_irq_update(s);
}
/* Generate bus error if desired; otherwise RAZ/WI */
return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}
/* Accesses only reach these read and write functions if the MPC is
* blocking them; non-blocked accesses go directly to the downstream
* memory region without passing through this code.
*/
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
uint64_t *pdata,
unsigned size, MemTxAttrs attrs)
{
TZMPC *s = TZ_MPC(opaque);
trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);
*pdata = 0;
return tz_mpc_handle_block(s, addr, attrs);
}
static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
uint64_t value,
unsigned size, MemTxAttrs attrs)
{
TZMPC *s = TZ_MPC(opaque);
trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);
return tz_mpc_handle_block(s, addr, attrs);
}
static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
.read_with_attrs = tz_mpc_mem_blocked_read,
.write_with_attrs = tz_mpc_mem_blocked_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid.min_access_size = 1,
.valid.max_access_size = 8,
.impl.min_access_size = 1,
.impl.max_access_size = 8,
};
static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
hwaddr addr, IOMMUAccessFlags flags,
int iommu_idx)
{
TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
bool ok;
IOMMUTLBEntry ret = {
.iova = addr & ~(s->blocksize - 1),
.translated_addr = addr & ~(s->blocksize - 1),
.addr_mask = s->blocksize - 1,
.perm = IOMMU_RW,
};
/* Look at the per-block configuration for this address, and
* return a TLB entry directing the transaction at either
* downstream_as or blocked_io_as, as appropriate.
* If the LUT cfg_ns bit is 1, only non-secure transactions
* may pass. If the bit is 0, only secure transactions may pass.
*/
ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);
trace_tz_mpc_translate(addr, flags,
iommu_idx == IOMMU_IDX_S ? "S" : "NS",
ok ? "pass" : "block");
ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
return ret;
}
static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
/* We treat unspecified attributes like secure. Transactions with
* unspecified attributes come from places like
* cpu_physical_memory_write_rom() for initial image load, and we want
* those to pass through the from-reset "everything is secure" config.
* All the real during-emulation transactions from the CPU will
* specify attributes.
*/
return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}
static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
return IOMMU_NUM_INDEXES;
}
static void tz_mpc_reset(DeviceState *dev)
{
TZMPC *s = TZ_MPC(dev);
s->ctrl = 0x00000100;
s->blk_idx = 0;
s->int_stat = 0;
s->int_en = 1;
s->int_info1 = 0;
s->int_info2 = 0;
memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}
static void tz_mpc_init(Object *obj)
{
DeviceState *dev = DEVICE(obj);
TZMPC *s = TZ_MPC(obj);
qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}
static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
Object *obj = OBJECT(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
TZMPC *s = TZ_MPC(dev);
uint64_t size;
/* We can't create the upstream end of the port until realize,
* as we don't know the size of the MR used as the downstream until then.
* We insist on having a downstream, to avoid complicating the code
* with handling the "don't know how big this is" case. It's easy
* enough for the user to create an unimplemented_device as downstream
* if they have nothing else to plug into this.
*/
if (!s->downstream) {
error_setg(errp, "MPC 'downstream' link not set");
return;
}
size = memory_region_size(s->downstream);
memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
obj, "tz-mpc-upstream", size);
/* In real hardware the block size is configurable. In QEMU we could
* make it configurable but will need it to be at least as big as the
* target page size so we can execute out of the resulting MRs. Guest
* software is supposed to check the block size using the BLK_CFG
* register, so make it fixed at the page size.
*/
s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
if (size % s->blocksize != 0) {
error_setg(errp,
"MPC 'downstream' size %" PRId64
" is not a multiple of %" HWADDR_PRIx " bytes",
size, s->blocksize);
object_unref(OBJECT(&s->upstream));
return;
}
/* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
* words, each bit of which indicates one block.
*/
s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);
memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
s, "tz-mpc-regs", 0x1000);
sysbus_init_mmio(sbd, &s->regmr);
sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));
/* This memory region is not exposed to users of this device as a
* sysbus MMIO region, but is instead used internally as something
* that our IOMMU translate function might direct accesses to.
*/
memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
s, "tz-mpc-blocked-io", size);
address_space_init(&s->downstream_as, s->downstream,
"tz-mpc-downstream");
address_space_init(&s->blocked_io_as, &s->blocked_io,
"tz-mpc-blocked-io");
s->blk_lut = g_new(uint32_t, s->blk_max);
}
static int tz_mpc_post_load(void *opaque, int version_id)
{
TZMPC *s = TZ_MPC(opaque);
/* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
if (s->blk_idx >= s->blk_max) {
return -1;
}
return 0;
}
static const VMStateDescription tz_mpc_vmstate = {
.name = "tz-mpc",
.version_id = 1,
.minimum_version_id = 1,
.post_load = tz_mpc_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(ctrl, TZMPC),
VMSTATE_UINT32(blk_idx, TZMPC),
VMSTATE_UINT32(int_stat, TZMPC),
VMSTATE_UINT32(int_en, TZMPC),
VMSTATE_UINT32(int_info1, TZMPC),
VMSTATE_UINT32(int_info2, TZMPC),
VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
0, vmstate_info_uint32, uint32_t),
VMSTATE_END_OF_LIST()
}
};
static Property tz_mpc_properties[] = {
DEFINE_PROP_LINK("downstream", TZMPC, downstream,
TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = tz_mpc_realize;
dc->vmsd = &tz_mpc_vmstate;
dc->reset = tz_mpc_reset;
dc->props = tz_mpc_properties;
}
static const TypeInfo tz_mpc_info = {
.name = TYPE_TZ_MPC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(TZMPC),
.instance_init = tz_mpc_init,
.class_init = tz_mpc_class_init,
};
static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
void *data)
{
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
imrc->translate = tz_mpc_translate;
imrc->attrs_to_index = tz_mpc_attrs_to_index;
imrc->num_indexes = tz_mpc_num_indexes;
}
static const TypeInfo tz_mpc_iommu_memory_region_info = {
.name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
.parent = TYPE_IOMMU_MEMORY_REGION,
.class_init = tz_mpc_iommu_memory_region_class_init,
};
static void tz_mpc_register_types(void)
{
type_register_static(&tz_mpc_info);
type_register_static(&tz_mpc_iommu_memory_region_info);
}
type_init(tz_mpc_register_types);


@ -319,7 +319,7 @@ static inline bool xen_pt_has_msix_mapping(XenPCIPassthroughState *s, int bar)
}
extern void *pci_assign_dev_load_option_rom(PCIDevice *dev,
struct Object *owner, int *size,
int *size,
unsigned int domain,
unsigned int bus, unsigned int slot,
unsigned int function);


@ -132,7 +132,7 @@ int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev)
static void *get_vgabios(XenPCIPassthroughState *s, int *size,
XenHostPCIDevice *dev)
{
return pci_assign_dev_load_option_rom(&s->dev, OBJECT(&s->dev), size,
return pci_assign_dev_load_option_rom(&s->dev, size,
dev->domain, dev->bus,
dev->dev, dev->func);
}


@ -19,7 +19,7 @@
* load the corresponding ROM data to RAM. If an error occurs while loading an
* option ROM, we just ignore that option ROM and continue with the next one.
*/
void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
void *pci_assign_dev_load_option_rom(PCIDevice *dev,
int *size, unsigned int domain,
unsigned int bus, unsigned int slot,
unsigned int function)
@ -29,6 +29,7 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
uint8_t val;
struct stat st;
void *ptr = NULL;
Object *owner = OBJECT(dev);
/* If loading ROM from file, pci handles it */
if (dev->romfile || !dev->rom_bar) {
@ -59,8 +60,7 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
fseek(fp, 0, SEEK_SET);
snprintf(name, sizeof(name), "%s.rom", object_get_typename(owner));
memory_region_init_ram_nomigrate(&dev->rom, owner, name, st.st_size, &error_abort);
vmstate_register_ram(&dev->rom, &dev->qdev);
memory_region_init_ram(&dev->rom, owner, name, st.st_size, &error_abort);
ptr = memory_region_get_ram_ptr(&dev->rom);
memset(ptr, 0xff, st.st_size);


@ -42,6 +42,9 @@
* + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_enable
* + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_clear
* + named GPIO inputs ahb_ppcexp{0,1,2,3}_irq_status
* Controlling each of the 16 expansion MPCs which a system using the IoTKit
* might provide:
* + named GPIO inputs mpcexp_status[0..15]
*/
#ifndef IOTKIT_H
@ -51,6 +54,7 @@
#include "hw/arm/armv7m.h"
#include "hw/misc/iotkit-secctl.h"
#include "hw/misc/tz-ppc.h"
#include "hw/misc/tz-mpc.h"
#include "hw/timer/cmsdk-apb-timer.h"
#include "hw/misc/unimp.h"
#include "hw/or-irq.h"
@ -74,11 +78,14 @@ typedef struct IoTKit {
IoTKitSecCtl secctl;
TZPPC apb_ppc0;
TZPPC apb_ppc1;
TZMPC mpc;
CMSDKAPBTIMER timer0;
CMSDKAPBTIMER timer1;
qemu_or_irq ppc_irq_orgate;
SplitIRQ sec_resp_splitter;
SplitIRQ ppc_irq_splitter[NUM_PPCS];
SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
qemu_or_irq mpc_irq_orgate;
UnimplementedDeviceState dualtimer;
UnimplementedDeviceState s32ktimer;
@ -97,6 +104,7 @@ typedef struct IoTKit {
qemu_irq nsc_cfg_in;
qemu_irq irq_status_in[NUM_EXTERNAL_PPCS];
qemu_irq mpcexp_status_in[IOTS_NUM_EXP_MPC];
uint32_t nsccfg;


@ -35,6 +35,8 @@
#include "qemu/notify.h"
#include "hw/boards.h"
#include "hw/arm/arm.h"
#include "sysemu/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
#define NUM_GICV2M_SPIS 64
#define NUM_VIRTIO_TRANSPORTS 32
@ -60,6 +62,7 @@ enum {
VIRT_GIC_V2M,
VIRT_GIC_ITS,
VIRT_GIC_REDIST,
VIRT_GIC_REDIST2,
VIRT_SMMU,
VIRT_UART,
VIRT_MMIO,
@ -69,6 +72,7 @@ enum {
VIRT_PCIE_MMIO,
VIRT_PCIE_PIO,
VIRT_PCIE_ECAM,
VIRT_PCIE_ECAM_HIGH,
VIRT_PLATFORM_BUS,
VIRT_PCIE_MMIO_HIGH,
VIRT_GPIO,
@ -94,6 +98,7 @@ typedef struct {
bool no_pmu;
bool claim_edge_triggered_timers;
bool smbios_old_sys_ver;
bool no_highmem_ecam;
} VirtMachineClass;
typedef struct {
@ -103,6 +108,7 @@ typedef struct {
FWCfgState *fw_cfg;
bool secure;
bool highmem;
bool highmem_ecam;
bool its;
bool virt;
int32_t gic_version;
@ -120,6 +126,8 @@ typedef struct {
int psci_conduit;
} VirtMachineState;
#define VIRT_ECAM_ID(high) (high ? VIRT_PCIE_ECAM_HIGH : VIRT_PCIE_ECAM)
#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
#define VIRT_MACHINE(obj) \
OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE)
@ -130,4 +138,15 @@ typedef struct {
void virt_acpi_setup(VirtMachineState *vms);
/* Return the number of used redistributor regions */
static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
{
uint32_t redist0_capacity =
vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
assert(vms->gic_version == 3);
return vms->smp_cpus > redist0_capacity ? 2 : 1;
}
#endif /* QEMU_ARM_VIRT_H */


@ -35,6 +35,8 @@
#define GICV3_MAXIRQ 1020
#define GICV3_MAXSPI (GICV3_MAXIRQ - GIC_INTERNAL)
#define GICV3_REDIST_SIZE 0x20000
/* Number of SGI target-list bits */
#define GICV3_TARGETLIST_BITS 16
@ -210,7 +212,9 @@ struct GICv3State {
/*< public >*/
MemoryRegion iomem_dist; /* Distributor */
MemoryRegion iomem_redist; /* Redistributors */
MemoryRegion *iomem_redist; /* Redistributor Regions */
uint32_t *redist_region_count; /* redistributor count within each region */
uint32_t nb_redist_regions; /* number of redist regions */
uint32_t num_cpu;
uint32_t num_irq;
@ -292,6 +296,6 @@ typedef struct ARMGICv3CommonClass {
} ARMGICv3CommonClass;
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
const MemoryRegionOps *ops);
const MemoryRegionOps *ops, Error **errp);
#endif


@ -39,6 +39,11 @@
* + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_enable
* + named GPIO outputs ahb_ppcexp{0,1,2,3}_irq_clear
* + named GPIO inputs ahb_ppcexp{0,1,2,3}_irq_status
* Controlling the MPC in the IoTKit:
* + named GPIO input mpc_status
* Controlling each of the 16 expansion MPCs which a system using the IoTKit
* might provide:
* + named GPIO inputs mpcexp_status[0..15]
*/
#ifndef IOTKIT_SECCTL_H
@@ -55,6 +60,8 @@
#define IOTS_NUM_APB_PPC 2
#define IOTS_NUM_APB_EXP_PPC 4
#define IOTS_NUM_AHB_EXP_PPC 4
#define IOTS_NUM_EXP_MPC 16
#define IOTS_NUM_MPC 1
typedef struct IoTKitSecCtl IoTKitSecCtl;
@@ -94,6 +101,7 @@ struct IoTKitSecCtl {
uint32_t secrespcfg;
uint32_t nsccfg;
uint32_t brginten;
uint32_t mpcintstatus;
IoTKitSecCtlPPC apb[IOTS_NUM_APB_PPC];
IoTKitSecCtlPPC apbexp[IOTS_NUM_APB_EXP_PPC];

include/hw/misc/tz-mpc.h (new file)

@@ -0,0 +1,80 @@
/*
* ARM AHB5 TrustZone Memory Protection Controller emulation
*
* Copyright (c) 2018 Linaro Limited
* Written by Peter Maydell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or
* (at your option) any later version.
*/
/* This is a model of the TrustZone memory protection controller (MPC).
* It is documented in the ARM CoreLink SIE-200 System IP for Embedded TRM
* (DDI 0571G):
* https://developer.arm.com/products/architecture/m-profile/docs/ddi0571/g
*
* The MPC sits in front of memory and allows secure software to
* configure it to either pass through or reject transactions.
* Rejected transactions may be configured to either be aborted, or to
* behave as RAZ/WI. An interrupt can be signalled for a rejected transaction.
*
* The MPC has a register interface which the guest uses to configure it.
*
* QEMU interface:
* + sysbus MMIO region 0: MemoryRegion for the MPC's config registers
* + sysbus MMIO region 1: MemoryRegion for the upstream end of the MPC
* + Property "downstream": MemoryRegion defining the downstream memory
* + Named GPIO output "irq": set for a transaction-failed interrupt
*/
#ifndef TZ_MPC_H
#define TZ_MPC_H
#include "hw/sysbus.h"
#define TYPE_TZ_MPC "tz-mpc"
#define TZ_MPC(obj) OBJECT_CHECK(TZMPC, (obj), TYPE_TZ_MPC)
#define TZ_NUM_PORTS 16
#define TYPE_TZ_MPC_IOMMU_MEMORY_REGION "tz-mpc-iommu-memory-region"
typedef struct TZMPC TZMPC;
struct TZMPC {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
/* State */
uint32_t ctrl;
uint32_t blk_idx;
uint32_t int_stat;
uint32_t int_en;
uint32_t int_info1;
uint32_t int_info2;
uint32_t *blk_lut;
qemu_irq irq;
/* Properties */
MemoryRegion *downstream;
hwaddr blocksize;
uint32_t blk_max;
/* MemoryRegions exposed to user */
MemoryRegion regmr;
IOMMUMemoryRegion upstream;
/* MemoryRegion used internally */
MemoryRegion blocked_io;
AddressSpace downstream_as;
AddressSpace blocked_io_as;
};
#endif
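
As a rough illustration of the translation described in the header comment above, the sketch below models the block-LUT decision: the incoming address selects a block of "blocksize" bytes, the corresponding LUT bit selects pass-through versus blocked, and a blocked access is either aborted or treated as RAZ/WI. The field names mirror the struct above, but the semantics (a set bit meaning "allowed") and the decision logic are a simplified toy, not the device's register-accurate behaviour.

    /* Simplified, standalone model of a block-LUT gated access check.
     * This illustrates the scheme described above; it is not the MPC's
     * register-accurate behaviour.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t blocksize;     /* bytes covered by one LUT bit */
        uint32_t *blk_lut;      /* toy semantics: 1 = allowed, 0 = blocked */
        uint32_t blk_max;       /* number of valid LUT words */
        bool abort_on_block;    /* otherwise blocked accesses are RAZ/WI */
    } ToyMPC;

    typedef enum { MPC_PASS, MPC_RAZWI, MPC_ABORT } MPCResult;

    static MPCResult toy_mpc_check(const ToyMPC *s, uint64_t addr)
    {
        uint64_t blk = addr / s->blocksize;
        uint32_t word = blk / 32, bit = blk % 32;

        if (word >= s->blk_max || !(s->blk_lut[word] & (1u << bit))) {
            return s->abort_on_block ? MPC_ABORT : MPC_RAZWI;
        }
        return MPC_PASS;
    }

    int main(void)
    {
        uint32_t lut[2] = { 0x00000001, 0 };    /* only block 0 allowed */
        ToyMPC mpc = { .blocksize = 0x10000, .blk_lut = lut,
                       .blk_max = 2, .abort_on_block = false };

        printf("0x00000 -> %d\n", toy_mpc_check(&mpc, 0x00000)); /* pass */
        printf("0x20000 -> %d\n", toy_mpc_check(&mpc, 0x20000)); /* RAZ/WI */
        return 0;
    }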


@@ -506,6 +506,8 @@
#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
@@ -655,6 +657,11 @@
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKCTL2_TLS 0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
@@ -981,6 +988,7 @@
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */


@@ -260,6 +260,7 @@ struct virtio_gpu_cmd_submit {
};
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {


@@ -57,6 +57,9 @@
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
* with the same MAC.
*/
#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */
#ifndef VIRTIO_NET_NO_LEGACY


@@ -1,6 +1,6 @@
SPDX-Exception-Identifier: Linux-syscall-note
SPDX-URL: https://spdx.org/licenses/Linux-syscall-note.html
SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-1.0+, LGPL-2.0, LGPL-2.0+, LGPL-2.1, LGPL-2.1+
SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-1.0+, LGPL-2.0, LGPL-2.0+, LGPL-2.1, LGPL-2.1+, GPL-2.0-only, GPL-2.0-or-later
Usage-Guide:
This exception is used together with one of the above SPDX-Licenses
to mark user space API (uapi) header files so they can be included


@@ -1,5 +1,7 @@
Valid-License-Identifier: GPL-2.0
Valid-License-Identifier: GPL-2.0-only
Valid-License-Identifier: GPL-2.0+
Valid-License-Identifier: GPL-2.0-or-later
SPDX-URL: https://spdx.org/licenses/GPL-2.0.html
Usage-Guide:
To use this license in source code, put one of the following SPDX
@@ -7,8 +9,12 @@ Usage-Guide:
guidelines in the licensing rules documentation.
For 'GNU General Public License (GPL) version 2 only' use:
SPDX-License-Identifier: GPL-2.0
or
SPDX-License-Identifier: GPL-2.0-only
For 'GNU General Public License (GPL) version 2 or any later version' use:
SPDX-License-Identifier: GPL-2.0+
or
SPDX-License-Identifier: GPL-2.0-or-later
License-Text:
GNU GENERAL PUBLIC LICENSE


@@ -91,6 +91,7 @@ struct kvm_regs {
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)


@@ -354,5 +354,6 @@
#define __NR_pkey_alloc (__NR_SYSCALL_BASE + 395)
#define __NR_pkey_free (__NR_SYSCALL_BASE + 396)
#define __NR_statx (__NR_SYSCALL_BASE + 397)
#define __NR_rseq (__NR_SYSCALL_BASE + 398)
#endif /* _ASM_ARM_UNISTD_COMMON_H */


@@ -91,6 +91,7 @@ struct kvm_regs {
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)


@@ -732,9 +732,11 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
#define __NR_io_pgetevents 292
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#undef __NR_syscalls
#define __NR_syscalls 292
#define __NR_syscalls 293
/*
* 32 bit systems traditionally used different


@@ -398,5 +398,6 @@
#define __NR_pkey_alloc 384
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
#define __NR_rseq 387
#endif /* _ASM_POWERPC_UNISTD_H_ */


@@ -382,5 +382,7 @@
#define __NR_pkey_free 382
#define __NR_statx 383
#define __NR_arch_prctl 384
#define __NR_io_pgetevents 385
#define __NR_rseq 386
#endif /* _ASM_X86_UNISTD_32_H */


@@ -334,5 +334,7 @@
#define __NR_pkey_alloc 330
#define __NR_pkey_free 331
#define __NR_statx 332
#define __NR_io_pgetevents 333
#define __NR_rseq 334
#endif /* _ASM_X86_UNISTD_64_H */


@@ -287,6 +287,8 @@
#define __NR_pkey_alloc (__X32_SYSCALL_BIT + 330)
#define __NR_pkey_free (__X32_SYSCALL_BIT + 331)
#define __NR_statx (__X32_SYSCALL_BIT + 332)
#define __NR_io_pgetevents (__X32_SYSCALL_BIT + 333)
#define __NR_rseq (__X32_SYSCALL_BIT + 334)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)


@@ -677,10 +677,10 @@ struct kvm_ioeventfd {
};
#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
KVM_X86_DISABLE_EXITS_HTL | \
KVM_X86_DISABLE_EXITS_HLT | \
KVM_X86_DISABLE_EXITS_PAUSE)
/* for KVM_ENABLE_CAP */
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_BPB 152
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
#ifdef KVM_CAP_IRQ_ROUTING


@@ -30,6 +30,7 @@ enum {
SEV_PDH_GEN,
SEV_PDH_CERT_EXPORT,
SEV_PEK_CERT_IMPORT,
SEV_GET_ID,
SEV_MAX,
};
@@ -123,6 +124,17 @@ struct sev_user_data_pdh_cert_export {
__u32 cert_chain_len; /* In/Out */
} __attribute__((packed));
/**
* struct sev_user_data_get_id - GET_ID command parameters
*
* @socket1: Buffer to pass unique ID of first socket
* @socket2: Buffer to pass unique ID of second socket
*/
struct sev_user_data_get_id {
__u8 socket1[64]; /* Out */
__u8 socket2[64]; /* Out */
} __attribute__((packed));
/**
* struct sev_issue_cmd - SEV ioctl parameters
*


@@ -1238,6 +1238,7 @@ static void cortex_m3_initfn(Object *obj)
ARMCPU *cpu = ARM_CPU(obj);
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
cpu->midr = 0x410fc231;
cpu->pmsav7_dregion = 8;
cpu->id_pfr0 = 0x00000030;
@@ -1262,6 +1263,7 @@ static void cortex_m4_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
cpu->midr = 0x410fc240; /* r0p0 */
cpu->pmsav7_dregion = 8;
@@ -1287,6 +1289,7 @@ static void cortex_m33_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_M);
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
cpu->midr = 0x410fd213; /* r0p3 */
@@ -1361,6 +1364,14 @@ static void cortex_r5_initfn(Object *obj)
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
}
static void cortex_r5f_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
cortex_r5_initfn(obj);
set_feature(&cpu->env, ARM_FEATURE_VFP3);
}
static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
{ .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
@@ -1821,6 +1832,7 @@ static const ARMCPUInfo arm_cpus[] = {
{ .name = "cortex-m33", .initfn = cortex_m33_initfn,
.class_init = arm_v7m_class_init },
{ .name = "cortex-r5", .initfn = cortex_r5_initfn },
{ .name = "cortex-r5f", .initfn = cortex_r5f_initfn },
{ .name = "cortex-a7", .initfn = cortex_a7_initfn },
{ .name = "cortex-a8", .initfn = cortex_a8_initfn },
{ .name = "cortex-a9", .initfn = cortex_a9_initfn },


@@ -1482,6 +1482,7 @@ enum arm_features {
ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
};
static inline int arm_feature(CPUARMState *env, int feature)


@@ -184,10 +184,15 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
* We use a MemoryListener to track mapping and unmapping of
* the regions during board creation, so the board models don't
* need to do anything special for the KVM case.
*
* Sometimes the address must be OR'ed with some other fields
* (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
* @kda_addr_ormask aims at storing the value of those fields.
*/
typedef struct KVMDevice {
struct kvm_arm_device_addr kda;
struct kvm_device_attr kdattr;
uint64_t kda_addr_ormask;
MemoryRegion *mr;
QSLIST_ENTRY(KVMDevice) entries;
int dev_fd;
@@ -234,6 +239,8 @@ static void kvm_arm_set_device_addr(KVMDevice *kd)
*/
if (kd->dev_fd >= 0) {
uint64_t addr = kd->kda.addr;
addr |= kd->kda_addr_ormask;
attr->addr = (uintptr_t)&addr;
ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
} else {
@@ -256,6 +263,7 @@ static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
kvm_arm_set_device_addr(kd);
}
memory_region_unref(kd->mr);
QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
g_free(kd);
}
memory_listener_unregister(&devlistener);
@@ -266,7 +274,7 @@ static Notifier notify = {
};
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
uint64_t attr, int dev_fd)
uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
KVMDevice *kd;
@@ -286,6 +294,7 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
kd->kdattr.group = group;
kd->kdattr.attr = attr;
kd->dev_fd = dev_fd;
kd->kda_addr_ormask = addr_ormask;
QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
memory_region_ref(kd->mr);
}
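
The new kda_addr_ormask handling above simply ORs extra attribute fields into the resolved base address before it is handed to KVM_SET_DEVICE_ATTR. The standalone sketch below shows that composition; the field layout (a small index in the low bits) is purely illustrative and is not the real KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION encoding, for which the KVM API documentation is the reference.

    /* Illustration of composing a device address attribute from a base
     * address plus an OR-mask of extra fields, as kvm_arm_set_device_addr()
     * now does.  The field layout here is made up for the example.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t make_region_attr(uint64_t base, uint64_t index)
    {
        /* Base addresses are page aligned, so the low bits are free to
         * carry additional fields such as a region index.
         */
        uint64_t ormask = index & 0xfff;    /* illustrative field only */

        return base | ormask;
    }

    int main(void)
    {
        uint64_t attr = make_region_attr(0x08000000, 1);    /* example base */

        printf("attr = 0x%" PRIx64 "\n", attr);
        return 0;
    }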


@@ -34,6 +34,7 @@ int kvm_arm_vcpu_init(CPUState *cs);
* @group: device control API group for setting addresses
* @attr: device control API address type
* @dev_fd: device control device file descriptor (or -1 if not supported)
* @addr_ormask: value to be OR'ed with resolved address
*
* Remember the memory region @mr, and when it is mapped by the
* machine model, tell the kernel that base address using the
@@ -45,7 +46,7 @@ int kvm_arm_vcpu_init(CPUState *cs);
* address at the point where machine init is complete.
*/
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
uint64_t attr, int dev_fd);
uint64_t attr, int dev_fd, uint64_t addr_ormask);
/**
* kvm_arm_init_cpreg_list:


@@ -1100,7 +1100,14 @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, TCGMemOp opc)
{
TCGv addr = gen_aa32_addr(s, a32, opc);
TCGv addr;
if (arm_dc_feature(s, ARM_FEATURE_M) &&
!arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
opc |= MO_ALIGN;
}
addr = gen_aa32_addr(s, a32, opc);
tcg_gen_qemu_ld_i32(val, addr, index, opc);
tcg_temp_free(addr);
}
@@ -1108,7 +1115,14 @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, TCGMemOp opc)
{
TCGv addr = gen_aa32_addr(s, a32, opc);
TCGv addr;
if (arm_dc_feature(s, ARM_FEATURE_M) &&
!arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
opc |= MO_ALIGN;
}
addr = gen_aa32_addr(s, a32, opc);
tcg_gen_qemu_st_i32(val, addr, index, opc);
tcg_temp_free(addr);
}
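
Both hunks above OR MO_ALIGN into the memop for M-profile cores without the Main Extension, because ARMv6-M and ARMv8-M Baseline do not support unaligned data accesses. As a standalone illustration, this is the natural-alignment condition that MO_ALIGN asks the memory subsystem to enforce for an N-byte access:

    /* Standalone illustration of the natural-alignment check requested by
     * MO_ALIGN: an N-byte access must be N-byte aligned.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool access_is_aligned(uint64_t addr, unsigned size_bytes)
    {
        /* size_bytes is assumed to be a power of two (1, 2, 4 or 8). */
        return (addr & (size_bytes - 1)) == 0;
    }

    int main(void)
    {
        printf("ldr  from 0x1002: %s\n",
               access_is_aligned(0x1002, 4) ? "ok" : "alignment fault");
        printf("ldrh from 0x1002: %s\n",
               access_is_aligned(0x1002, 2) ? "ok" : "alignment fault");
        return 0;
    }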
@@ -10095,13 +10109,13 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
!arm_dc_feature(s, ARM_FEATURE_V7)) {
int i;
bool found = false;
const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
0xf3b08040 /* dsb */,
0xf3b08050 /* dmb */,
0xf3b08060 /* isb */,
0xf3e08000 /* mrs */,
0xf000d000 /* bl */};
const uint32_t armv6m_mask[] = {0xffe0d000,
static const uint32_t armv6m_mask[] = {0xffe0d000,
0xfff0d0f0,
0xfff0d0f0,
0xfff0d0f0,
@@ -11039,8 +11053,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
break;
case 3: /* Special control operations. */
if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
!(arm_dc_feature(s, ARM_FEATURE_V6) &&
arm_dc_feature(s, ARM_FEATURE_M))) {
!arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
}
op = (insn >> 4) & 0xf;

vl.c

@@ -151,8 +151,8 @@ QEMUClockType rtc_clock;
int vga_interface_type = VGA_NONE;
static DisplayOptions dpy;
int no_frame;
static int num_serial_hds = 0;
static Chardev **serial_hds = NULL;
static int num_serial_hds;
static Chardev **serial_hds;
Chardev *parallel_hds[MAX_PARALLEL_PORTS];
Chardev *virtcon_hds[MAX_VIRTIO_CONSOLES];
int win2k_install_hack = 0;
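
For reference, the explicit initializers dropped above were redundant: objects with static storage duration are zero-initialized by the C language before main() runs, so behaviour is unchanged. A tiny standalone example:

    /* Objects with static storage duration start out zero-initialized,
     * so "static int x = 0;" and "static int x;" are equivalent.
     */
    #include <stdio.h>

    static int num_things;          /* implicitly 0 */
    static char **thing_table;      /* implicitly NULL */

    int main(void)
    {
        printf("num_things=%d table=%p\n", num_things, (void *)thing_table);
        return 0;
    }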