Merge tag 'pull-target-arm-20230824' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * hw/gpio/nrf51: implement DETECT signal
 * accel/kvm: Specify default IPA size for arm64
 * ptw: refactor, fix some FEAT_RME bugs
 * target/arm: Adjust PAR_EL1.SH for Device and Normal-NC memory types
 * target/arm/helper: Implement CNTHCTL_EL2.CNT[VP]MASK
 * Fix SME ST1Q
 * Fix 64-bit SSRA

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmTnIoUZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3vufEACPJcwyFvSBHDv4VQ6tbgOU
# zwjpUMv4RMKhCOjuxBlJ2DICwOcGNuKer0tc6wkH2T5Ebhoego1osYbRZZoawAJf
# ntg+Ndrx1QH9ORuGqYccLXtHnP741KiKggDHM05BJqB7rqtuH+N4fEn7Cdsw/DNg
# XuCYD5QrxMYvkSOD1l8W0aqp81ucYPgkFqLufypgxrXUiRZ1RBAmPF47BFFdnM8f
# NmrmT1LTF5jr70ySRB+ukK6BAGDc0CUfs6R6nYRwUjRPmSG2rrtUDGo+nOQGDqJo
# PHWmt7rdZQG2w7HVyE/yc3h/CQ3NciwWKbCkRlaoujxHx/B6DRynSeO3NXsP8ELu
# Gizoi3ltwHDQVIGQA19P5phZKHZf7x3MXmK4fDBGB9znvoSFTcjJqkdaN/ARXXO3
# e1vnK1MqnPI8Z1nGdeVIAUIrqhtLHnrrM7jf1tI/e4sjpl3prHq2PvQkakXu8clr
# H8bPZ9zZzyrrSbl4NhpaFTsUiYVxeLoJsNKAmG8dHb+9YsFGXTvEBhtR9eUxnbaV
# XyZ3jEdeW7/ngQ4C6XMD2ZDiKVdx2xJ2Pp5npvljldjmtGUvwQabKo+fPDt2fKjM
# BwjhHA50I633k4fYIwm8YOb70I4oxoL9Lr6PkKriWPMTI5r7+dtwgigREVwnCn+Y
# RsiByKMkDO2TcoQjvBZlCA==
# =3MJ8
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 24 Aug 2023 05:27:33 EDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20230824' of https://git.linaro.org/people/pmaydell/qemu-arm: (35 commits)
  target/arm: Fix 64-bit SSRA
  target/arm: Fix SME ST1Q
  target/arm/helper: Implement CNTHCTL_EL2.CNT[VP]MASK
  target/arm/helper: Check SCR_EL3.{NSE, NS} encoding for AT instructions
  target/arm: Pass security space rather than flag for AT instructions
  target/arm: Skip granule protection checks for AT instructions
  target/arm/helper: Fix tlbmask and tlbbits for TLBI VAE2*
  target/arm/ptw: Load stage-2 tables from realm physical space
  target/arm: Adjust PAR_EL1.SH for Device and Normal-NC memory types
  target/arm/ptw: Report stage 2 fault level for stage 2 faults on stage 1 ptw
  target/arm/ptw: Check for block descriptors at invalid levels
  target/arm/ptw: Set attributes correctly for MMU disabled data accesses
  target/arm/ptw: Drop S1Translate::out_secure
  target/arm/ptw: Remove S1Translate::in_secure
  target/arm/ptw: Remove last uses of ptw->in_secure
  target/arm/ptw: Only fold in NSTable bit effects in Secure state
  target/arm: Pass an ARMSecuritySpace to arm_is_el2_enabled_secstate()
  target/arm/ptw: Pass an ARMSecuritySpace to arm_hcr_el2_eff_secstate()
  target/arm/ptw: Pass ARMSecurityState to regime_translation_disabled()
  target/arm/ptw: Pass ptw into get_phys_addr_pmsa*() and get_phys_addr_disabled()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi committed 2023-08-24 10:08:33 -04:00
commit 50e7a40af3
26 changed files with 494 additions and 199 deletions

View File

@ -1454,15 +1454,13 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
return NULL;
}
static int kvm_dirty_ring_reaper_init(KVMState *s)
static void kvm_dirty_ring_reaper_init(KVMState *s)
{
struct KVMDirtyRingReaper *r = &s->reaper;
qemu_thread_create(&r->reaper_thr, "kvm-reaper",
kvm_dirty_ring_reaper_thread,
s, QEMU_THREAD_JOINABLE);
return 0;
}
static int kvm_dirty_ring_init(KVMState *s)
@ -2458,7 +2456,7 @@ static int kvm_init(MachineState *ms)
KVMState *s;
const KVMCapabilityInfo *missing_cap;
int ret;
int type = 0;
int type;
uint64_t dirty_log_manual_caps;
qemu_mutex_init(&kml_slots_lock);
@ -2523,6 +2521,13 @@ static int kvm_init(MachineState *ms)
type = mc->kvm_type(ms, kvm_type);
} else if (mc->kvm_type) {
type = mc->kvm_type(ms, NULL);
} else {
type = kvm_arch_get_default_type(ms);
}
if (type < 0) {
ret = -EINVAL;
goto err;
}
do {
@ -2737,10 +2742,7 @@ static int kvm_init(MachineState *ms)
}
if (s->kvm_dirty_ring_size) {
ret = kvm_dirty_ring_reaper_init(s);
if (ret) {
goto err;
}
kvm_dirty_ring_reaper_init(s);
}
if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
@ -2758,6 +2760,7 @@ err:
if (s->fd != -1) {
close(s->fd);
}
g_free(s->as);
g_free(s->memory_listener.slots);
return ret;
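
For reference, the VM-type selection in kvm_init() after this change condenses to the sketch below. It is illustrative only: pick_kvm_vm_type() is a made-up name and the first condition is paraphrased, but MACHINE_GET_CLASS(), mc->kvm_type() and the new kvm_arch_get_default_type() hook are the calls used in the hunk above (the hook is declared next to the other kvm_arch_* prototypes later in this diff).

/* Sketch, not tree code: assumes QEMU's hw/boards.h and sysemu/kvm.h. */
static int pick_kvm_vm_type(MachineState *ms, const char *kvm_type_opt)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    int type;

    if (kvm_type_opt && mc->kvm_type) {
        /* an explicit kvm-type machine option was given */
        type = mc->kvm_type(ms, kvm_type_opt);
    } else if (mc->kvm_type) {
        /* machine-level default */
        type = mc->kvm_type(ms, NULL);
    } else {
        /* new arch-level fallback; on Arm it reports the default IPA size */
        type = kvm_arch_get_default_type(ms);
    }
    /* a negative value now means "no usable type"; kvm_init() fails with -EINVAL */
    return type;
}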

View File

@ -2913,7 +2913,7 @@ static int virt_kvm_type(MachineState *ms, const char *type_str)
"require an IPA range (%d bits) larger than "
"the one supported by the host (%d bits)",
requested_pa_size, max_vm_pa_size);
exit(1);
return -1;
}
/*
* We return the requested PA log size, unless KVM only supports

View File

@ -78,6 +78,7 @@ static void update_state(NRF51GPIOState *s)
int pull;
size_t i;
bool connected_out, dir, connected_in, out, in, input;
bool assert_detect = false;
for (i = 0; i < NRF51_GPIO_PINS; i++) {
pull = pull_value(s->cnf[i]);
@ -99,7 +100,15 @@ static void update_state(NRF51GPIOState *s)
qemu_log_mask(LOG_GUEST_ERROR,
"GPIO pin %zu short circuited\n", i);
}
if (!connected_in) {
if (connected_in) {
uint32_t detect_config = extract32(s->cnf[i], 16, 2);
if ((detect_config == 2) && (in == 1)) {
assert_detect = true;
}
if ((detect_config == 3) && (in == 0)) {
assert_detect = true;
}
} else {
/*
* Floating input: the output stimulates IN if connected,
* otherwise pull-up/pull-down resistors put a value on both
@ -116,6 +125,8 @@ static void update_state(NRF51GPIOState *s)
}
update_output_irq(s, i, connected_out, out);
}
qemu_set_irq(s->detect, assert_detect);
}
/*
@ -291,6 +302,7 @@ static void nrf51_gpio_init(Object *obj)
qdev_init_gpio_in(DEVICE(s), nrf51_gpio_set, NRF51_GPIO_PINS);
qdev_init_gpio_out(DEVICE(s), s->output, NRF51_GPIO_PINS);
qdev_init_gpio_out_named(DEVICE(s), &s->detect, "detect", 1);
}
static void nrf51_gpio_class_init(ObjectClass *klass, void *data)
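
In short, the DETECT handling added to update_state() above reduces to the per-pin predicate below, OR-ed across all pins and driven out through the new "detect" line via qemu_set_irq(s->detect, assert_detect). This is a sketch for illustration (pin_senses_detect() is not a function in the QEMU source); the CNF bit layout and extract32() usage match the hunk above.

/*
 * CNF[i] bits [17:16] are the SENSE field: 2 = sense HIGH, 3 = sense LOW;
 * other values disable sensing for that pin.
 */
static bool pin_senses_detect(uint32_t cnf, bool connected_in, bool in)
{
    uint32_t detect_config = extract32(cnf, 16, 2);

    if (!connected_in) {
        /* input buffer disconnected: this pin never contributes to DETECT */
        return false;
    }
    return (detect_config == 2 && in) || (detect_config == 3 && !in);
}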

View File

@ -29,7 +29,6 @@
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "elf.h"
#include "kvm_mips.h"
#include "hw/char/serial.h"
#include "hw/intc/loongson_liointc.h"
#include "hw/mips/mips.h"
@ -612,7 +611,6 @@ static void loongson3v_machine_class_init(ObjectClass *oc, void *data)
mc->max_cpus = LOONGSON_MAX_VCPUS;
mc->default_ram_id = "loongson3.highram";
mc->default_ram_size = 1600 * MiB;
mc->kvm_type = mips_kvm_type;
mc->minimum_page_bits = 14;
mc->default_nic = "virtio-net-pci";
}

View File

@ -3105,7 +3105,7 @@ static int spapr_kvm_type(MachineState *machine, const char *vm_type)
}
error_report("Unknown kvm-type specified '%s'", vm_type);
exit(1);
return -1;
}
/*

View File

@ -64,6 +64,7 @@ struct NRF51GPIOState {
uint32_t old_out_connected;
qemu_irq output[NRF51_GPIO_PINS];
qemu_irq detect;
};

View File

@ -369,6 +369,8 @@ int kvm_arch_get_registers(CPUState *cpu);
int kvm_arch_put_registers(CPUState *cpu, int level);
int kvm_arch_get_default_type(MachineState *ms);
int kvm_arch_init(MachineState *ms, KVMState *s);
int kvm_arch_init_vcpu(CPUState *cpu);

View File

@ -365,6 +365,15 @@ void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words))
process_command_cb = pc_cb;
}
static void qtest_install_gpio_out_intercept(DeviceState *dev, const char *name, int n)
{
qemu_irq *disconnected = g_new0(qemu_irq, 1);
qemu_irq icpt = qemu_allocate_irq(qtest_irq_handler,
disconnected, n);
*disconnected = qdev_intercept_gpio_out(dev, icpt, name, n);
}
static void qtest_process_command(CharBackend *chr, gchar **words)
{
const gchar *command;
@ -388,8 +397,13 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
|| strcmp(words[0], "irq_intercept_in") == 0) {
DeviceState *dev;
NamedGPIOList *ngl;
bool is_named;
bool is_outbound;
bool interception_succeeded = false;
g_assert(words[1]);
is_named = words[2] != NULL;
is_outbound = words[0][14] == 'o';
dev = DEVICE(object_resolve_path(words[1], NULL));
if (!dev) {
qtest_send_prefix(chr);
@ -397,6 +411,12 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
return;
}
if (is_named && !is_outbound) {
qtest_send_prefix(chr);
qtest_send(chr, "FAIL Interception of named in-GPIOs not yet supported\n");
return;
}
if (irq_intercept_dev) {
qtest_send_prefix(chr);
if (irq_intercept_dev != dev) {
@ -408,28 +428,30 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
}
QLIST_FOREACH(ngl, &dev->gpios, node) {
/* We don't support intercept of named GPIOs yet */
if (ngl->name) {
continue;
}
if (words[0][14] == 'o') {
int i;
for (i = 0; i < ngl->num_out; ++i) {
qemu_irq *disconnected = g_new0(qemu_irq, 1);
qemu_irq icpt = qemu_allocate_irq(qtest_irq_handler,
disconnected, i);
*disconnected = qdev_intercept_gpio_out(dev, icpt,
ngl->name, i);
/* We don't support inbound interception of named GPIOs yet */
if (is_outbound) {
/* NULL is valid and matchable, for "unnamed GPIO" */
if (g_strcmp0(ngl->name, words[2]) == 0) {
int i;
for (i = 0; i < ngl->num_out; ++i) {
qtest_install_gpio_out_intercept(dev, ngl->name, i);
}
interception_succeeded = true;
}
} else {
qemu_irq_intercept_in(ngl->in, qtest_irq_handler,
ngl->num_in);
interception_succeeded = true;
}
}
irq_intercept_dev = dev;
qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
if (interception_succeeded) {
irq_intercept_dev = dev;
qtest_send(chr, "OK\n");
} else {
qtest_send(chr, "FAIL No intercepts installed\n");
}
} else if (strcmp(words[0], "set_irq_in") == 0) {
DeviceState *dev;
qemu_irq irq;

View File

@ -2169,6 +2169,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
set_feature(env, ARM_FEATURE_VBAR);
}
#ifndef CONFIG_USER_ONLY
if (tcg_enabled() && cpu_isar_feature(aa64_rme, cpu)) {
arm_register_el_change_hook(cpu, &gt_rme_post_el_change, 0);
}
#endif
register_cp_regs_for_features(cpu);
arm_cpu_register_gdb_regs_for_features(cpu);

View File

@ -1115,6 +1115,7 @@ struct ArchCPU {
};
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
void arm_cpu_post_init(Object *obj);
@ -1743,6 +1744,9 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)
#define CNTHCTL_CNTVMASK (1 << 18)
#define CNTHCTL_CNTPMASK (1 << 19)
/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);
@ -2504,17 +2508,19 @@ static inline bool arm_is_secure(CPUARMState *env)
/*
* Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
* This corresponds to the pseudocode EL2Enabled()
* This corresponds to the pseudocode EL2Enabled().
*/
static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure)
static inline bool arm_is_el2_enabled_secstate(CPUARMState *env,
ARMSecuritySpace space)
{
assert(space != ARMSS_Root);
return arm_feature(env, ARM_FEATURE_EL2)
&& (!secure || (env->cp15.scr_el3 & SCR_EEL2));
&& (space != ARMSS_Secure || (env->cp15.scr_el3 & SCR_EEL2));
}
static inline bool arm_is_el2_enabled(CPUARMState *env)
{
return arm_is_el2_enabled_secstate(env, arm_is_secure_below_el3(env));
return arm_is_el2_enabled_secstate(env, arm_security_space_below_el3(env));
}
#else
@ -2538,7 +2544,8 @@ static inline bool arm_is_secure(CPUARMState *env)
return false;
}
static inline bool arm_is_el2_enabled_secstate(CPUARMState *env, bool secure)
static inline bool arm_is_el2_enabled_secstate(CPUARMState *env,
ARMSecuritySpace space)
{
return false;
}
@ -2555,7 +2562,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
* "for all purposes other than a direct read or write access of HCR_EL2."
* Not included here is HCR_RW.
*/
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure);
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);

View File

@ -2608,6 +2608,39 @@ static uint64_t gt_get_countervalue(CPUARMState *env)
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
static void gt_update_irq(ARMCPU *cpu, int timeridx)
{
CPUARMState *env = &cpu->env;
uint64_t cnthctl = env->cp15.cnthctl_el2;
ARMSecuritySpace ss = arm_security_space(env);
/* ISTATUS && !IMASK */
int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
/*
* If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
* It is RES0 in Secure and NonSecure state.
*/
if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
((timeridx == GTIMER_VIRT && (cnthctl & CNTHCTL_CNTVMASK)) ||
(timeridx == GTIMER_PHYS && (cnthctl & CNTHCTL_CNTPMASK)))) {
irqstate = 0;
}
qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
trace_arm_gt_update_irq(timeridx, irqstate);
}
void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
{
/*
* Changing security state between Root and Secure/NonSecure, which may
* happen when switching EL, can change the effective value of CNTHCTL_EL2
* mask bits. Update the IRQ state accordingly.
*/
gt_update_irq(cpu, GTIMER_VIRT);
gt_update_irq(cpu, GTIMER_PHYS);
}
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
@ -2623,13 +2656,9 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
/* Note that this must be unsigned 64 bit arithmetic: */
int istatus = count - offset >= gt->cval;
uint64_t nexttick;
int irqstate;
gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
irqstate = (istatus && !(gt->ctl & 2));
qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
if (istatus) {
/* Next transition is when count rolls back over to zero */
nexttick = UINT64_MAX;
@ -2648,14 +2677,14 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
} else {
timer_mod(cpu->gt_timer[timeridx], nexttick);
}
trace_arm_gt_recalc(timeridx, irqstate, nexttick);
trace_arm_gt_recalc(timeridx, nexttick);
} else {
/* Timer disabled: ISTATUS and timer output always clear */
gt->ctl &= ~4;
qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
timer_del(cpu->gt_timer[timeridx]);
trace_arm_gt_recalc_disabled(timeridx);
}
gt_update_irq(cpu, timeridx);
}
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
@ -2759,10 +2788,8 @@ static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
* IMASK toggled: don't need to recalculate,
* just set the interrupt line based on ISTATUS
*/
int irqstate = (oldval & 4) && !(value & 2);
trace_arm_gt_imask_toggle(timeridx, irqstate);
qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
trace_arm_gt_imask_toggle(timeridx);
gt_update_irq(cpu, timeridx);
}
}
@ -2888,6 +2915,21 @@ static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = env_archcpu(env);
uint32_t oldval = env->cp15.cnthctl_el2;
raw_write(env, ri, value);
if ((oldval ^ value) & CNTHCTL_CNTVMASK) {
gt_update_irq(cpu, GTIMER_VIRT);
} else if ((oldval ^ value) & CNTHCTL_CNTPMASK) {
gt_update_irq(cpu, GTIMER_PHYS);
}
}
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -3342,9 +3384,22 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
}
#ifdef CONFIG_TCG
static int par_el1_shareability(GetPhysAddrResult *res)
{
/*
* The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
* memory -- see pseudocode PAREncodeShareability().
*/
if (((res->cacheattrs.attrs & 0xf0) == 0) ||
res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
return 2;
}
return res->cacheattrs.shareability;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool is_secure)
ARMSecuritySpace ss)
{
bool ret;
uint64_t par64;
@ -3352,8 +3407,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
ARMMMUFaultInfo fi = {};
GetPhysAddrResult res = {};
ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx,
is_secure, &res, &fi);
/*
* I_MXTJT: Granule protection checks are not performed on the final address
* of a successful translation.
*/
ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
&res, &fi);
/*
* ATS operations only do S1 or S1+S2 translations, so we never
@ -3470,7 +3529,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
par64 |= (1 << 9); /* NS */
}
par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
par64 |= res.cacheattrs.shareability << 7; /* SH */
par64 |= par_el1_shareability(&res) << 7; /* SH */
} else {
uint32_t fsr = arm_fi_to_lfsc(&fi);
@ -3518,7 +3577,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
uint64_t par64;
ARMMMUIdx mmu_idx;
int el = arm_current_el(env);
bool secure = arm_is_secure_below_el3(env);
ARMSecuritySpace ss = arm_security_space(env);
switch (ri->opc2 & 6) {
case 0:
@ -3526,10 +3585,9 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
switch (el) {
case 3:
mmu_idx = ARMMMUIdx_E3;
secure = true;
break;
case 2:
g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
/* fall through */
case 1:
if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
@ -3547,10 +3605,9 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
switch (el) {
case 3:
mmu_idx = ARMMMUIdx_E10_0;
secure = true;
break;
case 2:
g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
mmu_idx = ARMMMUIdx_Stage1_E0;
break;
case 1:
@ -3563,18 +3620,18 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
case 4:
/* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
mmu_idx = ARMMMUIdx_E10_1;
secure = false;
ss = ARMSS_NonSecure;
break;
case 6:
/* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
mmu_idx = ARMMMUIdx_E10_0;
secure = false;
ss = ARMSS_NonSecure;
break;
default:
g_assert_not_reached();
}
par64 = do_ats_write(env, value, access_type, mmu_idx, secure);
par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
@ -3591,7 +3648,8 @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t par64;
/* There is no SecureEL2 for AArch32. */
par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false);
par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
ARMSS_NonSecure);
A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
@ -3600,6 +3658,22 @@ static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
#endif /* CONFIG_TCG */
}
static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
/*
* R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
* lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
* only happen when executing at EL3 because that combination also causes an
* illegal exception return. We don't need to check FEAT_RME either, because
* scr_write() ensures that the NSE bit is not set otherwise.
*/
if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@ -3607,7 +3681,7 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
!(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
return CP_ACCESS_TRAP;
}
return CP_ACCESS_OK;
return at_e012_access(env, ri, isread);
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
@ -3616,7 +3690,6 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
#ifdef CONFIG_TCG
MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
ARMMMUIdx mmu_idx;
int secure = arm_is_secure_below_el3(env);
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
@ -3636,7 +3709,6 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
break;
case 6: /* AT S1E3R, AT S1E3W */
mmu_idx = ARMMMUIdx_E3;
secure = true;
break;
default:
g_assert_not_reached();
@ -3656,7 +3728,7 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
}
env->cp15.par_el[1] = do_ats_write(env, value, access_type,
mmu_idx, secure);
mmu_idx, arm_security_space(env));
#else
/* Handled by hardware accelerator. */
g_assert_not_reached();
@ -4650,6 +4722,21 @@ static int vae1_tlbmask(CPUARMState *env)
return mask;
}
static int vae2_tlbmask(CPUARMState *env)
{
uint64_t hcr = arm_hcr_el2_eff(env);
uint16_t mask;
if (hcr & HCR_E2H) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0;
} else {
mask = ARMMMUIdxBit_E2;
}
return mask;
}
/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
uint64_t addr)
@ -4676,6 +4763,25 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
return tlbbits_for_regime(env, mmu_idx, addr);
}
static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
{
uint64_t hcr = arm_hcr_el2_eff(env);
ARMMMUIdx mmu_idx;
/*
* Only the regime of the mmu_idx below is significant.
* Regime EL2&0 has two ranges with separate TBI configuration, while EL2
* only has one.
*/
if (hcr & HCR_E2H) {
mmu_idx = ARMMMUIdx_E20_2;
} else {
mmu_idx = ARMMMUIdx_E2;
}
return tlbbits_for_regime(env, mmu_idx, addr);
}
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@ -4768,10 +4874,11 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
* flush-last-level-only.
*/
CPUState *cs = env_cpu(env);
int mask = e2_tlbmask(env);
int mask = vae2_tlbmask(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
int bits = vae2_tlbbits(env, pageaddr);
tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
}
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@ -4825,11 +4932,11 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
CPUState *cs = env_cpu(env);
int mask = vae2_tlbmask(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
int bits = vae2_tlbbits(env, pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
ARMMMUIdxBit_E2, bits);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@ -5001,11 +5108,6 @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env,
do_rvae_write(env, value, vae1_tlbmask(env), true);
}
static int vae2_tlbmask(CPUARMState *env)
{
return ARMMMUIdxBit_E2;
}
static void tlbi_aa64_rvae2_write(CPUARMState *env,
const ARMCPRegInfo *ri,
uint64_t value)
@ -5461,38 +5563,38 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.fgt = FGT_ATS1E1R,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.fgt = FGT_ATS1E1W,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.fgt = FGT_ATS1E0R,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.fgt = FGT_ATS1E0W,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
.access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
/* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
{ .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
@ -5772,11 +5874,13 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
* Bits that are not included here:
* RW (read from SCR_EL3.RW as needed)
*/
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
{
uint64_t ret = env->cp15.hcr_el2;
if (!arm_is_el2_enabled_secstate(env, secure)) {
assert(space != ARMSS_Root);
if (!arm_is_el2_enabled_secstate(env, space)) {
/*
* "This register has no effect if EL2 is not enabled in the
* current Security state". This is ARMv8.4-SecEL2 speak for
@ -5840,7 +5944,7 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
if (arm_feature(env, ARM_FEATURE_M)) {
return 0;
}
return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}
/*
@ -6141,7 +6245,8 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
* reset values as IMPDEF. We choose to reset to 3 to comply with
* both ARMv7 and ARMv8.
*/
.access = PL2_RW, .resetvalue = 3,
.access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
.writefn = gt_cnthctl_write, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
{ .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
@ -8033,12 +8138,12 @@ static const ARMCPRegInfo ats1e1_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.fgt = FGT_ATS1E1RP,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
{ .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
.fgt = FGT_ATS1E1WP,
.writefn = ats_write64 },
.accessfn = at_e012_access, .writefn = ats_write64 },
};
static const ARMCPRegInfo ats1cp_reginfo[] = {

View File

@ -1190,12 +1190,11 @@ typedef struct GetPhysAddrResult {
} GetPhysAddrResult;
/**
* get_phys_addr_with_secure: get the physical address for a virtual address
* get_phys_addr: get the physical address for a virtual address
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
* @mmu_idx: MMU index indicating required translation regime
* @is_secure: security state for the access
* @result: set on translation success.
* @fi: set to fault info if the translation fails
*
@ -1212,26 +1211,30 @@ typedef struct GetPhysAddrResult {
* * for PSMAv5 based systems we don't bother to return a full FSR format
* value.
*/
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
MMUAccessType access_type,
ARMMMUIdx mmu_idx, bool is_secure,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
__attribute__((nonnull));
/**
* get_phys_addr: get the physical address for a virtual address
* get_phys_addr_with_space_nogpc: get the physical address for a virtual
* address
* @env: CPUARMState
* @address: virtual address to get physical address for
* @access_type: 0 for read, 1 for write, 2 for execute
* @mmu_idx: MMU index indicating required translation regime
* @space: security space for the access
* @result: set on translation success.
* @fi: set to fault info if the translation fails
*
* Similarly, but use the security regime of @mmu_idx.
* Similar to get_phys_addr, but use the given security space and don't perform
* a Granule Protection Check on the resulting address.
*/
bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
MMUAccessType access_type,
ARMMMUIdx mmu_idx, ARMSecuritySpace space,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
__attribute__((nonnull));
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,

View File

@ -247,6 +247,13 @@ int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
return ret > 0 ? ret : 40;
}
int kvm_arch_get_default_type(MachineState *ms)
{
bool fixed_ipa;
int size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
return fixed_ipa ? 0 : size;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
int ret = 0;

View File

@ -51,13 +51,6 @@ typedef struct S1Translate {
* value being Stage2 vs Stage2_S distinguishes those.
*/
ARMSecuritySpace in_space;
/*
* in_secure: whether the translation regime is a Secure one.
* This is always equal to arm_space_is_secure(in_space).
* If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
* this field is updated accordingly.
*/
bool in_secure;
/*
* in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
* accesses will not update the guest page table access flags
@ -70,7 +63,6 @@ typedef struct S1Translate {
* Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
*/
bool in_s1_is_el0;
bool out_secure;
bool out_rw;
bool out_be;
ARMSecuritySpace out_space;
@ -165,22 +157,32 @@ static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
/*
* We're OK to check the current state of the CPU here because
* (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
* (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
* changes.
* (2) there's no way to do a lookup that cares about Stage 2 for a
* different security state to the current one for AArch64, and AArch32
* never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
* an NS stage 1+2 lookup while the NS bit is 0.)
*/
if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
if (!arm_el_is_aa64(env, 3)) {
return ARMMMUIdx_Phys_NS;
}
if (stage2idx == ARMMMUIdx_Stage2_S) {
s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
} else {
s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
}
return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
switch (arm_security_space_below_el3(env)) {
case ARMSS_NonSecure:
return ARMMMUIdx_Phys_NS;
case ARMSS_Realm:
return ARMMMUIdx_Phys_Realm;
case ARMSS_Secure:
if (stage2idx == ARMMMUIdx_Stage2_S) {
s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
} else {
s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
}
return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
default:
g_assert_not_reached();
}
}
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
@ -206,11 +208,12 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
bool is_secure)
ARMSecuritySpace space)
{
uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_M)) {
bool is_secure = arm_space_is_secure(space);
switch (env->v7m.mpu_ctrl[is_secure] &
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
case R_V7M_MPU_CTRL_ENABLE_MASK:
@ -229,18 +232,19 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
}
}
hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
switch (mmu_idx) {
case ARMMMUIdx_Stage2:
case ARMMMUIdx_Stage2_S:
/* HCR.DC means HCR.VM behaves as 1 */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
/* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_TGE) {
return true;
}
@ -250,6 +254,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_DC) {
return true;
}
@ -514,11 +519,21 @@ static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
}
}
static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
{
/*
* For stage 2 faults in Secure EL2, S1NS indicates
* whether the faulting IPA is in the Secure or NonSecure
* IPA space. For all other kinds of fault, it is false.
*/
return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
&& s2_mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Translate a S1 pagetable walk through S2 if needed. */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
hwaddr addr, ARMMMUFaultInfo *fi)
{
bool is_secure = ptw->in_secure;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
uint8_t pte_attrs;
@ -534,7 +549,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
S1Translate s2ptw = {
.in_mmu_idx = s2_mmu_idx,
.in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
.in_secure = arm_space_is_secure(s2_space),
.in_space = s2_space,
.in_debug = true,
};
@ -548,7 +562,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
pte_attrs = s2.cacheattrs.attrs;
ptw->out_host = NULL;
ptw->out_rw = false;
ptw->out_secure = s2.f.attrs.secure;
ptw->out_space = s2.f.attrs.space;
} else {
#ifdef CONFIG_TCG
@ -567,7 +580,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
ptw->out_rw = full->prot & PAGE_WRITE;
pte_attrs = full->pte_attrs;
ptw->out_secure = full->attrs.secure;
ptw->out_space = full->attrs.space;
#else
g_assert_not_reached();
@ -575,7 +587,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
}
if (regime_is_stage2(s2_mmu_idx)) {
uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
/*
@ -586,7 +598,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
fi->s1ns = !is_secure;
fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
return false;
}
}
@ -600,9 +612,9 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
fi->type = ARMFault_GPCFOnWalk;
}
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
fi->s1ns = !is_secure;
fi->stage2 = regime_is_stage2(s2_mmu_idx);
fi->s1ptw = fi->stage2;
fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
return false;
}
@ -625,8 +637,8 @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
} else {
/* Page tables are in MMIO. */
MemTxAttrs attrs = {
.secure = ptw->out_secure,
.space = ptw->out_space,
.secure = arm_space_is_secure(ptw->out_space),
};
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
@ -671,8 +683,8 @@ static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
} else {
/* Page tables are in MMIO. */
MemTxAttrs attrs = {
.secure = ptw->out_secure,
.space = ptw->out_space,
.secure = arm_space_is_secure(ptw->out_space),
};
AddressSpace *as = arm_addressspace(cs, attrs);
MemTxResult result = MEMTX_OK;
@ -701,7 +713,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
if (unlikely(!host)) {
fi->type = ARMFault_UnsuppAtomicUpdate;
fi->s1ptw = true;
return 0;
}
@ -720,11 +731,17 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
env->tlb_fi = NULL;
if (unlikely(flags & TLB_INVALID_MASK)) {
/*
* We know this must be a stage 2 fault because the granule
* protection table does not separately track read and write
* permission, so all GPC faults are caught in S1_ptw_translate():
* we only get here for "readable but not writeable".
*/
assert(fi->type != ARMFault_None);
fi->s2addr = ptw->out_virt;
fi->stage2 = true;
fi->s1ptw = true;
fi->s1ns = !ptw->in_secure;
fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
return 0;
}
@ -1544,6 +1561,25 @@ static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
return INT_MIN;
}
static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
ARMGranuleSize gran, int level)
{
/*
* See pseudocode AArch64.BlockDescSupported(): block descriptors
* are not valid at all levels, depending on the page size.
*/
switch (gran) {
case Gran4K:
return (level == 0 && ds) || level == 1 || level == 2;
case Gran16K:
return (level == 1 && ds) || level == 2;
case Gran64K:
return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
default:
g_assert_not_reached();
}
}
/**
* get_phys_addr_lpae: perform one stage of page table walk, LPAE format
*
@ -1766,7 +1802,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
ptw->in_ptw_idx += 1;
ptw->in_secure = false;
ptw->in_space = ARMSS_NonSecure;
}
@ -1780,8 +1815,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
new_descriptor = descriptor;
restart_atomic_update:
if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
/* Invalid, or the Reserved level 3 encoding */
if (!(descriptor & 1) ||
(!(descriptor & 2) &&
!lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
/* Invalid, or a block descriptor at an invalid level */
goto do_translation_fault;
}
@ -1868,11 +1905,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* Extract attributes from the (modified) descriptor, and apply
* table descriptors. Stage 2 table descriptors do not include
* any attribute fields. HPD disables all the table attributes
* except NSTable.
* except NSTable (which we have already handled).
*/
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
if (!regime_is_stage2(mmu_idx)) {
attrs |= !ptw->in_secure << 5; /* NS */
if (!param.hpd) {
attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
/*
@ -2022,24 +2058,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
do_translation_fault:
fi->type = ARMFault_Translation;
do_fault:
fi->level = level;
/* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
if (fi->s1ptw) {
/* Retain the existing stage 2 fi->level */
assert(fi->stage2);
} else {
fi->level = level;
fi->stage2 = regime_is_stage2(mmu_idx);
}
fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
return true;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool is_secure, GetPhysAddrResult *result,
static bool get_phys_addr_pmsav5(CPUARMState *env,
S1Translate *ptw,
uint32_t address,
MMUAccessType access_type,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
int n;
uint32_t mask;
uint32_t base;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
bool is_user = regime_is_user(env, mmu_idx);
if (regime_translation_disabled(env, mmu_idx, is_secure)) {
if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
/* MPU disabled. */
result->f.phys_addr = address;
result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@ -2194,20 +2237,24 @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool secure, GetPhysAddrResult *result,
static bool get_phys_addr_pmsav7(CPUARMState *env,
S1Translate *ptw,
uint32_t address,
MMUAccessType access_type,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
int n;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
bool is_user = regime_is_user(env, mmu_idx);
bool secure = arm_space_is_secure(ptw->in_space);
result->f.phys_addr = address;
result->f.lg_page_size = TARGET_PAGE_BITS;
result->f.prot = 0;
if (regime_translation_disabled(env, mmu_idx, secure) ||
if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
m_is_ppb_region(env, address)) {
/*
* MPU disabled or M profile PPB access: use default memory map.
@ -2451,7 +2498,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
* are done in arm_v7m_load_vector(), which always does a direct
* read using address_space_ldl(), rather than going via this function.
*/
if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
/* MPU disabled */
hit = true;
} else if (m_is_ppb_region(env, address)) {
hit = true;
@ -2720,12 +2768,16 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
}
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool secure, GetPhysAddrResult *result,
static bool get_phys_addr_pmsav8(CPUARMState *env,
S1Translate *ptw,
uint32_t address,
MMUAccessType access_type,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
V8M_SAttributes sattrs = {};
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
bool secure = arm_space_is_secure(ptw->in_space);
bool ret;
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
@ -3029,12 +3081,14 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
* MMU disabled. S1 addresses within aa64 translation regimes are
* still checked for bounds -- see AArch64.S1DisabledOutput().
*/
static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
static bool get_phys_addr_disabled(CPUARMState *env,
S1Translate *ptw,
target_ulong address,
MMUAccessType access_type,
ARMMMUIdx mmu_idx, bool is_secure,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
uint8_t memattr = 0x00; /* Device nGnRnE */
uint8_t shareability = 0; /* non-shareable */
int r_el;
@ -3080,7 +3134,7 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
/* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
if (r_el == 1) {
uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
if (hcr & HCR_DC) {
if (hcr & HCR_DCT) {
memattr = 0xf0; /* Tagged, Normal, WB, RWA */
@ -3089,11 +3143,13 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
}
}
}
if (memattr == 0 && access_type == MMU_INST_FETCH) {
if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
memattr = 0xee; /* Normal, WT, RA, NT */
} else {
memattr = 0x44; /* Normal, NC, No */
if (memattr == 0) {
if (access_type == MMU_INST_FETCH) {
if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
memattr = 0xee; /* Normal, WT, RA, NT */
} else {
memattr = 0x44; /* Normal, NC, No */
}
}
shareability = 2; /* outer shareable */
}
@ -3117,7 +3173,6 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
{
hwaddr ipa;
int s1_prot, s1_lgpgsz;
bool is_secure = ptw->in_secure;
ARMSecuritySpace in_space = ptw->in_space;
bool ret, ipa_secure;
ARMCacheAttrs cacheattrs1;
@ -3137,7 +3192,6 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
ptw->in_secure = ipa_secure;
ptw->in_space = ipa_space;
ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
@ -3180,7 +3234,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
}
/* Combine the S1 and S2 cache attributes. */
hcr = arm_hcr_el2_eff_secstate(env, is_secure);
hcr = arm_hcr_el2_eff_secstate(env, in_space);
if (hcr & HCR_DC) {
/*
* HCR.DC forces the first stage attributes to
@ -3219,7 +3273,6 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
bool is_secure = ptw->in_secure;
ARMMMUIdx s1_mmu_idx;
/*
@ -3227,8 +3280,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
* cannot upgrade a NonSecure translation regime's attributes
* to Secure or Realm.
*/
result->f.attrs.secure = is_secure;
result->f.attrs.space = ptw->in_space;
result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
switch (mmu_idx) {
case ARMMMUIdx_Phys_S:
@ -3236,14 +3289,18 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
case ARMMMUIdx_Phys_Root:
case ARMMMUIdx_Phys_Realm:
/* Checking Phys early avoids special casing later vs regime_el. */
return get_phys_addr_disabled(env, address, access_type, mmu_idx,
is_secure, result, fi);
return get_phys_addr_disabled(env, ptw, address, access_type,
result, fi);
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
/* First stage lookup uses second stage for ptw. */
ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
/*
* First stage lookup uses second stage for ptw; only
* Secure has both S and NS IPA and starts with Stage2_S.
*/
ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
break;
case ARMMMUIdx_Stage2:
@ -3272,7 +3329,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
*/
ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
if (arm_feature(env, ARM_FEATURE_EL2) &&
!regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
!regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
return get_phys_addr_twostage(env, ptw, address, access_type,
result, fi);
}
@ -3305,16 +3362,16 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
if (arm_feature(env, ARM_FEATURE_V8)) {
/* PMSAv8 */
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
is_secure, result, fi);
ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
result, fi);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
/* PMSAv7 */
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
is_secure, result, fi);
ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
result, fi);
} else {
/* Pre-v7 MPU */
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
is_secure, result, fi);
ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
result, fi);
}
qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
" mmu_idx %u -> %s (prot %c%c%c)\n",
@ -3331,9 +3388,9 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
/* Definitely a real MMU, not an MPU */
if (regime_translation_disabled(env, mmu_idx, is_secure)) {
return get_phys_addr_disabled(env, address, access_type, mmu_idx,
is_secure, result, fi);
if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
return get_phys_addr_disabled(env, ptw, address, access_type,
result, fi);
}
if (regime_using_lpae_format(env, mmu_idx)) {
@ -3363,17 +3420,17 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
return false;
}
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool is_secure, GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
MMUAccessType access_type,
ARMMMUIdx mmu_idx, ARMSecuritySpace space,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi)
{
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_secure = is_secure,
.in_space = arm_secure_to_space(is_secure),
.in_space = space,
};
return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
}
bool get_phys_addr(CPUARMState *env, target_ulong address,
@ -3442,7 +3499,6 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
}
ptw.in_space = ss;
ptw.in_secure = arm_space_is_secure(ss);
return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}
@ -3456,7 +3512,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_space = ss,
.in_secure = arm_space_is_secure(ss),
.in_debug = true,
};
GetPhysAddrResult res = {};

View File

@ -379,7 +379,7 @@ static inline void HNAME##_host(void *za, intptr_t off, void *host) \
{ \
uint64_t *ptr = za + off; \
HOST(host, ptr[BE]); \
HOST(host + 1, ptr[!BE]); \
HOST(host + 8, ptr[!BE]); \
} \
static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
{ \

View File

@ -3053,7 +3053,7 @@ void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
.vece = MO_32 },
{ .fni8 = gen_ssra64_i64,
.fniv = gen_ssra_vec,
.fno = gen_helper_gvec_ssra_b,
.fno = gen_helper_gvec_ssra_d,
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.opt_opc = vecop_list,
.load_dest = true,

View File

@ -1,13 +1,14 @@
# See docs/devel/tracing.rst for syntax documentation.
# helper.c
arm_gt_recalc(int timer, int irqstate, uint64_t nexttick) "gt recalc: timer %d irqstate %d next tick 0x%" PRIx64
arm_gt_recalc_disabled(int timer) "gt recalc: timer %d irqstate 0 timer disabled"
arm_gt_recalc(int timer, uint64_t nexttick) "gt recalc: timer %d next tick 0x%" PRIx64
arm_gt_recalc_disabled(int timer) "gt recalc: timer %d timer disabled"
arm_gt_cval_write(int timer, uint64_t value) "gt_cval_write: timer %d value 0x%" PRIx64
arm_gt_tval_write(int timer, uint64_t value) "gt_tval_write: timer %d value 0x%" PRIx64
arm_gt_ctl_write(int timer, uint64_t value) "gt_ctl_write: timer %d value 0x%" PRIx64
arm_gt_imask_toggle(int timer, int irqstate) "gt_ctl_write: timer %d IMASK toggle, new irqstate %d"
arm_gt_imask_toggle(int timer) "gt_ctl_write: timer %d IMASK toggle"
arm_gt_cntvoff_write(uint64_t value) "gt_cntvoff_write: value 0x%" PRIx64
arm_gt_update_irq(int timer, int irqstate) "gt_update_irq: timer %d irqstate %d"
# kvm.c
kvm_arm_fixup_msi_route(uint64_t iova, uint64_t gpa) "MSI iova = 0x%"PRIx64" is translated into 0x%"PRIx64

View File

@ -2556,6 +2556,11 @@ static void register_smram_listener(Notifier *n, void *unused)
&smram_address_space, 1, "kvm-smram");
}
int kvm_arch_get_default_type(MachineState *ms)
{
return 0;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
uint64_t identity_base = 0xfffbc000;

View File

@ -1266,7 +1266,7 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
abort();
}
int mips_kvm_type(MachineState *machine, const char *vm_type)
int kvm_arch_get_default_type(MachineState *machine)
{
#if defined(KVM_CAP_MIPS_VZ)
int r;
@ -1278,6 +1278,7 @@ int mips_kvm_type(MachineState *machine, const char *vm_type)
}
#endif
error_report("KVM_VM_MIPS_VZ type is not available");
return -1;
}

View File

@ -25,13 +25,4 @@ void kvm_mips_reset_vcpu(MIPSCPU *cpu);
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level);
int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level);
#ifdef CONFIG_KVM
int mips_kvm_type(MachineState *machine, const char *vm_type);
#else
static inline int mips_kvm_type(MachineState *machine, const char *vm_type)
{
return 0;
}
#endif
#endif /* KVM_MIPS_H */

View File

@ -108,6 +108,11 @@ static int kvm_ppc_register_host_cpu_type(void);
static void kvmppc_get_cpu_characteristics(KVMState *s);
static int kvmppc_get_dec_bits(void);
int kvm_arch_get_default_type(MachineState *ms)
{
return 0;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);

View File

@ -914,6 +914,11 @@ int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
return 0;
}
int kvm_arch_get_default_type(MachineState *ms)
{
return 0;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
return 0;

View File

@ -330,6 +330,11 @@ static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
}
int kvm_arch_get_default_type(MachineState *ms)
{
return 0;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,

View File

@ -993,6 +993,12 @@ void qtest_irq_intercept_out(QTestState *s, const char *qom_path)
qtest_rsp(s);
}
void qtest_irq_intercept_out_named(QTestState *s, const char *qom_path, const char *name)
{
qtest_sendf(s, "irq_intercept_out %s %s\n", qom_path, name);
qtest_rsp(s);
}
void qtest_irq_intercept_in(QTestState *s, const char *qom_path)
{
qtest_sendf(s, "irq_intercept_in %s\n", qom_path);

View File

@ -371,6 +371,17 @@ void qtest_irq_intercept_in(QTestState *s, const char *string);
*/
void qtest_irq_intercept_out(QTestState *s, const char *string);
/**
* qtest_irq_intercept_out_named:
* @s: #QTestState instance to operate on.
* @qom_path: QOM path of a device.
* @name: Name of the GPIO out pin
*
* Associate a qtest irq with the named GPIO-out pin of the device
* whose QOM path is specified by @qom_path and whose name is @name.
*/
void qtest_irq_intercept_out_named(QTestState *s, const char *qom_path, const char *name);
/**
* qtest_set_irq_in:
* @s: QTestState instance to operate on.
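
A minimal, self-contained use of the new helper (a sketch: detect_line_is_high() is a made-up name; the QOM path and pin name are the ones exercised by the microbit test added below):

#include "libqtest.h"

static bool detect_line_is_high(void)
{
    QTestState *qts = qtest_init("-M microbit");
    bool high;

    /* intercept the nRF51 GPIO model's named "detect" output as qtest IRQ 0 */
    qtest_irq_intercept_out_named(qts, "/machine/nrf51/gpio", "detect");
    high = qtest_get_irq(qts, 0);
    qtest_quit(qts);
    return high;
}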

View File

@ -393,6 +393,49 @@ static void test_nrf51_gpio(void)
qtest_quit(qts);
}
static void test_nrf51_gpio_detect(void)
{
QTestState *qts = qtest_init("-M microbit");
int i;
/* Connect input buffer on pins 1-7, configure SENSE for high level */
for (i = 1; i <= 7; i++) {
qtest_writel(qts, NRF51_GPIO_BASE + NRF51_GPIO_REG_CNF_START + i * 4,
deposit32(0, 16, 2, 2));
}
qtest_irq_intercept_out_named(qts, "/machine/nrf51/gpio", "detect");
for (i = 1; i <= 7; i++) {
/* Set pin high */
qtest_set_irq_in(qts, "/machine/nrf51", "unnamed-gpio-in", i, 1);
uint32_t actual = qtest_readl(qts, NRF51_GPIO_BASE + NRF51_GPIO_REG_IN);
g_assert_cmpuint(actual, ==, 1 << i);
/* Check that DETECT is high */
g_assert_true(qtest_get_irq(qts, 0));
/* Set pin low, check that DETECT goes low. */
qtest_set_irq_in(qts, "/machine/nrf51", "unnamed-gpio-in", i, 0);
actual = qtest_readl(qts, NRF51_GPIO_BASE + NRF51_GPIO_REG_IN);
g_assert_cmpuint(actual, ==, 0x0);
g_assert_false(qtest_get_irq(qts, 0));
}
/* Set pin 0 high, check that DETECT doesn't fire */
qtest_set_irq_in(qts, "/machine/nrf51", "unnamed-gpio-in", 0, 1);
g_assert_false(qtest_get_irq(qts, 0));
qtest_set_irq_in(qts, "/machine/nrf51", "unnamed-gpio-in", 0, 0);
/* Set pins 1, 2, and 3 high, then set 3 low. Check DETECT is still high */
for (i = 1; i <= 3; i++) {
qtest_set_irq_in(qts, "/machine/nrf51", "unnamed-gpio-in", i, 1);
}
g_assert_true(qtest_get_irq(qts, 0));
qtest_set_irq_in(qts, "/machine/nrf51", "unnamed-gpio-in", 3, 0);
g_assert_true(qtest_get_irq(qts, 0));
}
static void timer_task(QTestState *qts, hwaddr task)
{
qtest_writel(qts, NRF51_TIMER_BASE + task, NRF51_TRIGGER_TASK);
@ -499,6 +542,7 @@ int main(int argc, char **argv)
qtest_add_func("/microbit/nrf51/uart", test_nrf51_uart);
qtest_add_func("/microbit/nrf51/gpio", test_nrf51_gpio);
qtest_add_func("/microbit/nrf51/gpio_detect", test_nrf51_gpio_detect);
qtest_add_func("/microbit/nrf51/nvmc", test_nrf51_nvmc);
qtest_add_func("/microbit/nrf51/timer", test_nrf51_timer);
qtest_add_func("/microbit/microbit/i2c", test_microbit_i2c);