Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20141211' into staging

target-arm queue:
 * pass semihosting exit code out to system
 * more TrustZone support code (still not enabled yet)
 * allow user to direct semihosting to gdb or native explicitly
   rather than always auto-guessing the destination
 * fix memory leak in realview_init
 * fix coverity warning in hw/arm/boot
 * get state migration working for AArch64 CPUs
 * check errors in kvm_arm_reset_vcpu

# gpg: Signature made Thu 11 Dec 2014 12:16:19 GMT using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"

* remotes/pmaydell/tags/pull-target-arm-20141211: (33 commits)
  target-arm: Check error conditions on kvm_arm_reset_vcpu
  target-arm: Support save/load for 64 bit CPUs
  target-arm/kvm: make reg sync code common between kvm32/64
  arm_gic_kvm: Tell kernel about number of IRQs
  hw/arm/boot: fix uninitialized scalar variable warning reported by coverity
  hw/arm/realview.c: Fix memory leak in realview_init()
  target-arm: make MAIR0/1 banked
  target-arm: make c13 cp regs banked (FCSEIDR, ...)
  target-arm: make VBAR banked
  target-arm: make PAR banked
  target-arm: make IFAR/DFAR banked
  target-arm: make DFSR banked
  target-arm: make IFSR banked
  target-arm: make DACR banked
  target-arm: make TTBCR banked
  target-arm: make TTBR0/1 banked
  target-arm: make CSSELR banked
  target-arm: respect SCR.FW, SCR.AW and SCTLR.NMFI
  target-arm: add SCTLR_EL3 and make SCTLR banked
  target-arm: add MVBAR support
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit b141290478

gdbstub.c (15 lines changed):
@@ -317,6 +317,8 @@ static GDBState *gdbserver_state;
 
 bool gdb_has_xml;
 
+int semihosting_target = SEMIHOSTING_TARGET_AUTO;
+
 #ifdef CONFIG_USER_ONLY
 /* XXX: This is not thread safe.  Do we care?  */
 static int gdbserver_fd = -1;
@@ -351,10 +353,19 @@ static enum {
     GDB_SYS_DISABLED,
 } gdb_syscall_mode;
 
-/* If gdb is connected when the first semihosting syscall occurs then use
-   remote gdb syscalls.  Otherwise use native file IO.  */
+/* Decide if either remote gdb syscalls or native file IO should be used. */
 int use_gdb_syscalls(void)
 {
+    if (semihosting_target == SEMIHOSTING_TARGET_NATIVE) {
+        /* -semihosting-config target=native */
+        return false;
+    } else if (semihosting_target == SEMIHOSTING_TARGET_GDB) {
+        /* -semihosting-config target=gdb */
+        return true;
+    }
+
+    /* -semihosting-config target=auto */
+    /* On the first call check if gdb is connected and remember. */
     if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
         gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
                                             : GDB_SYS_DISABLED);
hw/arm/boot.c:

@@ -329,6 +329,8 @@ static void set_kernel_args_old(const struct arm_boot_info *info)
  * Returns: the size of the device tree image on success,
  *          0 if the image size exceeds the limit,
  *          -1 on errors.
+ *
+ * Note: Must not be called unless have_dtb(binfo) is true.
  */
 static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
                     hwaddr addr_limit)
@@ -352,7 +354,7 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
             goto fail;
         }
         g_free(filename);
-    } else if (binfo->get_dtb) {
+    } else {
         fdt = binfo->get_dtb(binfo, &size);
         if (!fdt) {
             fprintf(stderr, "Board was unable to create a dtb blob\n");
hw/arm/pxa2xx.c:

@@ -273,10 +273,10 @@ static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri,
     case 3:
         s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC;
         s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I;
-        s->cpu->env.cp15.c1_sys = 0;
+        s->cpu->env.cp15.sctlr_ns = 0;
         s->cpu->env.cp15.c1_coproc = 0;
-        s->cpu->env.cp15.ttbr0_el1 = 0;
-        s->cpu->env.cp15.c3 = 0;
+        s->cpu->env.cp15.ttbr0_el[1] = 0;
+        s->cpu->env.cp15.dacr_ns = 0;
         s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */
         s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */
hw/arm/realview.c:

@@ -52,7 +52,7 @@ static void realview_init(MachineState *machine,
     CPUARMState *env;
     ObjectClass *cpu_oc;
     MemoryRegion *sysmem = get_system_memory();
-    MemoryRegion *ram_lo = g_new(MemoryRegion, 1);
+    MemoryRegion *ram_lo;
     MemoryRegion *ram_hi = g_new(MemoryRegion, 1);
     MemoryRegion *ram_alias = g_new(MemoryRegion, 1);
     MemoryRegion *ram_hack = g_new(MemoryRegion, 1);
@@ -135,6 +135,7 @@ static void realview_init(MachineState *machine,
 
     if (is_pb && ram_size > 0x20000000) {
         /* Core tile RAM.  */
+        ram_lo = g_new(MemoryRegion, 1);
         low_ram_size = ram_size - 0x20000000;
         ram_size = 0x20000000;
         memory_region_init_ram(ram_lo, NULL, "realview.lowmem", low_ram_size,
hw/intc/arm_gic_kvm.c:

@@ -92,6 +92,21 @@ static bool kvm_arm_gic_can_save_restore(GICState *s)
     return s->dev_fd >= 0;
 }
 
+static bool kvm_gic_supports_attr(GICState *s, int group, int attrnum)
+{
+    struct kvm_device_attr attr = {
+        .group = group,
+        .attr = attrnum,
+        .flags = 0,
+    };
+
+    if (s->dev_fd == -1) {
+        return false;
+    }
+
+    return kvm_device_ioctl(s->dev_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
+}
+
 static void kvm_gic_access(GICState *s, int group, int offset,
                            int cpu, uint32_t *val, bool write)
 {
@@ -553,6 +568,11 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (kvm_gic_supports_attr(s, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
+        uint32_t numirqs = s->num_irq;
+        kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, 0, &numirqs, 1);
+    }
+
     /* Distributor */
     memory_region_init_reservation(&s->iomem, OBJECT(s),
                                    "kvm-gic_dist", 0x1000);
include/exec/gdbstub.h:

@@ -95,4 +95,10 @@ extern bool gdb_has_xml;
 /* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
 extern const char *const xml_builtin[][2];
 
+/* Command line option defining whether semihosting should go via gdb or not */
+extern int semihosting_target;
+#define SEMIHOSTING_TARGET_AUTO   0
+#define SEMIHOSTING_TARGET_NATIVE 1
+#define SEMIHOSTING_TARGET_GDB    2
+
 #endif
linux-user/aarch64/target_cpu.h:

@@ -32,7 +32,7 @@ static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
     /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
      * different from AArch32 Linux, which uses TPIDRRO.
      */
-    env->cp15.tpidr_el0 = newtls;
+    env->cp15.tpidr_el[0] = newtls;
 }
 
 #endif
linux-user/arm/target_cpu.h:

@@ -29,7 +29,7 @@ static inline void cpu_clone_regs(CPUARMState *env, target_ulong newsp)
 
 static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
 {
-    env->cp15.tpidrro_el0 = newtls;
+    env->cp15.tpidrro_el[0] = newtls;
 }
 
 #endif
linux-user/main.c:

@@ -564,7 +564,7 @@ do_kernel_trap(CPUARMState *env)
         end_exclusive();
         break;
     case 0xffff0fe0: /* __kernel_get_tls */
-        env->regs[0] = env->cp15.tpidrro_el0;
+        env->regs[0] = env->cp15.tpidrro_el[0];
         break;
     case 0xffff0f60: /* __kernel_cmpxchg64 */
         arm_kernel_cmpxchg64_helper(env);
qemu-options.hx:

@@ -3226,7 +3226,17 @@ DEF("semihosting", 0, QEMU_OPTION_semihosting,
 STEXI
 @item -semihosting
 @findex -semihosting
-Semihosting mode (ARM, M68K, Xtensa only).
+Enable semihosting mode (ARM, M68K, Xtensa only).
 ETEXI
+DEF("semihosting-config", HAS_ARG, QEMU_OPTION_semihosting_config,
+    "-semihosting-config [enable=on|off,]target=native|gdb|auto semihosting configuration\n",
+    QEMU_ARCH_ARM | QEMU_ARCH_M68K | QEMU_ARCH_XTENSA | QEMU_ARCH_LM32)
+STEXI
+@item -semihosting-config [enable=on|off,]target=native|gdb|auto
+@findex -semihosting-config
+Enable semihosting and define where the semihosting calls will be addressed,
+to QEMU (@code{native}) or to GDB (@code{gdb}). The default is @code{auto}, which means
+@code{gdb} during debug sessions and @code{native} otherwise (ARM, M68K, Xtensa only).
+ETEXI
 DEF("old-param", 0, QEMU_OPTION_old_param,
     "-old-param        old param mode\n", QEMU_ARCH_ARM)
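As a concrete illustration, the new option lets a semihosted image be forced to talk to the GDB stub instead of letting QEMU guess (the machine and kernel names here are placeholders, not taken from this series):

    qemu-system-arm -M vexpress-a9 -kernel test.elf -s -S \
                    -semihosting-config enable=on,target=gdb

With target=auto (the default), the same invocation uses gdb syscalls only if a debugger is attached when the first semihosting call happens, exactly as use_gdb_syscalls() above decides.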
target-arm/arm-semi.c:

@@ -58,6 +58,10 @@
 #define TARGET_SYS_HEAPINFO    0x16
 #define TARGET_SYS_EXIT        0x18
 
+/* ADP_Stopped_ApplicationExit is used for exit(0),
+ * anything else is implemented as exit(1) */
+#define ADP_Stopped_ApplicationExit (0x20026)
+
 #ifndef O_BINARY
 #define O_BINARY 0
 #endif
@@ -551,8 +555,11 @@ uint32_t do_arm_semihosting(CPUARMState *env)
             return 0;
         }
     case TARGET_SYS_EXIT:
-        gdb_exit(env, 0);
-        exit(0);
+        /* ARM specifies only Stopped_ApplicationExit as normal
+         * exit, everything else is considered an error */
+        ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
+        gdb_exit(env, ret);
+        exit(ret);
     default:
         fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
         cpu_dump_state(cs, stderr, fprintf, 0);
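On the guest side, the exit status now travels through the SYS_EXIT reason code. A minimal bare-metal sketch of reporting a clean exit, assuming an AArch32 guest using the classic svc 0x123456 semihosting trap (illustrative only, not part of the series):

    /* Request TARGET_SYS_EXIT (0x18) with ADP_Stopped_ApplicationExit
     * (0x20026), which QEMU now maps to exit(0); any other reason code
     * becomes exit(1).
     */
    static void semihosting_exit_ok(void)
    {
        register unsigned int op  __asm__("r0") = 0x18;    /* SYS_EXIT */
        register unsigned int arg __asm__("r1") = 0x20026; /* ADP_Stopped_ApplicationExit */
        __asm__ volatile("svc 0x123456" : : "r"(op), "r"(arg));
    }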
target-arm/cpu.c:

@@ -109,7 +109,7 @@ static void arm_cpu_reset(CPUState *s)
 #if defined(CONFIG_USER_ONLY)
         env->pstate = PSTATE_MODE_EL0t;
         /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
-        env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
+        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
         /* and to the FP/Neon instructions */
         env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
 #else
@@ -167,7 +167,11 @@ static void arm_cpu_reset(CPUState *s)
         env->thumb = initial_pc & 1;
     }
 
-    if (env->cp15.c1_sys & SCTLR_V) {
+    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
+     * executing as AArch32 then check if highvecs are enabled and
+     * adjust the PC accordingly.
+     */
+    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
         env->regs[15] = 0xFFFF0000;
     }
 
@@ -548,7 +552,7 @@ static void arm1026_initfn(Object *obj)
         ARMCPRegInfo ifar = {
             .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
             .access = PL1_RW,
-            .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]),
+            .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &ifar);
target-arm/cpu.h (362 lines changed):

@@ -120,6 +120,12 @@ typedef struct ARMGenericTimer {
 #define GTIMER_VIRT 1
 #define NUM_GTIMERS 2
 
+typedef struct {
+    uint64_t raw_tcr;
+    uint32_t mask;
+    uint32_t base_mask;
+} TCR;
+
 typedef struct CPUARMState {
     /* Regs for current mode.  */
     uint32_t regs[16];
@@ -177,28 +183,111 @@ typedef struct CPUARMState {
     /* System control coprocessor (cp15) */
     struct {
         uint32_t c0_cpuid;
-        uint64_t c0_cssel; /* Cache size selection.  */
-        uint64_t c1_sys; /* System control register.  */
+        union { /* Cache size selection */
+            struct {
+                uint64_t _unused_csselr0;
+                uint64_t csselr_ns;
+                uint64_t _unused_csselr1;
+                uint64_t csselr_s;
+            };
+            uint64_t csselr_el[4];
+        };
+        union { /* System control register. */
+            struct {
+                uint64_t _unused_sctlr;
+                uint64_t sctlr_ns;
+                uint64_t hsctlr;
+                uint64_t sctlr_s;
+            };
+            uint64_t sctlr_el[4];
+        };
         uint64_t c1_coproc; /* Coprocessor access register.  */
         uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
-        uint64_t ttbr0_el1; /* MMU translation table base 0.  */
-        uint64_t ttbr1_el1; /* MMU translation table base 1.  */
-        uint64_t c2_control; /* MMU translation table base control.  */
-        uint32_t c2_mask; /* MMU translation table base selection mask.  */
-        uint32_t c2_base_mask; /* MMU translation table base 0 mask. */
+        uint64_t sder; /* Secure debug enable register. */
+        uint32_t nsacr; /* Non-secure access control register. */
+        union { /* MMU translation table base 0. */
+            struct {
+                uint64_t _unused_ttbr0_0;
+                uint64_t ttbr0_ns;
+                uint64_t _unused_ttbr0_1;
+                uint64_t ttbr0_s;
+            };
+            uint64_t ttbr0_el[4];
+        };
+        union { /* MMU translation table base 1. */
+            struct {
+                uint64_t _unused_ttbr1_0;
+                uint64_t ttbr1_ns;
+                uint64_t _unused_ttbr1_1;
+                uint64_t ttbr1_s;
+            };
+            uint64_t ttbr1_el[4];
+        };
+        /* MMU translation table base control. */
+        TCR tcr_el[4];
         uint32_t c2_data; /* MPU data cachable bits.  */
         uint32_t c2_insn; /* MPU instruction cachable bits.  */
-        uint32_t c3; /* MMU domain access control register
-                        MPU write buffer control.  */
+        union { /* MMU domain access control register
+                 * MPU write buffer control.
+                 */
+            struct {
+                uint64_t dacr_ns;
+                uint64_t dacr_s;
+            };
+            struct {
+                uint64_t dacr32_el2;
+            };
+        };
         uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
         uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
         uint64_t hcr_el2; /* Hypervisor configuration register */
         uint64_t scr_el3; /* Secure configuration register.  */
-        uint32_t ifsr_el2; /* Fault status registers.  */
-        uint64_t esr_el[4];
+        union { /* Fault status registers. */
+            struct {
+                uint64_t ifsr_ns;
+                uint64_t ifsr_s;
+            };
+            struct {
+                uint64_t ifsr32_el2;
+            };
+        };
+        union {
+            struct {
+                uint64_t _unused_dfsr;
+                uint64_t dfsr_ns;
+                uint64_t hsr;
+                uint64_t dfsr_s;
+            };
+            uint64_t esr_el[4];
+        };
         uint32_t c6_region[8]; /* MPU base/size registers.  */
-        uint64_t far_el[4]; /* Fault address registers.  */
-        uint64_t par_el1;  /* Translation result. */
+        union { /* Fault address registers. */
+            struct {
+                uint64_t _unused_far0;
+#ifdef HOST_WORDS_BIGENDIAN
+                uint32_t ifar_ns;
+                uint32_t dfar_ns;
+                uint32_t ifar_s;
+                uint32_t dfar_s;
+#else
+                uint32_t dfar_ns;
+                uint32_t ifar_ns;
+                uint32_t dfar_s;
+                uint32_t ifar_s;
+#endif
+                uint64_t _unused_far3;
+            };
+            uint64_t far_el[4];
+        };
+        union { /* Translation result. */
+            struct {
+                uint64_t _unused_par_0;
+                uint64_t par_ns;
+                uint64_t _unused_par_1;
+                uint64_t par_s;
+            };
+            uint64_t par_el[4];
+        };
         uint32_t c9_insn; /* Cache lockdown registers.  */
         uint32_t c9_data;
         uint64_t c9_pmcr; /* performance monitor control register */
@@ -207,13 +296,67 @@ typedef struct CPUARMState {
         uint32_t c9_pmxevtyper; /* perf monitor event type */
         uint32_t c9_pmuserenr; /* perf monitor user enable */
         uint32_t c9_pminten; /* perf monitor interrupt enables */
-        uint64_t mair_el1;
-        uint64_t vbar_el[4]; /* vector base address register */
-        uint32_t c13_fcse; /* FCSE PID.  */
-        uint64_t contextidr_el1; /* Context ID.  */
-        uint64_t tpidr_el0; /* User RW Thread register.  */
-        uint64_t tpidrro_el0; /* User RO Thread register.  */
-        uint64_t tpidr_el1; /* Privileged Thread register.  */
+        union { /* Memory attribute redirection */
+            struct {
+#ifdef HOST_WORDS_BIGENDIAN
+                uint64_t _unused_mair_0;
+                uint32_t mair1_ns;
+                uint32_t mair0_ns;
+                uint64_t _unused_mair_1;
+                uint32_t mair1_s;
+                uint32_t mair0_s;
+#else
+                uint64_t _unused_mair_0;
+                uint32_t mair0_ns;
+                uint32_t mair1_ns;
+                uint64_t _unused_mair_1;
+                uint32_t mair0_s;
+                uint32_t mair1_s;
+#endif
+            };
+            uint64_t mair_el[4];
+        };
+        union { /* vector base address register */
+            struct {
+                uint64_t _unused_vbar;
+                uint64_t vbar_ns;
+                uint64_t hvbar;
+                uint64_t vbar_s;
+            };
+            uint64_t vbar_el[4];
+        };
+        uint32_t mvbar; /* (monitor) vector base address register */
+        struct { /* FCSE PID. */
+            uint32_t fcseidr_ns;
+            uint32_t fcseidr_s;
+        };
+        union { /* Context ID. */
+            struct {
+                uint64_t _unused_contextidr_0;
+                uint64_t contextidr_ns;
+                uint64_t _unused_contextidr_1;
+                uint64_t contextidr_s;
+            };
+            uint64_t contextidr_el[4];
+        };
+        union { /* User RW Thread register. */
+            struct {
+                uint64_t tpidrurw_ns;
+                uint64_t tpidrprw_ns;
+                uint64_t htpidr;
+                uint64_t _tpidr_el3;
+            };
+            uint64_t tpidr_el[4];
+        };
+        /* The secure banks of these registers don't map anywhere */
+        uint64_t tpidrurw_s;
+        uint64_t tpidrprw_s;
+        uint64_t tpidruro_s;
+
+        union { /* User RO Thread register. */
+            uint64_t tpidruro_ns;
+            uint64_t tpidrro_el[1];
+        };
         uint64_t c14_cntfrq; /* Counter Frequency register */
         uint64_t c14_cntkctl; /* Timer Control register */
         ARMGenericTimer c14_timer[NUM_GTIMERS];
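These unions give each banked register two synchronized views: the AArch32 secure/non-secure field names and the AArch64 *_el[] array overlay the same storage, so neither side needs explicit conversion. A standalone sketch of the pattern with a made-up register (C11 anonymous members, as QEMU uses; not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    struct demo_cp15 {
        union {
            struct {
                uint64_t _unused_foo; /* would alias foo_el[0] */
                uint64_t foo_ns;      /* non-secure bank == foo_el[1] */
                uint64_t hfoo;        /* hyp view == foo_el[2] */
                uint64_t foo_s;       /* secure bank == foo_el[3] */
            };
            uint64_t foo_el[4];
        };
    };

    int main(void)
    {
        struct demo_cp15 r = {0};
        r.foo_ns = 0x1234;
        assert(r.foo_el[1] == 0x1234); /* EL1 view aliases the NS bank */
        return 0;
    }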
@@ -817,6 +960,49 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el)
     return arm_feature(env, ARM_FEATURE_AARCH64);
 }
 
+/* Function for determing whether guest cp register reads and writes should
+ * access the secure or non-secure bank of a cp register.  When EL3 is
+ * operating in AArch32 state, the NS-bit determines whether the secure
+ * instance of a cp register should be used. When EL3 is AArch64 (or if
+ * it doesn't exist at all) then there is no register banking, and all
+ * accesses are to the non-secure version.
+ */
+static inline bool access_secure_reg(CPUARMState *env)
+{
+    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
+                !arm_el_is_aa64(env, 3) &&
+                !(env->cp15.scr_el3 & SCR_NS));
+
+    return ret;
+}
+
+/* Macros for accessing a specified CP register bank */
+#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
+    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
+
+#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
+    do {                                                \
+        if (_secure) {                                  \
+            (_env)->cp15._regname##_s = (_val);         \
+        } else {                                        \
+            (_env)->cp15._regname##_ns = (_val);        \
+        }                                               \
+    } while (0)
+
+/* Macros for automatically accessing a specific CP register bank depending on
+ * the current secure state of the system.  These macros are not intended for
+ * supporting instruction translation reads/writes as these are dependent
+ * solely on the SCR.NS bit and not the mode.
+ */
+#define A32_BANKED_CURRENT_REG_GET(_env, _regname)        \
+    A32_BANKED_REG_GET((_env), _regname,                \
+                       ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))))
+
+#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                       \
+    A32_BANKED_REG_SET((_env), _regname,                                       \
+                       ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))),  \
+                       (_val))
+
 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
 unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx);
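Usage is straightforward: callers name the AArch32 register and the macro picks the _s or _ns field. The arm_cpu_reset() change earlier on this page does exactly this for SCTLR; a reduced sketch (env being a CPUARMState pointer):

    /* Read whichever SCTLR bank is live for the current security state. */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000; /* highvecs enabled */
    }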
@@ -836,6 +1022,7 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
  *  Crn, Crm, opc1, opc2 fields
  *  32 or 64 bit register (ie is it accessed via MRC/MCR
  *    or via MRRC/MCRR?)
+ *  non-secure/secure bank (AArch32 only)
  * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
  * (In this case crn and opc2 should be zero.)
  * For AArch64, there is no 32/64 bit size distinction;
@@ -853,9 +1040,16 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
 #define CP_REG_AA64_SHIFT 28
 #define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
 
-#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2)   \
-    (((cp) << 16) | ((is64) << 15) | ((crn) << 11) |    \
-     ((crm) << 7) | ((opc1) << 3) | (opc2))
+/* To enable banking of coprocessor registers depending on ns-bit we
+ * add a bit to distinguish between secure and non-secure cpregs in the
+ * hashtable.
+ */
+#define CP_REG_NS_SHIFT 29
+#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
+
+#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)   \
+    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |   \
+     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
 
 #define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
     (CP_REG_AA64_MASK |                                 \
@@ -874,8 +1068,15 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
     uint32_t cpregid = kvmid;
     if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
         cpregid |= CP_REG_AA64_MASK;
-    } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
-        cpregid |= (1 << 15);
+    } else {
+        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+            cpregid |= (1 << 15);
+        }
+
+        /* KVM is always non-secure so add the NS flag on AArch32 register
+         * entries.
+         */
+        cpregid |= 1 << CP_REG_NS_SHIFT;
     }
     return cpregid;
 }
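For reference, the new NS bit lands above the coprocessor-number field; it is the same bit kvm_to_cpreg_id() ORs in for AArch32 KVM registers. Encoding a non-secure (ns=1) 32-bit cp15 access with example field values crn=3, crm=0, opc1=0, opc2=0 works out as:

    /* Illustrative expansion of the extended macro (example values only):
     * ENCODE_CP_REG(15, 0, 1, 3, 0, 0, 0)
     *   = (1 << 29) | (15 << 16) | (0 << 15)
     *   | (3 << 11) | (0 << 7) | (0 << 3) | 0
     *   = 0x200F1800
     */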
@@ -950,6 +1151,21 @@ enum {
     ARM_CP_STATE_BOTH = 2,
 };
 
+/* ARM CP register secure state flags.  These flags identify security state
+ * attributes for a given CP register entry.
+ * The existence of both or neither secure and non-secure flags indicates that
+ * the register has both a secure and non-secure hash entry.  A single one of
+ * these flags causes the register to only be hashed for the specified
+ * security state.
+ * Although definitions may have any combination of the S/NS bits, each
+ * registered entry will only have one to identify whether the entry is secure
+ * or non-secure.
+ */
+enum {
+    ARM_CP_SECSTATE_S =   (1 << 0), /* bit[0]: Secure state register */
+    ARM_CP_SECSTATE_NS =  (1 << 1), /* bit[1]: Non-secure state register */
+};
+
 /* Return true if cptype is a valid type field.  This is used to try to
  * catch errors where the sentinel has been accidentally left off the end
  * of a list of registers.
/* Return true if cptype is a valid type field. This is used to try to
|
||||
* catch errors where the sentinel has been accidentally left off the end
|
||||
* of a list of registers.
|
||||
@ -1084,6 +1300,8 @@ struct ARMCPRegInfo {
|
||||
int type;
|
||||
/* Access rights: PL*_[RW] */
|
||||
int access;
|
||||
/* Security state: ARM_CP_SECSTATE_* bits/values */
|
||||
int secure;
|
||||
/* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
|
||||
* this register was defined: can be used to hand data through to the
|
||||
* register read/write functions, since they are passed the ARMCPRegInfo*.
|
||||
@@ -1093,12 +1311,27 @@ struct ARMCPRegInfo {
      * fieldoffset is non-zero, the reset value of the register.
      */
     uint64_t resetvalue;
-    /* Offset of the field in CPUARMState for this register. This is not
-     * needed if either:
+    /* Offset of the field in CPUARMState for this register.
+     *
+     * This is not needed if either:
      *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
      *  2. both readfn and writefn are specified
      */
     ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
+
+    /* Offsets of the secure and non-secure fields in CPUARMState for the
+     * register if it is banked.  These fields are only used during the static
+     * registration of a register.  During hashing the bank associated
+     * with a given security state is copied to fieldoffset which is used from
+     * there on out.
+     *
+     * It is expected that register definitions use either fieldoffset or
+     * bank_fieldoffsets in the definition but not both.  It is also expected
+     * that both bank offsets are set when defining a banked register.  This
+     * use indicates that a register is banked.
+     */
+    ptrdiff_t bank_fieldoffsets[2];
+
     /* Function for making any access checks for this register in addition to
      * those specified by the 'access' permissions bits.  If NULL, no extra
      * checks required.  The access check is performed at runtime, not at
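A banked definition then supplies both offsets and leaves fieldoffset alone. An illustrative entry for a DACR-style register might read as follows (sketch only; the real definitions live in target-arm/helper.c, and the index order of bank_fieldoffsets here is an assumption):

    static const ARMCPRegInfo dacr_demo = {
        .name = "DACR", .cp = 15, .crn = 3, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL1_RW, .resetvalue = 0,
        /* secure bank first, non-secure second (assumed ordering) */
        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dacr_s),
                               offsetof(CPUARMState, cp15.dacr_ns) },
    };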
@@ -1247,27 +1480,50 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
     CPUARMState *env = cs->env_ptr;
     unsigned int cur_el = arm_current_el(env);
     unsigned int target_el = arm_excp_target_el(cs, excp_idx);
-    /* FIXME: Use actual secure state.  */
-    bool secure = false;
-    /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state.  */
-    bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2;
+    bool secure = arm_is_secure(env);
+    uint32_t scr;
+    uint32_t hcr;
+    bool pstate_unmasked;
+    int8_t unmasked = 0;
 
-    /* Don't take exceptions if they target a lower EL.  */
+    /* Don't take exceptions if they target a lower EL.
+     * This check should catch any exceptions that would not be taken but left
+     * pending.
+     */
     if (cur_el > target_el) {
         return false;
     }
 
     switch (excp_idx) {
     case EXCP_FIQ:
-        if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) {
-            return true;
-        }
-        return !(env->daif & PSTATE_F);
+        /* If FIQs are routed to EL3 or EL2 then there are cases where we
+         * override the CPSR.F in determining if the exception is masked or
+         * not.  If neither of these are set then we fall back to the CPSR.F
+         * setting otherwise we further assess the state below.
+         */
+        hcr = (env->cp15.hcr_el2 & HCR_FMO);
+        scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+        /* When EL3 is 32-bit, the SCR.FW bit controls whether the CPSR.F bit
+         * masks FIQ interrupts when taken in non-secure state.  If SCR.FW is
+         * set then FIQs can be masked by CPSR.F when non-secure but only
+         * when FIQs are only routed to EL3.
+         */
+        scr &= !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+        pstate_unmasked = !(env->daif & PSTATE_F);
+        break;
+
     case EXCP_IRQ:
-        if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) {
-            return true;
-        }
-        return !(env->daif & PSTATE_I);
+        /* When EL3 execution state is 32-bit, if HCR.IMO is set then we may
+         * override the CPSR.I masking when in non-secure state.  The SCR.IRQ
+         * setting has already been taken into consideration when setting the
+         * target EL, so it does not have a further affect here.
+         */
+        hcr = (env->cp15.hcr_el2 & HCR_IMO);
+        scr = false;
+        pstate_unmasked = !(env->daif & PSTATE_I);
+        break;
+
     case EXCP_VFIQ:
         if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
             /* VFIQs are only taken when hypervized and non-secure.  */
@@ -1283,6 +1539,21 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
     default:
         g_assert_not_reached();
     }
+
+    /* Use the target EL, current execution state and SCR/HCR settings to
+     * determine whether the corresponding CPSR bit is used to mask the
+     * interrupt.
+     */
+    if ((target_el > cur_el) && (target_el != 1)) {
+        if (arm_el_is_aa64(env, 3) || ((scr || hcr) && (!secure))) {
+            unmasked = 1;
+        }
+    }
+
+    /* The PSTATE bits only mask the interrupt if we have not overriden the
+     * ability above.
+     */
+    return unmasked || pstate_unmasked;
 }
 
 static inline CPUARMState *cpu_init(const char *cpu_model)
@@ -1402,6 +1673,12 @@ static inline bool arm_singlestep_active(CPUARMState *env)
  */
 #define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20
 #define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
+/* Indicates whether cp register reads and writes by guest code should access
+ * the secure or nonsecure bank of banked registers; note that this is not
+ * the same thing as the current security state of the processor!
+ */
+#define ARM_TBFLAG_NS_SHIFT         22
+#define ARM_TBFLAG_NS_MASK          (1 << ARM_TBFLAG_NS_SHIFT)
 
 /* Bit usage when in AArch64 state */
 #define ARM_TBFLAG_AA64_EL_SHIFT    0
||||
/* Bit usage when in AArch64 state */
|
||||
#define ARM_TBFLAG_AA64_EL_SHIFT 0
|
||||
@ -1446,6 +1723,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
|
||||
(((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT)
|
||||
#define ARM_TBFLAG_AA64_PSTATE_SS(F) \
|
||||
(((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT)
|
||||
#define ARM_TBFLAG_NS(F) \
|
||||
(((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
|
||||
|
||||
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
|
||||
target_ulong *cs_base, int *flags)
|
||||
@@ -1495,6 +1774,9 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         if (privmode) {
             *flags |= ARM_TBFLAG_PRIV_MASK;
         }
+        if (!(access_secure_reg(env))) {
+            *flags |= ARM_TBFLAG_NS_MASK;
+        }
         if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
             || arm_el_is_aa64(env, 1)) {
             *flags |= ARM_TBFLAG_VFPEN_MASK;
[One file's diff was suppressed by the viewer because it is too large.]
target-arm/internals.h:

@@ -153,9 +153,9 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
  */
 static inline bool extended_addresses_enabled(CPUARMState *env)
 {
-    return arm_el_is_aa64(env, 1)
-           || ((arm_feature(env, ARM_FEATURE_LPAE)
-                && (env->cp15.c2_control & TTBCR_EAE)));
+    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+    return arm_el_is_aa64(env, 1) ||
+           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
 }
 
 /* Valid Syndrome Register EC field values */
target-arm/kvm.c (107 lines changed):

@@ -21,6 +21,7 @@
 #include "sysemu/kvm.h"
 #include "kvm_arm.h"
 #include "cpu.h"
+#include "internals.h"
 #include "hw/arm/arm.h"
 
 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
@@ -279,6 +280,94 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
     memory_region_ref(kd->mr);
 }
 
+static int compare_u64(const void *a, const void *b)
+{
+    if (*(uint64_t *)a > *(uint64_t *)b) {
+        return 1;
+    }
+    if (*(uint64_t *)a < *(uint64_t *)b) {
+        return -1;
+    }
+    return 0;
+}
+
+/* Initialize the CPUState's cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu)
+{
+    struct kvm_reg_list rl;
+    struct kvm_reg_list *rlp;
+    int i, ret, arraylen;
+    CPUState *cs = CPU(cpu);
+
+    rl.n = 0;
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
+    if (ret != -E2BIG) {
+        return ret;
+    }
+    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
+    rlp->n = rl.n;
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
+    if (ret) {
+        goto out;
+    }
+    /* Sort the list we get back from the kernel, since cpreg_tuples
+     * must be in strictly ascending order.
+     */
+    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
+
+    for (i = 0, arraylen = 0; i < rlp->n; i++) {
+        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
+            continue;
+        }
+        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
+        case KVM_REG_SIZE_U32:
+        case KVM_REG_SIZE_U64:
+            break;
+        default:
+            fprintf(stderr, "Can't handle size of register in kernel list\n");
+            ret = -EINVAL;
+            goto out;
+        }
+
+        arraylen++;
+    }
+
+    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
+    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
+    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
+                                         arraylen);
+    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
+                                        arraylen);
+    cpu->cpreg_array_len = arraylen;
+    cpu->cpreg_vmstate_array_len = arraylen;
+
+    for (i = 0, arraylen = 0; i < rlp->n; i++) {
+        uint64_t regidx = rlp->reg[i];
+        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
+            continue;
+        }
+        cpu->cpreg_indexes[arraylen] = regidx;
+        arraylen++;
+    }
+    assert(cpu->cpreg_array_len == arraylen);
+
+    if (!write_kvmstate_to_list(cpu)) {
+        /* Shouldn't happen unless kernel is inconsistent about
+         * what registers exist.
+         */
+        fprintf(stderr, "Initial read of kernel register state failed\n");
+        ret = -EINVAL;
+        goto out;
+    }
+
+out:
+    g_free(rlp);
+    return ret;
+}
+
 bool write_kvmstate_to_list(ARMCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
@@ -351,6 +440,24 @@ bool write_list_to_kvmstate(ARMCPU *cpu)
     return ok;
 }
 
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+    int ret;
+
+    /* Re-init VCPU so that all registers are set to
+     * their respective reset values.
+     */
+    ret = kvm_arm_vcpu_init(CPU(cpu));
+    if (ret < 0) {
+        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
+        abort();
+    }
+    if (!write_kvmstate_to_list(cpu)) {
+        fprintf(stderr, "write_kvmstate_to_list failed\n");
+        abort();
+    }
+}
+
 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 {
 }
target-arm/kvm32.c:

@@ -51,17 +51,17 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
     struct kvm_one_reg idregs[] = {
         {
             .id = KVM_REG_ARM | KVM_REG_SIZE_U32
-            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
+            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
             .addr = (uintptr_t)&midr,
         },
         {
             .id = KVM_REG_ARM | KVM_REG_SIZE_U32
-            | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
+            | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
             .addr = (uintptr_t)&id_pfr0,
         },
         {
             .id = KVM_REG_ARM | KVM_REG_SIZE_U32
-            | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
+            | ENCODE_CP_REG(15, 0, 0, 0, 2, 0, 0),
             .addr = (uintptr_t)&id_isar0,
         },
         {
@@ -138,7 +138,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
     return true;
 }
 
-static bool reg_syncs_via_tuple_list(uint64_t regidx)
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
 {
     /* Return true if the regidx is a register we should synchronize
      * via the cpreg_tuples array (ie is not a core reg we sync by
@@ -153,24 +153,11 @@ static bool reg_syncs_via_tuple_list(uint64_t regidx)
     }
 }
 
-static int compare_u64(const void *a, const void *b)
-{
-    if (*(uint64_t *)a > *(uint64_t *)b) {
-        return 1;
-    }
-    if (*(uint64_t *)a < *(uint64_t *)b) {
-        return -1;
-    }
-    return 0;
-}
-
 int kvm_arch_init_vcpu(CPUState *cs)
 {
-    int i, ret, arraylen;
+    int ret;
     uint64_t v;
     struct kvm_one_reg r;
-    struct kvm_reg_list rl;
-    struct kvm_reg_list *rlp;
     ARMCPU *cpu = ARM_CPU(cs);
 
     if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
@@ -206,73 +193,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
         return -EINVAL;
     }
 
-    /* Populate the cpreg list based on the kernel's idea
-     * of what registers exist (and throw away the TCG-created list).
-     */
-    rl.n = 0;
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
-    if (ret != -E2BIG) {
-        return ret;
-    }
-    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
-    rlp->n = rl.n;
-    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
-    if (ret) {
-        goto out;
-    }
-    /* Sort the list we get back from the kernel, since cpreg_tuples
-     * must be in strictly ascending order.
-     */
-    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
-
-    for (i = 0, arraylen = 0; i < rlp->n; i++) {
-        if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
-            continue;
-        }
-        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
-        case KVM_REG_SIZE_U32:
-        case KVM_REG_SIZE_U64:
-            break;
-        default:
-            fprintf(stderr, "Can't handle size of register in kernel list\n");
-            ret = -EINVAL;
-            goto out;
-        }
-
-        arraylen++;
-    }
-
-    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
-    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
-    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
-                                         arraylen);
-    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
-                                        arraylen);
-    cpu->cpreg_array_len = arraylen;
-    cpu->cpreg_vmstate_array_len = arraylen;
-
-    for (i = 0, arraylen = 0; i < rlp->n; i++) {
-        uint64_t regidx = rlp->reg[i];
-        if (!reg_syncs_via_tuple_list(regidx)) {
-            continue;
-        }
-        cpu->cpreg_indexes[arraylen] = regidx;
-        arraylen++;
-    }
-    assert(cpu->cpreg_array_len == arraylen);
-
-    if (!write_kvmstate_to_list(cpu)) {
-        /* Shouldn't happen unless kernel is inconsistent about
-         * what registers exist.
-         */
-        fprintf(stderr, "Initial read of kernel register state failed\n");
-        ret = -EINVAL;
-        goto out;
-    }
-
-out:
-    g_free(rlp);
-    return ret;
+    return kvm_arm_init_cpreg_list(cpu);
 }
 
 typedef struct Reg {
@@ -508,12 +429,3 @@ int kvm_arch_get_registers(CPUState *cs)
 
     return 0;
 }
-
-void kvm_arm_reset_vcpu(ARMCPU *cpu)
-{
-    /* Re-init VCPU so that all registers are set to
-     * their respective reset values.
-     */
-    kvm_arm_vcpu_init(CPU(cpu));
-    write_kvmstate_to_list(cpu);
-}
target-arm/kvm64.c:

@@ -103,9 +103,21 @@ int kvm_arch_init_vcpu(CPUState *cs)
         return ret;
     }
 
-    /* TODO : support for save/restore/reset of system regs via tuple list */
+    return kvm_arm_init_cpreg_list(cpu);
+}
 
-    return 0;
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
+{
+    /* Return true if the regidx is a register we should synchronize
+     * via the cpreg_tuples array (ie is not a core reg we sync by
+     * hand in kvm_arch_get/put_registers())
+     */
+    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+    case KVM_REG_ARM_CORE:
+        return false;
+    default:
+        return true;
+    }
 }
 
 #define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
@@ -260,11 +272,3 @@ int kvm_arch_get_registers(CPUState *cs)
     /* TODO: other registers */
     return ret;
 }
-
-void kvm_arm_reset_vcpu(ARMCPU *cpu)
-{
-    /* Re-init VCPU so that all registers are set to
-     * their respective reset values.
-     */
-    kvm_arm_vcpu_init(CPU(cpu));
-}
target-arm/kvm_arm.h:

@@ -46,6 +46,28 @@ int kvm_arm_vcpu_init(CPUState *cs);
 void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                              uint64_t attr, int dev_fd);
 
+/**
+ * kvm_arm_init_cpreg_list:
+ * @cs: CPUState
+ *
+ * Initialize the CPUState's cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ *
+ * Returns: 0 if success, else < 0 error code
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu);
+
+/**
+ * kvm_arm_reg_syncs_via_cpreg_list
+ * regidx: KVM register index
+ *
+ * Return true if this KVM register should be synchronized via the
+ * cpreg list of arbitrary system registers, false if it is synchronized
+ * by hand using code in kvm_arch_get/put_registers().
+ */
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);
+
 /**
  * write_list_to_kvmstate:
  * @cpu: ARMCPU
target-arm/machine.c:

@@ -127,6 +127,13 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
     CPUARMState *env = &cpu->env;
     uint32_t val = qemu_get_be32(f);
 
+    env->aarch64 = ((val & PSTATE_nRW) == 0);
+
+    if (is_a64(env)) {
+        pstate_write(env, val);
+        return 0;
+    }
+
     /* Avoid mode switch when restoring CPSR */
     env->uncached_cpsr = val & CPSR_M;
     cpsr_write(env, val, 0xffffffff);
@@ -137,8 +144,15 @@ static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
 {
     ARMCPU *cpu = opaque;
     CPUARMState *env = &cpu->env;
+    uint32_t val;
 
-    qemu_put_be32(f, cpsr_read(env));
+    if (is_a64(env)) {
+        val = pstate_read(env);
+    } else {
+        val = cpsr_read(env);
+    }
+
+    qemu_put_be32(f, val);
 }
 
 static const VMStateInfo vmstate_cpsr = {
@@ -222,12 +236,14 @@ static int cpu_post_load(void *opaque, int version_id)
 
 const VMStateDescription vmstate_arm_cpu = {
     .name = "cpu",
-    .version_id = 21,
-    .minimum_version_id = 21,
+    .version_id = 22,
+    .minimum_version_id = 22,
     .pre_save = cpu_pre_save,
     .post_load = cpu_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
+        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
+        VMSTATE_UINT64(env.pc, ARMCPU),
         {
             .name = "cpsr",
             .version_id = 0,
target-arm/op_helper.c:

@@ -361,7 +361,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
      * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
      * to catch that case at translate time.
      */
-    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
         raise_exception(env, EXCP_UDEF);
     }
 
@@ -575,7 +575,7 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
      * short descriptor format (in which case it holds both PROCID and ASID),
      * since we don't implement the optional v7 context ID masking.
      */
-    contextidr = extract64(env->cp15.contextidr_el1, 0, 32);
+    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
 
     switch (bt) {
     case 3: /* linked context ID match */
target-arm/translate.c:

@@ -7091,7 +7091,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
     rt = (insn >> 12) & 0xf;
 
     ri = get_arm_cp_reginfo(s->cp_regs,
-                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+                            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
     if (ri) {
         /* Check access permissions */
         if (!cp_access_ok(s->current_el, ri, isread)) {
@@ -7281,12 +7281,16 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
      */
     if (is64) {
         qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
-                      "64 bit system register cp:%d opc1: %d crm:%d\n",
-                      isread ? "read" : "write", cpnum, opc1, crm);
+                      "64 bit system register cp:%d opc1: %d crm:%d "
+                      "(%s)\n",
+                      isread ? "read" : "write", cpnum, opc1, crm,
+                      s->ns ? "non-secure" : "secure");
     } else {
         qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
-                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
-                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
+                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+                      "(%s)\n",
+                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+                      s->ns ? "non-secure" : "secure");
     }
 
     return 1;
@@ -11031,6 +11035,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
 #if !defined(CONFIG_USER_ONLY)
     dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
 #endif
+    dc->ns = ARM_TBFLAG_NS(tb->flags);
     dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
     dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
     dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
target-arm/translate.h:

@@ -20,6 +20,7 @@ typedef struct DisasContext {
 #if !defined(CONFIG_USER_ONLY)
     int user;
 #endif
+    bool ns;          /* Use non-secure CPREG bank on access */
     bool cpacr_fpen;  /* FP enabled via CPACR.FPEN */
     bool vfp_enabled; /* FP enabled via FPSCR.EN */
     int vec_len;
vl.c (48 lines changed):

@@ -554,6 +554,22 @@ static QemuOptsList qemu_icount_opts = {
     },
 };
 
+static QemuOptsList qemu_semihosting_config_opts = {
+    .name = "semihosting-config",
+    .implied_opt_name = "enable",
+    .head = QTAILQ_HEAD_INITIALIZER(qemu_semihosting_config_opts.head),
+    .desc = {
+        {
+            .name = "enable",
+            .type = QEMU_OPT_BOOL,
+        }, {
+            .name = "target",
+            .type = QEMU_OPT_STRING,
+        },
+        { /* end of list */ }
+    },
+};
+
 /**
  * Get machine options
  *
@@ -2812,6 +2828,7 @@ int main(int argc, char **argv, char **envp)
     qemu_add_opts(&qemu_name_opts);
     qemu_add_opts(&qemu_numa_opts);
     qemu_add_opts(&qemu_icount_opts);
+    qemu_add_opts(&qemu_semihosting_config_opts);
 
     runstate_init();
 
@@ -3623,6 +3640,37 @@ int main(int argc, char **argv, char **envp)
                 break;
             case QEMU_OPTION_semihosting:
                 semihosting_enabled = 1;
+                semihosting_target = SEMIHOSTING_TARGET_AUTO;
+                break;
+            case QEMU_OPTION_semihosting_config:
+                semihosting_enabled = 1;
+                opts = qemu_opts_parse(qemu_find_opts("semihosting-config"),
+                                       optarg, 0);
+                if (opts != NULL) {
+                    semihosting_enabled = qemu_opt_get_bool(opts, "enable",
+                                                            true);
+                    const char *target = qemu_opt_get(opts, "target");
+                    if (target != NULL) {
+                        if (strcmp("native", target) == 0) {
+                            semihosting_target = SEMIHOSTING_TARGET_NATIVE;
+                        } else if (strcmp("gdb", target) == 0) {
+                            semihosting_target = SEMIHOSTING_TARGET_GDB;
+                        } else if (strcmp("auto", target) == 0) {
+                            semihosting_target = SEMIHOSTING_TARGET_AUTO;
+                        } else {
+                            fprintf(stderr, "Unsupported semihosting-config"
+                                    " %s\n",
+                                    optarg);
+                            exit(1);
+                        }
+                    } else {
+                        semihosting_target = SEMIHOSTING_TARGET_AUTO;
+                    }
+                } else {
+                    fprintf(stderr, "Unsupported semihosting-config %s\n",
+                            optarg);
+                    exit(1);
+                }
                 break;
             case QEMU_OPTION_tdf:
                 fprintf(stderr, "Warning: user space PIT time drift fix "