target-arm queue:

* v8M: SG, BLXNS, secure-return
* v8M: fixes for coverity issues in previous patches
* arm: fix armv7m_init() declaration to match definition
* watchdog/aspeed: fix variable type to store reload value

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJZ35IpAAoJEDwlJe0UNgzerIkP/2Ikozj+wZeHXNltgB7AWoFl
OG987IQ6ZC2akN5mVAqDGUi3UUhYz/ehByk5VXWD7rRHb6lO7WbSuDH4NjCLeBNf
vJBGOlio6pzgxChE6nNVuXvDTXh1QhBCAADUM0wvyn1kNlvzMmbmAfE5d+Kr9dGl
pf/YR2aOhxle1hhxRRrvlPRXGQrS5zn2kAcdtErv7Yjc/NrHTmYLiEcUukPK5R7W
AuZh+eaUdDxfWuFyg2O95bQqD9XLg+gPrOvH29tMko6Uqb3wVuG2CS4BXtPmSbGZ
QVISABueF0i9Bd4RYBziIzDmN0fQS+1P6YzMv7g7s2MBj6nQwPLenOHrB3+C2TAg
qelA2XfEv8kEQFD/iHvRAflecDGyogLE+BKqFo5s5MJn74Fxm+YNAaTva1gbcsyG
XgeS1zSlwdfCApv0SbforOpZRQ8opagVb/J8SQK4NxUSFOnqVYtV1ixwzII5r1T9
i2SZiE7Ig0TeYw4yi6rtYCQ6d/0vPy2xYHzBfJu3tx1Bz/7XHqB3JYrjO3BeS0fv
Q6ek8rBKCJB71YvDeHmNhysTzxV5/GDErH+QRcps3InaV9zTFYpaUCfx2lBwk6vJ
7bUjmfLG4MOpy7sXkJC+80HHl1AvkhYy9OU8oZhhLmrcJPS16KL1vtCtqPIo+1ey
ofiBbeBPWNoFQ0TFftJ8
=/H5Z
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20171012' into staging

target-arm queue:
* v8M: SG, BLXNS, secure-return
* v8M: fixes for coverity issues in previous patches
* arm: fix armv7m_init() declaration to match definition
* watchdog/aspeed: fix variable type to store reload value

# gpg: Signature made Thu 12 Oct 2017 17:02:49 BST
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20171012:
  nvic: Fix miscalculation of offsets into ITNS array
  nvic: Add missing 'break'
  target/arm: Implement SG instruction corner cases
  target/arm: Support some Thumb insns being always unconditional
  target-arm: Simplify insn_crosses_page()
  target/arm: Pull Thumb insn word loads up to top level
  target-arm: Don't check for "Thumb2 or M profile" for not-Thumb1
  target/arm: Implement secure function return
  target/arm: Implement BLXNS
  target/arm: Implement SG instruction
  target/arm: Add M profile secure MMU index values to get_a32_user_mem_index()
  arm: fix armv7m_init() declaration to match definition
  watchdog/aspeed: fix variable type to store reload value

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f90ea7ba7c
@@ -698,7 +698,7 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
         return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
     {
-        int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
+        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
         int i;

         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
@@ -1102,7 +1102,7 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
     switch (offset) {
     case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
     {
-        int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
+        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
         int i;

         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
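Both ITNS hunks fix the same arithmetic: the ITNS register bank at 0x380..0x3bf is indexed in bytes, and each 32-bit register covers 32 interrupts, i.e. 8 interrupts per byte of offset, so the old factor of 32 over-indexed the array fourfold. A standalone sketch of the corrected mapping (the NVIC_FIRST_IRQ value of 16 is assumed here purely for illustration):

    #include <assert.h>

    #define NVIC_FIRST_IRQ 16  /* assumed value, for illustration only */

    /* Map a byte offset within the 0x380..0x3bf ITNS bank to the number
     * of the first interrupt that register covers: registers are 4 bytes
     * wide and cover 32 interrupts each, hence 8 interrupts per byte.
     */
    static int itns_startvec(unsigned int offset)
    {
        return 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
    }

    int main(void)
    {
        assert(itns_startvec(0x380) == NVIC_FIRST_IRQ);       /* ITNS0 */
        assert(itns_startvec(0x384) == NVIC_FIRST_IRQ + 32);  /* ITNS1 */
        return 0;
    }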
@@ -1447,6 +1447,7 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
             return;
         }
         cpu->env.sau.ctrl = value & 3;
+        break;
     case 0xdd4: /* SAU_TYPE */
         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
             goto bad_offset;
@@ -100,13 +100,13 @@ static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size)

 static void aspeed_wdt_reload(AspeedWDTState *s, bool pclk)
 {
-    uint32_t reload;
+    uint64_t reload;

     if (pclk) {
         reload = muldiv64(s->regs[WDT_RELOAD_VALUE], NANOSECONDS_PER_SECOND,
                           s->pclk_freq);
     } else {
-        reload = s->regs[WDT_RELOAD_VALUE] * 1000;
+        reload = s->regs[WDT_RELOAD_VALUE] * 1000ULL;
     }

     if (aspeed_wdt_is_enabled(s)) {
@@ -25,7 +25,7 @@ typedef enum {

 /* armv7m.c */
 DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
-                      const char *kernel_filename, const char *cpu_model);
+                      const char *kernel_filename, const char *cpu_type);
 /**
  * armv7m_load_kernel:
  * @cpu: CPU
@@ -41,6 +41,10 @@ typedef struct V8M_SAttributes {
     bool irvalid;
 } V8M_SAttributes;

+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                                V8M_SAttributes *sattrs);
+
 /* Definitions for the PMCCNTR and PMCR registers */
 #define PMCRD   0x8
 #define PMCRC   0x4
@@ -5893,6 +5897,12 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
     g_assert_not_reached();
 }

+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
 void switch_mode(CPUARMState *env, int mode)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
@@ -6164,7 +6174,17 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
      * - if the return value is a magic value, do exception return (like BX)
      * - otherwise bit 0 of the return value is the target security state
      */
-    if (dest >= 0xff000000) {
+    uint32_t min_magic;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }
+
+    if (dest >= min_magic) {
         /* This is an exception return magic value; put it where
          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
          * Note that if we ever add gen_ss_advance() singlestep support to
@@ -6185,6 +6205,59 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
     env->regs[15] = dest & ~1;
 }

+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /* Handle v7M BLXNS:
+     *  - bit 0 of the destination address is the target security state
+     */
+
+    /* At this point regs[15] is the address just after the BLXNS */
+    uint32_t nextinst = env->regs[15] | 1;
+    uint32_t sp = env->regs[13] - 8;
+    uint32_t saved_psr;
+
+    /* translate.c will have made BLXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (dest & 1) {
+        /* target is Secure, so this is just a normal BLX,
+         * except that the low bit doesn't indicate Thumb/not.
+         */
+        env->regs[14] = nextinst;
+        env->thumb = 1;
+        env->regs[15] = dest & ~1;
+        return;
+    }
+
+    /* Target is non-secure: first push a stack frame */
+    if (!QEMU_IS_ALIGNED(sp, 8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
+    }
+
+    saved_psr = env->v7m.exception;
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+        saved_psr |= XPSR_SFPA;
+    }
+
+    /* Note that these stores can throw exceptions on MPU faults */
+    cpu_stl_data(env, sp, nextinst);
+    cpu_stl_data(env, sp + 4, saved_psr);
+
+    env->regs[13] = sp;
+    env->regs[14] = 0xfeffffff;
+    if (arm_v7m_is_handler_mode(env)) {
+        /* Write a dummy value to IPSR, to avoid leaking the current secure
+         * exception number to non-secure code. This is guaranteed not
+         * to cause write_v7m_exception() to actually change stacks.
+         */
+        write_v7m_exception(env, 1);
+    }
+    switch_v7m_security_state(env, 0);
+    env->thumb = 1;
+    env->regs[15] = dest;
+}
+
 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                 bool spsel)
 {
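Before handing control to non-secure code, the helper above parks the real return address and a partial PSR in a two-word frame on the secure stack, and substitutes the FNC_RETURN value 0xfeffffff as LR. A toy model of that frame, with invented addresses, just to make the layout concrete (a sketch, not QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the two-word frame BLXNS pushes (values invented). */
    struct fnc_frame {
        uint32_t partial_ret;  /* at sp + 0: address after BLXNS, low bit set */
        uint32_t saved_psr;    /* at sp + 4: IPSR exception field (+ SFPA bit) */
    };

    int main(void)
    {
        uint32_t sp = 0x20001000;  /* invented secure stack pointer */
        uint32_t pc = 0x08000404;  /* invented address just after the BLXNS */

        sp -= 8;
        struct fnc_frame frame = { pc | 1, 0 };  /* thread mode, SFPA clear */

        printf("sp=0x%08" PRIx32 " [sp]=0x%08" PRIx32 " [sp+4]=0x%08" PRIx32
               " lr=0x%08x\n",
               sp, frame.partial_ret, frame.saved_psr, 0xfeffffffu);
        return 0;
    }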
@@ -6407,12 +6480,19 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     bool exc_secure = false;
     bool return_to_secure;

-    /* We can only get here from an EXCP_EXCEPTION_EXIT, and
-     * gen_bx_excret() enforces the architectural rule
-     * that jumps to magic addresses don't have magic behaviour unless
-     * we're in Handler mode (compare pseudocode BXWritePC()).
+    /* If we're not in Handler mode then jumps to magic exception-exit
+     * addresses don't have magic behaviour. However for the v8M
+     * security extensions the magic secure-function-return has to
+     * work in thread mode too, so to avoid doing an extra check in
+     * the generated code we allow exception-exit magic to also cause the
+     * internal exception and bring us here in thread mode. Correct code
+     * will never try to do this (the following insn fetch will always
+     * fault) so the overhead of having taken an unnecessary exception
+     * doesn't matter.
      */
-    assert(arm_v7m_is_handler_mode(env));
+    if (!arm_v7m_is_handler_mode(env)) {
+        return;
+    }

     /* In the spec pseudocode ExceptionReturn() is called directly
      * from BXWritePC() and gets the full target PC value including
@@ -6702,6 +6782,78 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
 }

+static bool do_v7m_function_return(ARMCPU *cpu)
+{
+    /* v8M security extensions magic function return.
+     * We may either:
+     *  (1) throw an exception (longjump)
+     *  (2) return true if we successfully handled the function return
+     *  (3) return false if we failed a consistency check and have
+     *      pended a UsageFault that needs to be taken now
+     *
+     * At this point the magic return value is split between env->regs[15]
+     * and env->thumb. We don't bother to reconstitute it because we don't
+     * need it (all values are handled the same way).
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t newpc, newpsr, newpsr_exc;
+
+    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
+
+    {
+        bool threadmode, spsel;
+        TCGMemOpIdx oi;
+        ARMMMUIdx mmu_idx;
+        uint32_t *frame_sp_p;
+        uint32_t frameptr;
+
+        /* Pull the return address and IPSR from the Secure stack */
+        threadmode = !arm_v7m_is_handler_mode(env);
+        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
+
+        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
+        frameptr = *frame_sp_p;
+
+        /* These loads may throw an exception (for MPU faults). We want to
+         * do them as secure, so work out what MMU index that is.
+         */
+        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
+        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
+        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+
+        /* Consistency checks on new IPSR */
+        newpsr_exc = newpsr & XPSR_EXCP;
+        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
+              (env->v7m.exception == 1 && newpsr_exc != 0))) {
+            /* Pend the fault and tell our caller to take it */
+            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                    env->v7m.secure);
+            qemu_log_mask(CPU_LOG_INT,
+                          "...taking INVPC UsageFault: "
+                          "IPSR consistency check failed\n");
+            return false;
+        }
+
+        *frame_sp_p = frameptr + 8;
+    }
+
+    /* This invalidates frame_sp_p */
+    switch_v7m_security_state(env, true);
+    env->v7m.exception = newpsr_exc;
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    if (newpsr & XPSR_SFPA) {
+        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
+    }
+    xpsr_write(env, 0, XPSR_IT);
+    env->thumb = newpc & 1;
+    env->regs[15] = newpc & ~1;
+
+    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
+    return true;
+}
+
 static void arm_log_exception(int idx)
 {
     if (qemu_loglevel_mask(CPU_LOG_INT)) {
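The consistency check in do_v7m_function_return() accepts exactly two pairings of the current and saved IPSR exception fields: both zero, or a current value of 1 with a nonzero saved value; anything else pends an INVPC UsageFault. The predicate in isolation (a sketch of the condition above, not the QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* Valid iff both exception fields are 0, or the current field is 1
     * and the saved one is nonzero (the rule from the hunk above).
     */
    static int ipsr_consistent(uint32_t cur_exc, uint32_t saved_exc)
    {
        return (cur_exc == 0 && saved_exc == 0) ||
               (cur_exc == 1 && saved_exc != 0);
    }

    int main(void)
    {
        assert(ipsr_consistent(0, 0));    /* thread mode -> thread mode */
        assert(ipsr_consistent(1, 11));   /* magic thread-mode path */
        assert(!ipsr_consistent(0, 11));  /* would pend INVPC UsageFault */
        return 0;
    }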
@@ -6736,6 +6888,126 @@ static void arm_log_exception(int idx)
     }
 }

+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+                               uint32_t addr, uint16_t *insn)
+{
+    /* Load a 16-bit portion of a v7M instruction, returning true on success,
+     * or false on failure (in which case we will have pended the appropriate
+     * exception).
+     * We need to do the instruction fetch's MPU and SAU checks
+     * like this because there is no MMU index that would allow
+     * doing the load with a single function call. Instead we must
+     * first check that the security attributes permit the load
+     * and that they don't mismatch on the two halves of the instruction,
+     * and then we do the load as a secure load (ie using the security
+     * attributes of the address, not the CPU, as architecturally required).
+     */
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    V8M_SAttributes sattrs = {};
+    MemTxAttrs attrs = {};
+    ARMMMUFaultInfo fi = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    uint32_t fsr;
+
+    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+    if (!sattrs.nsc || sattrs.ns) {
+        /* This must be the second half of the insn, and it straddles a
+         * region boundary with the second half not being S&NSC.
+         */
+        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        qemu_log_mask(CPU_LOG_INT,
+                      "...really SecureFault with SFSR.INVEP\n");
+        return false;
+    }
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
+                      &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) {
+        /* the MPU lookup failed */
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+        return false;
+    }
+    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+                                  attrs, &txres);
+    if (txres != MEMTX_OK) {
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+        return false;
+    }
+    return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+    /* Check whether this attempt to execute code in a Secure & NS-Callable
+     * memory region is for an SG instruction; if so, then emulate the
+     * effect of the SG instruction and return true. Otherwise pend
+     * the correct kind of exception and return false.
+     */
+    CPUARMState *env = &cpu->env;
+    ARMMMUIdx mmu_idx;
+    uint16_t insn;
+
+    /* We should never get here unless get_phys_addr_pmsav8() caused
+     * an exception for NS executing in S&NSC memory.
+     */
+    assert(!env->v7m.secure);
+    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+        return false;
+    }
+
+    if (!env->thumb) {
+        goto gen_invep;
+    }
+
+    if (insn != 0xe97f) {
+        /* Not an SG instruction first half (we choose the IMPDEF
+         * early-SG-check option).
+         */
+        goto gen_invep;
+    }
+
+    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+        return false;
+    }
+
+    if (insn != 0xe97f) {
+        /* Not an SG instruction second half (yes, both halves of the SG
+         * insn have the same hex value)
+         */
+        goto gen_invep;
+    }
+
+    /* OK, we have confirmed that we really have an SG instruction.
+     * We know we're NS in S memory so don't need to repeat those checks.
+     */
+    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+                  ", executing it\n", env->regs[15]);
+    env->regs[14] &= ~1;
+    switch_v7m_security_state(env, true);
+    xpsr_write(env, 0, XPSR_IT);
+    env->regs[15] += 4;
+    return true;
+
+gen_invep:
+    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+    qemu_log_mask(CPU_LOG_INT,
+                  "...really SecureFault with SFSR.INVEP\n");
+    return false;
+}
+
 void arm_v7m_cpu_do_interrupt(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
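A detail worth calling out from v7m_handle_execute_nsc(): SG is matched one halfword at a time, and both halves of its encoding are the same value, 0xe97f. A tiny sketch of that match and of the 32-bit form the decoder compares against:

    #include <assert.h>
    #include <stdint.h>

    /* SG's two halfwords are identical, so the same probe works for both. */
    static int is_sg_half(uint16_t hw)
    {
        return hw == 0xe97f;
    }

    int main(void)
    {
        assert(is_sg_half(0xe97f));
        assert(!is_sg_half(0xe8bd));  /* some other halfword, for contrast */
        assert(((uint32_t)0xe97f << 16 | 0xe97f) == 0xe97fe97f);
        return 0;
    }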
@@ -6778,12 +7050,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
              * the SG instruction have the same security attributes.)
              * Everything else must generate an INVEP SecureFault, so we
              * emulate the SG instruction here.
-             * TODO: actually emulate SG.
              */
-            env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            qemu_log_mask(CPU_LOG_INT,
-                          "...really SecureFault with SFSR.INVEP\n");
+            if (v7m_handle_execute_nsc(cpu)) {
+                return;
+            }
             break;
         case M_FAKE_FSR_SFAULT:
             /* Various flavours of SecureFault for attempts to execute or
@@ -6868,8 +7138,18 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     case EXCP_IRQ:
         break;
     case EXCP_EXCEPTION_EXIT:
-        do_v7m_exception_exit(cpu);
-        return;
+        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
+            /* Must be v8M security extension function return */
+            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
+            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+            if (do_v7m_function_return(cpu)) {
+                return;
+            }
+        } else {
+            do_v7m_exception_exit(cpu);
+            return;
+        }
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
         return; /* Never happens. Keep compiler happy. */
@@ -64,6 +64,7 @@ DEF_HELPER_3(v7m_msr, void, env, i32, i32)
 DEF_HELPER_2(v7m_mrs, i32, env, i32)

 DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)

 DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
 DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
@@ -60,6 +60,7 @@ static inline bool excp_is_internal(int excp)
 FIELD(V7M_CONTROL, NPRIV, 0, 1)
 FIELD(V7M_CONTROL, SPSEL, 1, 1)
 FIELD(V7M_CONTROL, FPCA, 2, 1)
+FIELD(V7M_CONTROL, SFPA, 3, 1)

 /* Bit definitions for v7M exception return payload */
 FIELD(V7M_EXCRET, ES, 0, 1)
@@ -71,6 +72,13 @@ FIELD(V7M_EXCRET, DCRS, 5, 1)
 FIELD(V7M_EXCRET, S, 6, 1)
 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

+/* Minimum value which is a magic number for exception return */
+#define EXC_RETURN_MIN_MAGIC 0xff000000
+/* Minimum number which is a magic number for function or exception return
+ * when using v8M security extension
+ */
+#define FNC_RETURN_MIN_MAGIC 0xfefffffe
+
 /* We use a few fake FSR values for internal purposes in M profile.
  * M profile cores don't have A/R format FSRs, but currently our
  * get_phys_addr() code assumes A/R profile and reports failures via
@@ -165,6 +165,10 @@ static inline int get_a32_user_mem_index(DisasContext *s)
     case ARMMMUIdx_MPriv:
     case ARMMMUIdx_MNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
+    case ARMMMUIdx_MSUser:
+    case ARMMMUIdx_MSPriv:
+    case ARMMMUIdx_MSNegPri:
+        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
     case ARMMMUIdx_S2NS:
     default:
         g_assert_not_reached();
@@ -960,7 +964,8 @@ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
      * s->base.is_jmp that we need to do the rest of the work later.
      */
     gen_bx(s, var);
-    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
+    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
+        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
         s->base.is_jmp = DISAS_BX_EXCRET;
     }
 }
@@ -969,9 +974,18 @@ static inline void gen_bx_excret_final_code(DisasContext *s)
 {
     /* Generate the code to finish possible exception return and end the TB */
     TCGLabel *excret_label = gen_new_label();
+    uint32_t min_magic;
+
+    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }

     /* Is the new PC value in the magic range indicating exception return? */
-    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
+    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
     /* No: end the TB as we would for a DISAS_JMP */
     if (is_singlestepping(s)) {
         gen_singlestep_exception(s);
@@ -1013,6 +1027,20 @@ static inline void gen_bxns(DisasContext *s, int rm)
     s->base.is_jmp = DISAS_EXIT;
 }

+static inline void gen_blxns(DisasContext *s, int rm)
+{
+    TCGv_i32 var = load_reg(s, rm);
+
+    /* We don't need to sync condexec state, for the same reason as bxns.
+     * We do however need to set the PC, because the blxns helper reads it.
+     * The blxns helper may throw an exception.
+     */
+    gen_set_pc_im(s, s->pc);
+    gen_helper_v7m_blxns(cpu_env, var);
+    tcg_temp_free_i32(var);
+    s->base.is_jmp = DISAS_EXIT;
+}
+
 /* Variant of store_reg which uses branch&exchange logic when storing
    to r15 in ARM architecture v7 and above. The source must be a temporary
    and will be marked as dead. */
@@ -9592,6 +9620,44 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     }
 }

+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+{
+    /* Return true if this is a 16 bit instruction. We must be precise
+     * about this (matching the decode). We assume that s->pc still
+     * points to the first 16 bits of the insn.
+     */
+    if ((insn >> 11) < 0x1d) {
+        /* Definitely a 16-bit instruction */
+        return true;
+    }
+
+    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
+     * first half of a 32-bit Thumb insn. Thumb-1 cores might
+     * end up actually treating this as two 16-bit insns, though,
+     * if it's half of a bl/blx pair that might span a page boundary.
+     */
+    if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
+        /* Thumb2 cores (including all M profile ones) always treat
+         * 32-bit insns as 32-bit.
+         */
+        return false;
+    }
+
+    if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
+        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
+         * is not on the next page; we merge this into a 32-bit
+         * insn.
+         */
+        return false;
+    }
+    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
+     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
+     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
+     *  -- handle as single 16 bit insn
+     */
+    return true;
+}
+
 /* Return true if this is a Thumb-2 logical op. */
 static int
 thumb2_logic_op(int op)
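The core of thumb_insn_is_16bit() is a single comparison on the top five bits of the first halfword: 0b11101, 0b11110 and 0b11111 (that is, (insn >> 11) >= 0x1d) introduce a 32-bit encoding, everything else is 16-bit, with the Thumb-1 BL/BLX page-straddle case as the only wrinkle. The basic predicate in isolation:

    #include <assert.h>
    #include <stdint.h>

    /* First halfword of a Thumb insn: it starts a 32-bit encoding iff
     * the top five bits are 0b11101, 0b11110 or 0b11111.
     */
    static int starts_32bit_insn(uint16_t hw)
    {
        return (hw >> 11) >= 0x1d;
    }

    int main(void)
    {
        assert(!starts_32bit_insn(0x4770));  /* BX lr: 16-bit */
        assert(starts_32bit_insn(0xf000));   /* BL/BLX prefix */
        assert(starts_32bit_insn(0xe97f));   /* first half of SG */
        return 0;
    }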
@@ -9677,9 +9743,9 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,

 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
    is not legal. */
-static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
+static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t insn, imm, shift, offset;
+    uint32_t imm, shift, offset;
     uint32_t rd, rn, rm, rs;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -9691,52 +9757,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
     int conds;
     int logic_cc;

-    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
-          || arm_dc_feature(s, ARM_FEATURE_M))) {
-        /* Thumb-1 cores may need to treat bl and blx as a pair of
-           16-bit instructions to get correct prefetch abort behavior. */
-        insn = insn_hw1;
-        if ((insn & (1 << 12)) == 0) {
-            ARCH(5);
-            /* Second half of blx. */
-            offset = ((insn & 0x7ff) << 1);
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tmp, tmp, offset);
-            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
-
-            tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if (insn & (1 << 11)) {
-            /* Second half of bl. */
-            offset = ((insn & 0x7ff) << 1) | 1;
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tmp, tmp, offset);
-
-            tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
-            /* Instruction spans a page boundary. Implement it as two
-               16-bit instructions in case the second half causes an
-               prefetch abort. */
-            offset = ((int32_t)insn << 21) >> 9;
-            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
-            return 0;
-        }
-        /* Fall through to 32-bit decode. */
-    }
-
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-    s->pc += 2;
-    insn |= (uint32_t)insn_hw1 << 16;
-
+    /* The only 32 bit insn that's allowed for Thumb1 is the combined
+     * BL/BLX prefix and suffix.
+     */
     if ((insn & 0xf800e800) != 0xf000e800) {
         ARCH(6T2);
     }
|
||||
* - load/store doubleword, load/store exclusive, ldacq/strel,
|
||||
* table branch.
|
||||
*/
|
||||
if (insn & 0x01200000) {
|
||||
if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
|
||||
arm_dc_feature(s, ARM_FEATURE_V8)) {
|
||||
/* 0b1110_1001_0111_1111_1110_1001_0111_111
|
||||
* - SG (v8M only)
|
||||
* The bulk of the behaviour for this instruction is implemented
|
||||
* in v7m_handle_execute_nsc(), which deals with the insn when
|
||||
* it is executed by a CPU in non-secure state from memory
|
||||
* which is Secure & NonSecure-Callable.
|
||||
* Here we only need to handle the remaining cases:
|
||||
* * in NS memory (including the "security extension not
|
||||
* implemented" case) : NOP
|
||||
* * in S memory but CPU already secure (clear IT bits)
|
||||
* We know that the attribute for the memory this insn is
|
||||
* in must match the current CPU state, because otherwise
|
||||
* get_phys_addr_pmsav8 would have generated an exception.
|
||||
*/
|
||||
if (s->v8m_secure) {
|
||||
/* Like the IT insn, we don't need to generate any code */
|
||||
s->condexec_cond = 0;
|
||||
s->condexec_mask = 0;
|
||||
}
|
||||
} else if (insn & 0x01200000) {
|
||||
/* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
|
||||
* - load/store dual (post-indexed)
|
||||
* 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
|
||||
@ -11051,27 +11095,15 @@ illegal_op:
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
|
||||
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
|
||||
{
|
||||
uint32_t val, insn, op, rm, rn, rd, shift, cond;
|
||||
uint32_t val, op, rm, rn, rd, shift, cond;
|
||||
int32_t offset;
|
||||
int i;
|
||||
TCGv_i32 tmp;
|
||||
TCGv_i32 tmp2;
|
||||
TCGv_i32 addr;
|
||||
|
||||
if (s->condexec_mask) {
|
||||
cond = s->condexec_cond;
|
||||
if (cond != 0x0e) { /* Skip conditional when condition is AL. */
|
||||
s->condlabel = gen_new_label();
|
||||
arm_gen_test_cc(cond ^ 1, s->condlabel);
|
||||
s->condjmp = 1;
|
||||
}
|
||||
}
|
||||
|
||||
insn = arm_lduw_code(env, s->pc, s->sctlr_b);
|
||||
s->pc += 2;
|
||||
|
||||
switch (insn >> 12) {
|
||||
case 0: case 1:
|
||||
|
||||
@@ -11218,8 +11250,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
                 goto undef;
             }
             if (link) {
-                /* BLXNS: not yet implemented */
-                goto undef;
+                gen_blxns(s, rm);
             } else {
                 gen_bxns(s, rm);
             }
@@ -11803,8 +11834,21 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)

     case 14:
         if (insn & (1 << 11)) {
-            if (disas_thumb2_insn(env, s, insn))
-                goto undef32;
+            /* thumb_insn_is_16bit() ensures we can't get here for
+             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
+             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
+             */
+            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+            ARCH(5);
+            offset = ((insn & 0x7ff) << 1);
+            tmp = load_reg(s, 14);
+            tcg_gen_addi_i32(tmp, tmp, offset);
+            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
+
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            store_reg(s, 14, tmp2);
+            gen_bx(s, tmp);
             break;
         }
         /* unconditional branch */
@@ -11815,15 +11859,30 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
         break;

     case 15:
-        if (disas_thumb2_insn(env, s, insn))
-            goto undef32;
+        /* thumb_insn_is_16bit() ensures we can't get here for
+         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
+         */
+        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+
+        if (insn & (1 << 11)) {
+            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
+            offset = ((insn & 0x7ff) << 1) | 1;
+            tmp = load_reg(s, 14);
+            tcg_gen_addi_i32(tmp, tmp, offset);
+
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            store_reg(s, 14, tmp2);
+            gen_bx(s, tmp);
+        } else {
+            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
+            uint32_t uoffset = ((int32_t)insn << 21) >> 9;
+
+            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
+        }
         break;
     }
     return;
-undef32:
-    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
-                       default_exception_el(s));
-    return;
 illegal_op:
 undef:
     gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
@@ -11834,29 +11893,14 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
 {
     /* Return true if the insn at dc->pc might cross a page boundary.
      * (False positives are OK, false negatives are not.)
+     * We know this is a Thumb insn, and our caller ensures we are
+     * only called if dc->pc is less than 4 bytes from the page
+     * boundary, so we cross the page if the first 16 bits indicate
+     * that this is a 32 bit insn.
      */
-    uint16_t insn;
+    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

-    if ((s->pc & 3) == 0) {
-        /* At a 4-aligned address we can't be crossing a page */
-        return false;
-    }
-
-    /* This must be a Thumb insn */
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-
-    if ((insn >> 11) >= 0x1d) {
-        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
-         * First half of a 32-bit Thumb insn. Thumb-1 cores might
-         * end up actually treating this as two 16-bit insns (see the
-         * code at the start of disas_thumb2_insn()) but we don't bother
-         * to check for that as it is unlikely, and false positives here
-         * are harmless.
-         */
-        return true;
-    }
-    /* Definitely a 16-bit insn, can't be crossing a page. */
-    return false;
+    return !thumb_insn_is_16bit(s, insn);
 }

 static int arm_tr_init_disas_context(DisasContextBase *dcbase,
@@ -12089,16 +12133,88 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
        in init_disas_context by adjusting max_insns. */
 }

+static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
+{
+    /* Return true if this Thumb insn is always unconditional,
+     * even inside an IT block. This is true of only a very few
+     * instructions: BKPT, HLT, and SG.
+     *
+     * A larger class of instructions are UNPREDICTABLE if used
+     * inside an IT block; we do not need to detect those here, because
+     * what we do by default (perform the cc check and update the IT
+     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
+     * choice for those situations.
+     *
+     * insn is either a 16-bit or a 32-bit instruction; the two are
+     * distinguishable because for the 16-bit case the top 16 bits
+     * are zeroes, and that isn't a valid 32-bit encoding.
+     */
+    if ((insn & 0xffffff00) == 0xbe00) {
+        /* BKPT */
+        return true;
+    }
+
+    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
+        !arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* HLT: v8A only. This is unconditional even when it is going to
+         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
+         * For v7 cores this was a plain old undefined encoding and so
+         * honours its cc check. (We might be using the encoding as
+         * a semihosting trap, but we don't change the cc check behaviour
+         * on that account, because a debugger connected to a real v7A
+         * core and emulating semihosting traps by catching the UNDEF
+         * exception would also only see cases where the cc check passed.
+         * No guest code should be trying to do a HLT semihosting trap
+         * in an IT block anyway.
+         */
+        return true;
+    }
+
+    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
+        arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* SG: v8M only */
+        return true;
+    }
+
+    return false;
+}
+
 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cpu->env_ptr;
+    uint32_t insn;
+    bool is_16bit;

     if (arm_pre_translate_insn(dc)) {
         return;
     }

-    disas_thumb_insn(env, dc);
+    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+    is_16bit = thumb_insn_is_16bit(dc, insn);
+    dc->pc += 2;
+    if (!is_16bit) {
+        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+
+        insn = insn << 16 | insn2;
+        dc->pc += 2;
+    }
+
+    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
+        uint32_t cond = dc->condexec_cond;
+
+        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
+            dc->condlabel = gen_new_label();
+            arm_gen_test_cc(cond ^ 1, dc->condlabel);
+            dc->condjmp = 1;
+        }
+    }
+
+    if (is_16bit) {
+        disas_thumb_insn(dc, insn);
+    } else {
+        disas_thumb2_insn(dc, insn);
+    }

     /* Advance the Thumb condexec condition. */
     if (dc->condexec_mask) {
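Pulling the word loads up to the top level gives thumb_tr_translate_insn() the shape sketched below: fetch one halfword, widen to 32 bits when the encoding requires it, then dispatch to the 16-bit or 32-bit decoder. A self-contained imitation with memory access and the width test stubbed out (the code buffer and helper names are invented):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented little-endian code buffer: BX lr, then SG. */
    static const uint8_t code[] = {
        0x70, 0x47,             /* 0x4770: BX lr (16-bit) */
        0x7f, 0xe9, 0x7f, 0xe9, /* 0xe97fe97f: SG (32-bit) */
    };

    static uint16_t lduw(unsigned int pc)
    {
        uint16_t hw;

        memcpy(&hw, code + pc, 2);  /* assumes a little-endian host */
        return hw;
    }

    static int is_16bit(uint16_t hw)
    {
        return (hw >> 11) < 0x1d;
    }

    int main(void)
    {
        unsigned int pc = 0;

        while (pc < sizeof(code)) {
            uint32_t insn = lduw(pc);

            pc += 2;
            if (!is_16bit(insn)) {
                insn = insn << 16 | lduw(pc);  /* append second halfword */
                pc += 2;
            }
            /* dispatch point: disas_thumb_insn() vs disas_thumb2_insn() */
            printf("%s insn 0x%08" PRIx32 "\n",
                   (insn >> 16) ? "32-bit" : "16-bit", insn);
        }
        return 0;
    }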