From f958537a0d2037134b998dff597f8ae3cc6611fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?=
Date: Thu, 12 Oct 2017 13:20:06 +0100
Subject: [PATCH 01/13] watchdog/aspeed: fix variable type to store reload
 value
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Initially from Anton D. Kachalov, but the SoB was missing.

Signed-off-by: Cédric Le Goater
Acked-by: Andrew Jeffery
Message-id: 20170920064915.30027-1-clg@kaod.org
[clg: change commit log and subject,
      replace UL suffix by ULL]
Signed-off-by: Cédric Le Goater
Reviewed-by: Peter Maydell
Signed-off-by: Peter Maydell
---
 hw/watchdog/wdt_aspeed.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c
index 22bce364d7..95f6ad186d 100644
--- a/hw/watchdog/wdt_aspeed.c
+++ b/hw/watchdog/wdt_aspeed.c
@@ -100,13 +100,13 @@ static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size)
 
 static void aspeed_wdt_reload(AspeedWDTState *s, bool pclk)
 {
-    uint32_t reload;
+    uint64_t reload;
 
     if (pclk) {
         reload = muldiv64(s->regs[WDT_RELOAD_VALUE], NANOSECONDS_PER_SECOND,
                           s->pclk_freq);
     } else {
-        reload = s->regs[WDT_RELOAD_VALUE] * 1000;
+        reload = s->regs[WDT_RELOAD_VALUE] * 1000ULL;
     }
 
     if (aspeed_wdt_is_enabled(s)) {

From 8602beb7fad4b2abd5341f4303ec3c799fa0217c Mon Sep 17 00:00:00 2001
From: Igor Mammedov
Date: Thu, 12 Oct 2017 13:20:07 +0100
Subject: [PATCH 02/13] arm: fix armv7m_init() declaration to match definition

s/cpu_model/cpu_type/ was forgotten during the conversion (ba1ba5cc);
while touching the line, also fix up the alignment.

Signed-off-by: Igor Mammedov
Message-id: 1507710805-221721-1-git-send-email-imammedo@redhat.com
Reviewed-by: Peter Maydell
Signed-off-by: Peter Maydell
---
 include/hw/arm/arm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/hw/arm/arm.h b/include/hw/arm/arm.h
index a3f79d3379..ce769bde6a 100644
--- a/include/hw/arm/arm.h
+++ b/include/hw/arm/arm.h
@@ -25,7 +25,7 @@ typedef enum {
 
 /* armv7m.c */
 DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
-                      const char *kernel_filename, const char *cpu_model);
+                         const char *kernel_filename, const char *cpu_type);
 /**
  * armv7m_load_kernel:
  * @cpu: CPU

From b9f587d62cebed427206539750ebf59bde4df422 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:31 +0100
Subject: [PATCH 03/13] target/arm: Add M profile secure MMU index values to
 get_a32_user_mem_index()

Add the M profile secure MMU index values to the switch in
get_a32_user_mem_index() so that LDRT/STRT work correctly rather than
asserting at translate time.
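As a purely illustrative sketch (not part of the change; it reuses the
ARMMMUIdx enumerators visible in the hunk below), the rule the translator
needs is "keep the security bank, force the privilege level to User":

    /* Sketch only: return the unprivileged counterpart of an M-profile
     * MMU index, preserving the Secure/Non-secure bank, which is what
     * the unprivileged-access LDRT/STRT instructions require.
     */
    static ARMMMUIdx m_profile_user_idx(ARMMMUIdx idx)
    {
        switch (idx) {
        case ARMMMUIdx_MUser:
        case ARMMMUIdx_MPriv:
        case ARMMMUIdx_MNegPri:
            return ARMMMUIdx_MUser;     /* Non-secure bank */
        case ARMMMUIdx_MSUser:
        case ARMMMUIdx_MSPriv:
        case ARMMMUIdx_MSNegPri:
            return ARMMMUIdx_MSUser;    /* Secure bank */
        default:
            g_assert_not_reached();
        }
    }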
Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-2-git-send-email-peter.maydell@linaro.org
---
 target/arm/translate.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index fdc46cc525..61fd0ef6b5 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -165,6 +165,10 @@ static inline int get_a32_user_mem_index(DisasContext *s)
     case ARMMMUIdx_MPriv:
     case ARMMMUIdx_MNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
+    case ARMMMUIdx_MSUser:
+    case ARMMMUIdx_MSPriv:
+    case ARMMMUIdx_MSNegPri:
+        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
     case ARMMMUIdx_S2NS:
     default:
         g_assert_not_reached();

From 333e10c51ef5876ced26f77b61b69ce0f83161a9 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:32 +0100
Subject: [PATCH 04/13] target/arm: Implement SG instruction

Implement the SG instruction, which we emulate 'by hand' in the
exception handling code path.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-3-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c | 132 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 127 insertions(+), 5 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 1d689f00b3..9cc881eab7 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -41,6 +41,10 @@ typedef struct V8M_SAttributes {
     bool irvalid;
 } V8M_SAttributes;
 
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                                V8M_SAttributes *sattrs);
+
 /* Definitions for the PMCCNTR and PMCR registers */
 #define PMCRD   0x8
 #define PMCRC   0x4
@@ -6736,6 +6740,126 @@ static void arm_log_exception(int idx)
     }
 }
 
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+                               uint32_t addr, uint16_t *insn)
+{
+    /* Load a 16-bit portion of a v7M instruction, returning true on success,
+     * or false on failure (in which case we will have pended the appropriate
+     * exception).
+     * We need to do the instruction fetch's MPU and SAU checks
+     * like this because there is no MMU index that would allow
+     * doing the load with a single function call. Instead we must
+     * first check that the security attributes permit the load
+     * and that they don't mismatch on the two halves of the instruction,
+     * and then we do the load as a secure load (ie using the security
+     * attributes of the address, not the CPU, as architecturally required).
+     */
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    V8M_SAttributes sattrs = {};
+    MemTxAttrs attrs = {};
+    ARMMMUFaultInfo fi = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    uint32_t fsr;
+
+    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+    if (!sattrs.nsc || sattrs.ns) {
+        /* This must be the second half of the insn, and it straddles a
+         * region boundary with the second half not being S&NSC.
+         */
+        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+        qemu_log_mask(CPU_LOG_INT,
+                      "...really SecureFault with SFSR.INVEP\n");
+        return false;
+    }
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
+                      &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) {
+        /* the MPU lookup failed */
+        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+        return false;
+    }
+    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+                                  attrs, &txres);
+    if (txres != MEMTX_OK) {
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+        return false;
+    }
+    return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+    /* Check whether this attempt to execute code in a Secure & NS-Callable
+     * memory region is for an SG instruction; if so, then emulate the
+     * effect of the SG instruction and return true. Otherwise pend
+     * the correct kind of exception and return false.
+     */
+    CPUARMState *env = &cpu->env;
+    ARMMMUIdx mmu_idx;
+    uint16_t insn;
+
+    /* We should never get here unless get_phys_addr_pmsav8() caused
+     * an exception for NS executing in S&NSC memory.
+     */
+    assert(!env->v7m.secure);
+    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+        return false;
+    }
+
+    if (!env->thumb) {
+        goto gen_invep;
+    }
+
+    if (insn != 0xe97f) {
+        /* Not an SG instruction first half (we choose the IMPDEF
+         * early-SG-check option).
+         */
+        goto gen_invep;
+    }
+
+    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+        return false;
+    }
+
+    if (insn != 0xe97f) {
+        /* Not an SG instruction second half (yes, both halves of the SG
+         * insn have the same hex value)
+         */
+        goto gen_invep;
+    }
+
+    /* OK, we have confirmed that we really have an SG instruction.
+     * We know we're NS in S memory so don't need to repeat those checks.
+     */
+    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+                  ", executing it\n", env->regs[15]);
+    env->regs[14] &= ~1;
+    switch_v7m_security_state(env, true);
+    xpsr_write(env, 0, XPSR_IT);
+    env->regs[15] += 4;
+    return true;
+
+gen_invep:
+    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+    qemu_log_mask(CPU_LOG_INT,
+                  "...really SecureFault with SFSR.INVEP\n");
+    return false;
+}
+
 void arm_v7m_cpu_do_interrupt(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -6778,12 +6902,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
              * the SG instruction have the same security attributes.)
              * Everything else must generate an INVEP SecureFault, so we
              * emulate the SG instruction here.
-             * TODO: actually emulate SG.
              */
-            env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
-            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            qemu_log_mask(CPU_LOG_INT,
-                          "...really SecureFault with SFSR.INVEP\n");
+            if (v7m_handle_execute_nsc(cpu)) {
+                return;
+            }
             break;
         case M_FAKE_FSR_SFAULT:
             /* Various flavours of SecureFault for attempts to execute or

From 3e3fa230e3b8ffe119f14ba57a6bc677a411be57 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:33 +0100
Subject: [PATCH 05/13] target/arm: Implement BLXNS

Implement the BLXNS instruction, which allows secure code to call
non-secure code.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-4-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c    | 59 ++++++++++++++++++++++++++++++++++++++++++
 target/arm/helper.h    |  1 +
 target/arm/internals.h |  1 +
 target/arm/translate.c | 17 ++++++++++--
 4 files changed, 76 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 9cc881eab7..47c57670c8 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -5897,6 +5897,12 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
     g_assert_not_reached();
 }
 
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /* translate.c should never generate calls here in user-only mode */
+    g_assert_not_reached();
+}
+
 void switch_mode(CPUARMState *env, int mode)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
@@ -6189,6 +6195,59 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
     env->regs[15] = dest & ~1;
 }
 
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+    /* Handle v7M BLXNS:
+     *  - bit 0 of the destination address is the target security state
+     */
+
+    /* At this point regs[15] is the address just after the BLXNS */
+    uint32_t nextinst = env->regs[15] | 1;
+    uint32_t sp = env->regs[13] - 8;
+    uint32_t saved_psr;
+
+    /* translate.c will have made BLXNS UNDEF unless we're secure */
+    assert(env->v7m.secure);
+
+    if (dest & 1) {
+        /* target is Secure, so this is just a normal BLX,
+         * except that the low bit doesn't indicate Thumb/not.
+         */
+        env->regs[14] = nextinst;
+        env->thumb = 1;
+        env->regs[15] = dest & ~1;
+        return;
+    }
+
+    /* Target is non-secure: first push a stack frame */
+    if (!QEMU_IS_ALIGNED(sp, 8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
+    }
+
+    saved_psr = env->v7m.exception;
+    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+        saved_psr |= XPSR_SFPA;
+    }
+
+    /* Note that these stores can throw exceptions on MPU faults */
+    cpu_stl_data(env, sp, nextinst);
+    cpu_stl_data(env, sp + 4, saved_psr);
+
+    env->regs[13] = sp;
+    env->regs[14] = 0xfeffffff;
+    if (arm_v7m_is_handler_mode(env)) {
+        /* Write a dummy value to IPSR, to avoid leaking the current secure
+         * exception number to non-secure code. This is guaranteed not
+         * to cause write_v7m_exception() to actually change stacks.
+         */
+        write_v7m_exception(env, 1);
+    }
+    switch_v7m_security_state(env, 0);
+    env->thumb = 1;
+    env->regs[15] = dest;
+}
+
 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                 bool spsel)
 {
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 64afbac59f..2cf6f74152 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -64,6 +64,7 @@ DEF_HELPER_3(v7m_msr, void, env, i32, i32)
 DEF_HELPER_2(v7m_mrs, i32, env, i32)
 
 DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)
 
 DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
 DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index fd9a7e8181..1746737c27 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -60,6 +60,7 @@ static inline bool excp_is_internal(int excp)
 FIELD(V7M_CONTROL, NPRIV, 0, 1)
 FIELD(V7M_CONTROL, SPSEL, 1, 1)
 FIELD(V7M_CONTROL, FPCA, 2, 1)
+FIELD(V7M_CONTROL, SFPA, 3, 1)
 
 /* Bit definitions for v7M exception return payload */
 FIELD(V7M_EXCRET, ES, 0, 1)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 61fd0ef6b5..caf0d5849a 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1017,6 +1017,20 @@ static inline void gen_bxns(DisasContext *s, int rm)
     s->base.is_jmp = DISAS_EXIT;
 }
 
+static inline void gen_blxns(DisasContext *s, int rm)
+{
+    TCGv_i32 var = load_reg(s, rm);
+
+    /* We don't need to sync condexec state, for the same reason as bxns.
+     * We do however need to set the PC, because the blxns helper reads it.
+     * The blxns helper may throw an exception.
+     */
+    gen_set_pc_im(s, s->pc);
+    gen_helper_v7m_blxns(cpu_env, var);
+    tcg_temp_free_i32(var);
+    s->base.is_jmp = DISAS_EXIT;
+}
+
 /* Variant of store_reg which uses branch&exchange logic when storing
    to r15 in ARM architecture v7 and above. The source must be a temporary
    and will be marked as dead. */
@@ -11222,8 +11236,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
                 goto undef;
             }
             if (link) {
-                /* BLXNS: not yet implemented */
-                goto undef;
+                gen_blxns(s, rm);
             } else {
                 gen_bxns(s, rm);
             }

From d02a8698d7ae2bfed3b11fe5b064cb0aa406863b Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:34 +0100
Subject: [PATCH 06/13] target/arm: Implement secure function return
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Secure function return happens when a non-secure function has been
called using BLXNS and so has a particular magic LR value (either
0xfefffffe or 0xfeffffff). The function return via BX behaves specially
when the new PC value is this magic value, in the same way that
exception returns are handled.

Adjust our BX excret guards so that they recognize the function return
magic number as well, and perform the function-return unstacking in
do_v7m_exception_exit().
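As a purely illustrative, self-contained sketch (using the
EXC_RETURN_MIN_MAGIC/FNC_RETURN_MIN_MAGIC values this patch adds to
internals.h), the classification of a BX target on a CPU with the
Security Extension is:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only, not the QEMU code: 0xfefffffe and 0xfeffffff are the
     * function-return magic values; 0xff000000 and up are the
     * exception-return magic values.
     */
    #define EXC_RETURN_MIN_MAGIC 0xff000000
    #define FNC_RETURN_MIN_MAGIC 0xfefffffe

    static bool is_function_return(uint32_t dest)
    {
        return dest >= FNC_RETURN_MIN_MAGIC && dest < EXC_RETURN_MIN_MAGIC;
    }

    static bool is_exception_return(uint32_t dest)
    {
        return dest >= EXC_RETURN_MIN_MAGIC;
    }

On a CPU without the Security Extension only the second range exists,
which is why the generated code below compares against a single
min_magic threshold chosen per-feature at translate time.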
Signed-off-by: Peter Maydell
Acked-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-5-git-send-email-peter.maydell@linaro.org
---
 target/arm/helper.c    | 115 ++++++++++++++++++++++++++++++++++++++---
 target/arm/internals.h |   7 +++
 target/arm/translate.c |  14 ++++-
 3 files changed, 126 insertions(+), 10 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 47c57670c8..96113fe989 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6174,7 +6174,17 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
      * - if the return value is a magic value, do exception return (like BX)
      * - otherwise bit 0 of the return value is the target security state
      */
-    if (dest >= 0xff000000) {
+    uint32_t min_magic;
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }
+
+    if (dest >= min_magic) {
         /* This is an exception return magic value; put it where
          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
          * Note that if we ever add gen_ss_advance() singlestep support to
@@ -6470,12 +6480,19 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     bool exc_secure = false;
     bool return_to_secure;
 
-    /* We can only get here from an EXCP_EXCEPTION_EXIT, and
-     * gen_bx_excret() enforces the architectural rule
-     * that jumps to magic addresses don't have magic behaviour unless
-     * we're in Handler mode (compare pseudocode BXWritePC()).
+    /* If we're not in Handler mode then jumps to magic exception-exit
+     * addresses don't have magic behaviour. However for the v8M
+     * security extensions the magic secure-function-return has to
+     * work in thread mode too, so to avoid doing an extra check in
+     * the generated code we allow exception-exit magic to also cause the
+     * internal exception and bring us here in thread mode. Correct code
+     * will never try to do this (the following insn fetch will always
+     * fault) so the overhead of having taken an unnecessary exception
+     * doesn't matter.
      */
-    assert(arm_v7m_is_handler_mode(env));
+    if (!arm_v7m_is_handler_mode(env)) {
+        return;
+    }
 
     /* In the spec pseudocode ExceptionReturn() is called directly
      * from BXWritePC() and gets the full target PC value including
@@ -6765,6 +6782,78 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
 }
 
+static bool do_v7m_function_return(ARMCPU *cpu)
+{
+    /* v8M security extensions magic function return.
+     * We may either:
+     *  (1) throw an exception (longjump)
+     *  (2) return true if we successfully handled the function return
+     *  (3) return false if we failed a consistency check and have
+     *      pended a UsageFault that needs to be taken now
+     *
+     * At this point the magic return value is split between env->regs[15]
+     * and env->thumb. We don't bother to reconstitute it because we don't
+     * need it (all values are handled the same way).
+     */
+    CPUARMState *env = &cpu->env;
+    uint32_t newpc, newpsr, newpsr_exc;
+
+    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
+
+    {
+        bool threadmode, spsel;
+        TCGMemOpIdx oi;
+        ARMMMUIdx mmu_idx;
+        uint32_t *frame_sp_p;
+        uint32_t frameptr;
+
+        /* Pull the return address and IPSR from the Secure stack */
+        threadmode = !arm_v7m_is_handler_mode(env);
+        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
+
+        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
+        frameptr = *frame_sp_p;
+
+        /* These loads may throw an exception (for MPU faults). We want to
+         * do them as secure, so work out what MMU index that is.
+         */
+        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
+        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
+        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+
+        /* Consistency checks on new IPSR */
+        newpsr_exc = newpsr & XPSR_EXCP;
+        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
+              (env->v7m.exception == 1 && newpsr_exc != 0))) {
+            /* Pend the fault and tell our caller to take it */
+            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+                                    env->v7m.secure);
+            qemu_log_mask(CPU_LOG_INT,
+                          "...taking INVPC UsageFault: "
+                          "IPSR consistency check failed\n");
+            return false;
+        }
+
+        *frame_sp_p = frameptr + 8;
+    }
+
+    /* This invalidates frame_sp_p */
+    switch_v7m_security_state(env, true);
+    env->v7m.exception = newpsr_exc;
+    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+    if (newpsr & XPSR_SFPA) {
+        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
+    }
+    xpsr_write(env, 0, XPSR_IT);
+    env->thumb = newpc & 1;
+    env->regs[15] = newpc & ~1;
+
+    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
+    return true;
+}
+
 static void arm_log_exception(int idx)
 {
     if (qemu_loglevel_mask(CPU_LOG_INT)) {
@@ -7049,8 +7138,18 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     case EXCP_IRQ:
         break;
     case EXCP_EXCEPTION_EXIT:
-        do_v7m_exception_exit(cpu);
-        return;
+        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
+            /* Must be v8M security extension function return */
+            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
+            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+            if (do_v7m_function_return(cpu)) {
+                return;
+            }
+        } else {
+            do_v7m_exception_exit(cpu);
+            return;
+        }
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
         return; /* Never happens.  Keep compiler happy.  */
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 1746737c27..43106a2d6c 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -72,6 +72,13 @@ FIELD(V7M_EXCRET, DCRS, 5, 1)
 FIELD(V7M_EXCRET, S, 6, 1)
 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
 
+/* Minimum value which is a magic number for exception return */
+#define EXC_RETURN_MIN_MAGIC 0xff000000
+/* Minimum number which is a magic number for function or exception return
+ * when using v8M security extension
+ */
+#define FNC_RETURN_MIN_MAGIC 0xfefffffe
+
 /* We use a few fake FSR values for internal purposes in M profile.
  * M profile cores don't have A/R format FSRs, but currently our
  * get_phys_addr() code assumes A/R profile and reports failures via
diff --git a/target/arm/translate.c b/target/arm/translate.c
index caf0d5849a..5c6f9fea1b 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -964,7 +964,8 @@ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
      * s->base.is_jmp that we need to do the rest of the work later.
      */
     gen_bx(s, var);
-    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
+    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
+        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
         s->base.is_jmp = DISAS_BX_EXCRET;
     }
 }
@@ -973,9 +974,18 @@ static inline void gen_bx_excret_final_code(DisasContext *s)
 {
     /* Generate the code to finish possible exception return and end the TB */
     TCGLabel *excret_label = gen_new_label();
+    uint32_t min_magic;
+
+    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
+        /* Covers FNC_RETURN and EXC_RETURN magic */
+        min_magic = FNC_RETURN_MIN_MAGIC;
+    } else {
+        /* EXC_RETURN magic only */
+        min_magic = EXC_RETURN_MIN_MAGIC;
+    }
 
     /* Is the new PC value in the magic range indicating exception return? */
-    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
+    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
     /* No: end the TB as we would for a DISAS_JMP */
     if (is_singlestepping(s)) {
         gen_singlestep_exception(s);

From 6b8acf256df09c8a8dd7dcaa79b06eaff4ad63f7 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:35 +0100
Subject: [PATCH 07/13] target-arm: Don't check for "Thumb2 or M profile" for
 not-Thumb1

The code which implements the Thumb1 split BL/BLX instructions is
guarded by a check on "not M or THUMB2". All we really need to check
here is "not THUMB2" (and we assume that elsewhere too, eg in the
ARCH(6T2) test that UNDEFs the Thumb2 insns).

This doesn't change behaviour because all M profile cores have Thumb2
and so ARM_FEATURE_M implies ARM_FEATURE_THUMB2. (v6M implements a
very restricted subset of Thumb2, but we can cross that bridge when
we get to it with appropriate feature bits.)

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-6-git-send-email-peter.maydell@linaro.org
---
 target/arm/translate.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index 5c6f9fea1b..530a5c4ac0 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9719,8 +9719,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
     int conds;
     int logic_cc;
 
-    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
-          || arm_dc_feature(s, ARM_FEATURE_M))) {
+    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
         /* Thumb-1 cores may need to treat bl and blx as a pair of
            16-bit instructions to get correct prefetch abort behavior. */
         insn = insn_hw1;

From 296e5a0a6c393553079a641c50521ae33ff89324 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:36 +0100
Subject: [PATCH 08/13] target/arm: Pull Thumb insn word loads up to top level

Refactor the Thumb decode to do the loads of the instruction words at
the top level rather than only loading the second half of a 32-bit
Thumb insn in the middle of the decode. This is simple apart from the
awkward case of Thumb1, where the BL/BLX prefix and suffix
instructions live in what in Thumb2 is the 32-bit insn space.
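As an illustrative summary (an assumption of this note: it mirrors the
thumb_insn_is_16bit() logic added below, minus the Thumb1
page-straddling special case), the length rule the new decode applies
to the first halfword is:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: the top five bits of the first halfword decide the
     * encoding length. 0b11101 / 0b11110 / 0b11111 introduce a 32-bit
     * insn; any value below 0x1d is a complete 16-bit insn.
     */
    static bool first_halfword_starts_32bit(uint16_t hw)
    {
        return (hw >> 11) >= 0x1d;
    }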
To handle these we decode enough to identify whether we're looking at
a prefix/suffix that we handle as a 16 bit insn, or a prefix that
we're going to merge with the following suffix to consider as a
32 bit insn. The translation of the 16 bit cases then moves from
disas_thumb2_insn() to disas_thumb_insn().

The refactoring has the benefit that we don't need to pass the
CPUARMState* down into the decoder code any more, but the major
reason for doing this is that some Thumb instructions must be always
unconditional regardless of the IT state bits, so we need to know the
whole insn before we emit the "skip this insn if the IT bits and cond
state tell us to" code. (The always unconditional insns are BKPT, HLT
and SG; the last of these is 32 bits.)

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-7-git-send-email-peter.maydell@linaro.org
---
 target/arm/translate.c | 178 +++++++++++++++++++++++++----------------
 1 file changed, 108 insertions(+), 70 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index 530a5c4ac0..19c136c651 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9620,6 +9620,44 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
     }
 }
 
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
+{
+    /* Return true if this is a 16 bit instruction. We must be precise
+     * about this (matching the decode). We assume that s->pc still
+     * points to the first 16 bits of the insn.
+     */
+    if ((insn >> 11) < 0x1d) {
+        /* Definitely a 16-bit instruction */
+        return true;
+    }
+
+    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
+     * first half of a 32-bit Thumb insn. Thumb-1 cores might
+     * end up actually treating this as two 16-bit insns, though,
+     * if it's half of a bl/blx pair that might span a page boundary.
+     */
+    if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
+        /* Thumb2 cores (including all M profile ones) always treat
+         * 32-bit insns as 32-bit.
+         */
+        return false;
+    }
+
+    if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
+        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
+         * is not on the next page; we merge this into a 32-bit
+         * insn.
+         */
+        return false;
+    }
+
+    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
+     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
+     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
+     * -- handle as single 16 bit insn
+     */
+    return true;
+}
+
 /* Return true if this is a Thumb-2 logical op.  */
 static int
 thumb2_logic_op(int op)
@@ -9705,9 +9743,9 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
 
 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
    is not legal.  */
-static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
+static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t insn, imm, shift, offset;
+    uint32_t imm, shift, offset;
     uint32_t rd, rn, rm, rs;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -9719,51 +9757,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
     int conds;
     int logic_cc;
 
-    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
-        /* Thumb-1 cores may need to treat bl and blx as a pair of
-           16-bit instructions to get correct prefetch abort behavior. */
-        insn = insn_hw1;
-        if ((insn & (1 << 12)) == 0) {
-            ARCH(5);
-            /* Second half of blx.  */
-            offset = ((insn & 0x7ff) << 1);
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tmp, tmp, offset);
-            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
-
-            tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if (insn & (1 << 11)) {
-            /* Second half of bl.  */
-            offset = ((insn & 0x7ff) << 1) | 1;
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tmp, tmp, offset);
-
-            tmp2 = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
-            /* Instruction spans a page boundary. Implement it as two
-               16-bit instructions in case the second half causes an
-               prefetch abort. */
-            offset = ((int32_t)insn << 21) >> 9;
-            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
-            return 0;
-        }
-        /* Fall through to 32-bit decode.  */
-    }
-
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-    s->pc += 2;
-    insn |= (uint32_t)insn_hw1 << 16;
-
+    /* The only 32 bit insn that's allowed for Thumb1 is the combined
+     * BL/BLX prefix and suffix.
+     */
     if ((insn & 0xf800e800) != 0xf000e800) {
         ARCH(6T2);
     }
@@ -11078,27 +11074,15 @@ illegal_op:
     return 1;
 }
 
-static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
+static void disas_thumb_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t val, insn, op, rm, rn, rd, shift, cond;
+    uint32_t val, op, rm, rn, rd, shift, cond;
     int32_t offset;
     int i;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
     TCGv_i32 addr;
 
-    if (s->condexec_mask) {
-        cond = s->condexec_cond;
-        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
-            s->condlabel = gen_new_label();
-            arm_gen_test_cc(cond ^ 1, s->condlabel);
-            s->condjmp = 1;
-        }
-    }
-
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-    s->pc += 2;
-
     switch (insn >> 12) {
     case 0: case 1:
@@ -11829,8 +11813,21 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
 
     case 14:
         if (insn & (1 << 11)) {
-            if (disas_thumb2_insn(env, s, insn))
-                goto undef32;
+            /* thumb_insn_is_16bit() ensures we can't get here for
+             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
+             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
+             */
+            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+            ARCH(5);
+            offset = ((insn & 0x7ff) << 1);
+            tmp = load_reg(s, 14);
+            tcg_gen_addi_i32(tmp, tmp, offset);
+            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
+
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            store_reg(s, 14, tmp2);
+            gen_bx(s, tmp);
             break;
         }
         /* unconditional branch */
@@ -11841,15 +11838,30 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
         break;
 
     case 15:
-        if (disas_thumb2_insn(env, s, insn))
-            goto undef32;
+        /* thumb_insn_is_16bit() ensures we can't get here for
+         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
+         */
+        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+
+        if (insn & (1 << 11)) {
+            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
+            offset = ((insn & 0x7ff) << 1) | 1;
+            tmp = load_reg(s, 14);
+            tcg_gen_addi_i32(tmp, tmp, offset);
+
+            tmp2 = tcg_temp_new_i32();
+            tcg_gen_movi_i32(tmp2, s->pc | 1);
+            store_reg(s, 14, tmp2);
+            gen_bx(s, tmp);
+        } else {
+            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
+            uint32_t uoffset = ((int32_t)insn << 21) >> 9;
+
+            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
+        }
         break;
     }
     return;
-undef32:
-    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
-                       default_exception_el(s));
-    return;
 illegal_op:
 undef:
     gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
@@ -12119,12 +12131,38 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cpu->env_ptr;
+    uint32_t insn;
+    bool is_16bit;
 
     if (arm_pre_translate_insn(dc)) {
         return;
     }
 
-    disas_thumb_insn(env, dc);
+    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+    is_16bit = thumb_insn_is_16bit(dc, insn);
+    dc->pc += 2;
+    if (!is_16bit) {
+        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
+
+        insn = insn << 16 | insn2;
+        dc->pc += 2;
+    }
+
+    if (dc->condexec_mask) {
+        uint32_t cond = dc->condexec_cond;
+
+        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
+            dc->condlabel = gen_new_label();
+            arm_gen_test_cc(cond ^ 1, dc->condlabel);
+            dc->condjmp = 1;
+        }
+    }
+
+    if (is_16bit) {
+        disas_thumb_insn(dc, insn);
+    } else {
+        disas_thumb2_insn(dc, insn);
+    }
 
     /* Advance the Thumb condexec condition.  */
     if (dc->condexec_mask) {

From 5b8d7289e9e92a0d7bcecb93cd189e245fef10cd Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:37 +0100
Subject: [PATCH 09/13] target-arm: Simplify insn_crosses_page()

Recent changes have left insn_crosses_page() more complicated than it
needed to be:
 * it's only called from thumb_tr_translate_insn() so we know for
   certain that we're looking at a Thumb insn
 * the caller's check for dc->pc >= dc->next_page_start - 3 means
   that dc->pc can't possibly be 4 aligned, so there's no need to
   check that (the check was partly there to ensure that we didn't
   treat an ARM insn as Thumb, I think)
 * we now have thumb_insn_is_16bit() which lets us do a precise check
   of the length of the next insn, rather than opencoding an
   inaccurate check

Simplify it down to just loading the first half of the insn and
calling thumb_insn_is_16bit() on it.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-8-git-send-email-peter.maydell@linaro.org
---
 target/arm/translate.c | 27 ++++++---------------------
 1 file changed, 6 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index 19c136c651..a5abdfa420 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -11872,29 +11872,14 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
 {
     /* Return true if the insn at dc->pc might cross a page boundary.
      * (False positives are OK, false negatives are not.)
+     * We know this is a Thumb insn, and our caller ensures we are
+     * only called if dc->pc is less than 4 bytes from the page
+     * boundary, so we cross the page if the first 16 bits indicate
+     * that this is a 32 bit insn.
      */
-    uint16_t insn;
+    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
 
-    if ((s->pc & 3) == 0) {
-        /* At a 4-aligned address we can't be crossing a page */
-        return false;
-    }
-
-    /* This must be a Thumb insn */
-    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
-
-    if ((insn >> 11) >= 0x1d) {
-        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
-         * First half of a 32-bit Thumb insn. Thumb-1 cores might
-         * end up actually treating this as two 16-bit insns (see the
-         * code at the start of disas_thumb2_insn()) but we don't bother
-         * to check for that as it is unlikely, and false positives here
-         * are harmless.
-         */
-        return true;
-    }
-    /* Definitely a 16-bit insn, can't be crossing a page.  */
-    return false;
+    return !thumb_insn_is_16bit(s, insn);
 }
 
 static int arm_tr_init_disas_context(DisasContextBase *dcbase,

From dcf14dfb704519846f396a376339ebdb93eaf049 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:38 +0100
Subject: [PATCH 10/13] target/arm: Support some Thumb insns being always
 unconditional

A few Thumb instructions are always unconditional even inside an IT
block (as opposed to being UNPREDICTABLE if used inside an IT block):
BKPT, the v8M SG instruction, and the A profile HLT (debug halt)
instruction.

This means we need to suppress the jump-over-instruction-on-condfail
code generation (though the IT state still advances as usual and
subsequent insns in the IT block may be conditional).

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-9-git-send-email-peter.maydell@linaro.org
---
 target/arm/translate.c | 48 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 47 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index a5abdfa420..6634badb5a 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -12112,6 +12112,52 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
        in init_disas_context by adjusting max_insns.  */
 }
 
+static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
+{
+    /* Return true if this Thumb insn is always unconditional,
+     * even inside an IT block. This is true of only a very few
+     * instructions: BKPT, HLT, and SG.
+     *
+     * A larger class of instructions are UNPREDICTABLE if used
+     * inside an IT block; we do not need to detect those here, because
+     * what we do by default (perform the cc check and update the IT
+     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
+     * choice for those situations.
+     *
+     * insn is either a 16-bit or a 32-bit instruction; the two are
+     * distinguishable because for the 16-bit case the top 16 bits
+     * are zeroes, and that isn't a valid 32-bit encoding.
+     */
+    if ((insn & 0xffffff00) == 0xbe00) {
+        /* BKPT */
+        return true;
+    }
+
+    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
+        !arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* HLT: v8A only. This is unconditional even when it is going to
+         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
+         * For v7 cores this was a plain old undefined encoding and so
+         * honours its cc check. (We might be using the encoding as
+         * a semihosting trap, but we don't change the cc check behaviour
+         * on that account, because a debugger connected to a real v7A
+         * core and emulating semihosting traps by catching the UNDEF
+         * exception would also only see cases where the cc check passed.
+         * No guest code should be trying to do a HLT semihosting trap
+         * in an IT block anyway.
+         */
+        return true;
+    }
+
+    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
+        arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* SG: v8M only */
+        return true;
+    }
+
+    return false;
+}
+
 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -12133,7 +12179,7 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         dc->pc += 2;
     }
 
-    if (dc->condexec_mask) {
+    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
         uint32_t cond = dc->condexec_cond;
 
         if (cond != 0x0e) {     /* Skip conditional when condition is AL. */

From 76eff04d166b8fe747adbe82de8b7e060e668ff9 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 9 Oct 2017 14:48:39 +0100
Subject: [PATCH 11/13] target/arm: Implement SG instruction corner cases

The common situation of the SG instruction is that it is executed
from S&NSC memory by a CPU in NS state. That case is handled by
v7m_handle_execute_nsc(). However the instruction also has defined
behaviour in a couple of other cases:
 * SG instruction in NS memory (behaves as a NOP)
 * SG in S memory but CPU already secure (clears IT bits and does
   nothing else)
 * SG instruction in v8M without Security Extension (NOP)

These can be implemented in translate.c.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507556919-24992-10-git-send-email-peter.maydell@linaro.org
---
 target/arm/translate.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index 6634badb5a..4da1a4cbc6 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9778,7 +9778,28 @@ static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
              *  - load/store doubleword, load/store exclusive, ldacq/strel,
              *    table branch.
              */
-            if (insn & 0x01200000) {
+            if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
+                arm_dc_feature(s, ARM_FEATURE_V8)) {
+                /* 0b1110_1001_0111_1111_1110_1001_0111_1111
+                 *  - SG (v8M only)
+                 * The bulk of the behaviour for this instruction is implemented
+                 * in v7m_handle_execute_nsc(), which deals with the insn when
+                 * it is executed by a CPU in non-secure state from memory
+                 * which is Secure & NonSecure-Callable.
+                 * Here we only need to handle the remaining cases:
+                 *  * in NS memory (including the "security extension not
+                 *    implemented" case) : NOP
+                 *  * in S memory but CPU already secure (clear IT bits)
+                 * We know that the attribute for the memory this insn is
+                 * in must match the current CPU state, because otherwise
+                 * get_phys_addr_pmsav8 would have generated an exception.
+                 */
+                if (s->v8m_secure) {
+                    /* Like the IT insn, we don't need to generate any code */
+                    s->condexec_cond = 0;
+                    s->condexec_mask = 0;
+                }
+            } else if (insn & 0x01200000) {
                 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
                  *  - load/store dual (post-indexed)
                  * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx

From a94bb9cd586c50d13b68e5fa4628cc36e29805c4 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Wed, 11 Oct 2017 18:24:36 +0100
Subject: [PATCH 12/13] nvic: Add missing 'break'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Coverity points out that we forgot the 'break' for the SAU_CTRL write
case (CID1381683). This has no actual visible consequences because it
happens that the following case is effectively a no-op.
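For illustration only, a generic sketch of the bug class (the register
offsets are assumptions drawn from the hunk below, and the body is not
the actual NVIC code): without the break, a matching write falls
through into the handler for the next register.

    switch (offset) {
    case 0xdd0: /* SAU_CTRL */
        cpu->env.sau.ctrl = value & 3;
        break;  /* the statement this patch adds; previously control
                 * fell through into the SAU_TYPE case */
    case 0xdd4: /* SAU_TYPE: read-only, so falling into it was benign */
        break;
    }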
Signed-off-by: Peter Maydell
Reviewed-by: Philippe Mathieu-Daudé
Message-id: 1507742676-9908-1-git-send-email-peter.maydell@linaro.org
Reviewed-by: Richard Henderson
---
 hw/intc/armv7m_nvic.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 22d5e6e6af..a42961c643 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -1447,6 +1447,7 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
             return;
         }
         cpu->env.sau.ctrl = value & 3;
+        break;
     case 0xdd4: /* SAU_TYPE */
         if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
             goto bad_offset;

From cf5f7937b05c84d5565134f058c00cd48304a117 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Tue, 10 Oct 2017 16:54:16 +0100
Subject: [PATCH 13/13] nvic: Fix miscalculation of offsets into ITNS array

This calculation of the first exception vector in the ITNS register
being accessed:
        int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;

is incorrect, because offset is in bytes, so we only want to multiply
by 8.

Spotted by Coverity (CID 1381484, CID 1381488), though it is not
correct that it actually overflows the buffer, because we have a
'startvec + i < s->num_irq' guard.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 1507650856-11718-1-git-send-email-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index a42961c643..be46639b63 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -698,7 +698,7 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
         return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
     case 0x380 ... 0x3bf: /* NVIC_ITNS */
     {
-        int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
+        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;
 
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
@@ -1102,7 +1102,7 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
     switch (offset) {
     case 0x380 ... 0x3bf: /* NVIC_ITNS */
     {
-        int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
+        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;
 
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
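A worked example of the fixed arithmetic (illustrative; it assumes
NVIC_FIRST_IRQ is 16, i.e. that external interrupt vectors start after
the 16 fixed exceptions): each 32-bit ITNS register covers 32
exception vectors, so one byte of register offset corresponds to 8
vectors. For an access at offset 0x384 the corrected code gives

        startvec = 8 * (0x384 - 0x380) + 16 = 48

which is the first vector covered by the second ITNS register, whereas
the old multiplier produced 32 * 4 + 16 = 144, four registers too far
into the array.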