Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190704-1' into staging

target-arm queue:
 * more code-movement to separate TCG-only functions into their own files
 * Correct VMOV_imm_dp handling of short vectors
 * Execute Thumb instructions when their condbits are 0xf
 * armv7m_systick: Forbid non-privileged accesses
 * Use _ra versions of cpu_stl_data() in v7M helpers
 * v8M: Check state of exception being returned from
 * v8M: Forcibly clear negative-priority exceptions on deactivate

# gpg: Signature made Thu 04 Jul 2019 17:31:22 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190704-1:
  target/arm: Correct VMOV_imm_dp handling of short vectors
  target/arm: Execute Thumb instructions when their condbits are 0xf
  hw/timer/armv7m_systick: Forbid non-privileged accesses
  target/arm: Use _ra versions of cpu_stl_data() in v7M helpers
  target/arm: v8M: Check state of exception being returned from
  arm v8M: Forcibly clear negative-priority exceptions on deactivate
  target/arm/helper: Move M profile routines to m_helper.c
  target/arm: Restrict semi-hosting to TCG
  target/arm: Move debug routines to debug_helper.c

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 57dfc2c4d5 by Peter Maydell <peter.maydell@linaro.org>, 2019-07-04 17:32:24 +01:00
11 changed files with 3096 additions and 2953 deletions

hw/intc/armv7m_nvic.c

@@ -812,15 +812,45 @@ void armv7m_nvic_get_pending_irq_info(void *opaque,
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
{
NVICState *s = (NVICState *)opaque;
-    VecInfo *vec;
+    VecInfo *vec = NULL;
int ret;
assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
-    if (secure && exc_is_banked(irq)) {
-        vec = &s->sec_vectors[irq];
-    } else {
-        vec = &s->vectors[irq];
+    /*
+     * For negative priorities, v8M will forcibly deactivate the appropriate
+     * NMI or HardFault regardless of what interrupt we're being asked to
+     * deactivate (compare the DeActivate() pseudocode). This is a guard
+     * against software returning from NMI or HardFault with a corrupted
+     * IPSR and leaving the CPU in a negative-priority state.
+     * v7M does not do this, but simply deactivates the requested interrupt.
+     */
+    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
+        switch (armv7m_nvic_raw_execution_priority(s)) {
+        case -1:
+            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+                vec = &s->vectors[ARMV7M_EXCP_HARD];
+            } else {
+                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            }
+            break;
+        case -2:
+            vec = &s->vectors[ARMV7M_EXCP_NMI];
+            break;
+        case -3:
+            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (!vec) {
+        if (secure && exc_is_banked(irq)) {
+            vec = &s->sec_vectors[irq];
+        } else {
+            vec = &s->vectors[irq];
+        }
}
trace_nvic_complete_irq(irq, secure);
@@ -830,7 +860,19 @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
return -1;
}
-    ret = nvic_rettobase(s);
+    /*
+     * If this is a configurable exception and it is currently
+     * targeting the opposite security state from the one we're trying
+     * to complete it for, this counts as an illegal exception return.
+     * We still need to deactivate whatever vector the logic above has
+     * selected, though, as it might not be the same as the one for the
+     * requested exception number.
+     */
+    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
+        ret = -1;
+    } else {
+        ret = nvic_rettobase(s);
+    }
vec->active = 0;
if (vec->level) {

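For reference, the raw execution priorities decoded by the switch above map onto v8M's fixed-priority exceptions as follows (an architectural summary, not additional patch content):

/*
 * v8M negative execution priorities (summary):
 *   -1 : HardFault (the Non-secure HardFault when AIRCR.BFHFNMINS is 1,
 *        otherwise the Secure one, matching the case -1 arm above)
 *   -2 : NMI
 *   -3 : Secure HardFault, which runs at this priority only when
 *        AIRCR.BFHFNMINS is 1
 */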
hw/timer/armv7m_systick.c

@@ -75,11 +75,17 @@ static void systick_timer_tick(void *opaque)
}
}
-static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size)
+static MemTxResult systick_read(void *opaque, hwaddr addr, uint64_t *data,
+                                unsigned size, MemTxAttrs attrs)
{
    SysTickState *s = opaque;
    uint32_t val;

+    if (attrs.user) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
switch (addr) {
case 0x0: /* SysTick Control and Status. */
val = s->control;
@@ -121,14 +127,21 @@ static uint64_t systick_read(void *opaque, hwaddr addr, unsigned size)
}
trace_systick_read(addr, val, size);
-    return val;
+    *data = val;
+    return MEMTX_OK;
}
-static void systick_write(void *opaque, hwaddr addr,
-                          uint64_t value, unsigned size)
+static MemTxResult systick_write(void *opaque, hwaddr addr,
+                                 uint64_t value, unsigned size,
+                                 MemTxAttrs attrs)
{
    SysTickState *s = opaque;

+    if (attrs.user) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
trace_systick_write(addr, value, size);
switch (addr) {
@@ -172,11 +185,12 @@ static void systick_write(void *opaque, hwaddr addr,
qemu_log_mask(LOG_GUEST_ERROR,
"SysTick: Bad write offset 0x%" HWADDR_PRIx "\n", addr);
}
+    return MEMTX_OK;
}
static const MemoryRegionOps systick_ops = {
-    .read = systick_read,
-    .write = systick_write,
+    .read_with_attrs = systick_read,
+    .write_with_attrs = systick_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.valid.min_access_size = 4,
.valid.max_access_size = 4,

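The read_with_attrs/write_with_attrs pattern generalizes to any M-profile device model that must fault unprivileged accesses. A minimal sketch for a hypothetical device (the mydev_* names are illustrative, not part of the patch):

#include "qemu/osdep.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"

static MemTxResult mydev_read(void *opaque, hwaddr addr, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    if (attrs.user) {
        /*
         * Returning MEMTX_ERROR makes the v7M core raise a BusFault
         * (via the do_transaction_failed hook registered in the cpu.c
         * hunk below) instead of completing the access silently.
         */
        return MEMTX_ERROR;
    }
    *data = 0; /* hypothetical register contents */
    return MEMTX_OK;
}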
target/arm/Makefile.objs

@@ -1,4 +1,4 @@
-obj-y += arm-semi.o
+obj-$(CONFIG_TCG) += arm-semi.o
obj-y += helper.o vfp_helper.o
obj-y += cpu.o gdbstub.o
obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
@@ -32,10 +32,11 @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
target/arm/translate.o: target/arm/decode-vfp.inc.c
target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
-obj-y += tlb_helper.o
+obj-y += tlb_helper.o debug_helper.o
obj-y += translate.o op_helper.o
obj-y += crypto_helper.o
obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
+obj-y += m_helper.o
obj-$(CONFIG_SOFTMMU) += psci.o

target/arm/cpu.c

@@ -2578,19 +2578,16 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_arch_name = arm_gdb_arch_name;
cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
cc->gdb_stop_before_watchpoint = true;
-    cc->debug_excp_handler = arm_debug_excp_handler;
-    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
-#if !defined(CONFIG_USER_ONLY)
-    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
-#endif
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
+    cc->debug_excp_handler = arm_debug_excp_handler;
+    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
#if !defined(CONFIG_USER_ONLY)
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
#endif
}

target/arm/internals.h

@@ -964,7 +964,14 @@ static inline void aarch64_sve_change_el(CPUARMState *env, int o,
{ }
#endif
+#if !defined(CONFIG_TCG)
+static inline target_ulong do_arm_semihosting(CPUARMState *env)
+{
+    g_assert_not_reached();
+}
+#else
target_ulong do_arm_semihosting(CPUARMState *env);
+#endif
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

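The !CONFIG_TCG stub above keeps non-TCG (for example KVM-only) builds linking: semi-hosting traps are only generated by translated code, so the assertion is unreachable there. A hypothetical sketch of the calling side (illustrative only, not the actual QEMU caller):

    if (cs->exception_index == EXCP_SEMIHOST) {
        /*
         * EXCP_SEMIHOST is only ever raised from TCG-translated code,
         * so under !CONFIG_TCG this branch is dead and the
         * g_assert_not_reached() stub merely satisfies the linker.
         */
        env->regs[0] = do_arm_semihosting(env);
        return;
    }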
target/arm/debug_helper.c (new file, 311 lines)

@@ -0,0 +1,311 @@
/*
* ARM debug helpers.
*
* This code is licensed under the GNU GPL v2 or later.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
CPUARMState *env = &cpu->env;
uint64_t bcr = env->cp15.dbgbcr[lbn];
int brps = extract32(cpu->dbgdidr, 24, 4);
int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
int bt;
uint32_t contextidr;
/*
* Links to unimplemented or non-context aware breakpoints are
* CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
* as if linked to an UNKNOWN context-aware breakpoint (in which
* case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
* We choose the former.
*/
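    /*
     * DBGDIDR.BRPs and DBGDIDR.CTX_CMPs encode "number minus one", and
     * the context-aware comparators are architecturally the highest
     * numbered breakpoints, which is why the valid linked-breakpoint
     * range below is (brps - ctx_cmps) .. brps.
     */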
if (lbn > brps || lbn < (brps - ctx_cmps)) {
return false;
}
bcr = env->cp15.dbgbcr[lbn];
if (extract64(bcr, 0, 1) == 0) {
/* Linked breakpoint disabled : generate no events */
return false;
}
bt = extract64(bcr, 20, 4);
/*
* We match the whole register even if this is AArch32 using the
* short descriptor format (in which case it holds both PROCID and ASID),
* since we don't implement the optional v7 context ID masking.
*/
contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
switch (bt) {
case 3: /* linked context ID match */
if (arm_current_el(env) > 1) {
/* Context matches never fire in EL2 or (AArch64) EL3 */
return false;
}
return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
case 5: /* linked address mismatch (reserved in AArch64) */
case 9: /* linked VMID match (reserved if no EL2) */
case 11: /* linked context ID and VMID match (reserved if no EL2) */
default:
/*
* Links to Unlinked context breakpoints must generate no
* events; we choose to do the same for reserved values too.
*/
return false;
}
return false;
}
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
CPUARMState *env = &cpu->env;
uint64_t cr;
int pac, hmc, ssc, wt, lbn;
/*
* Note that for watchpoints the check is against the CPU security
* state, not the S/NS attribute on the offending data access.
*/
bool is_secure = arm_is_secure(env);
int access_el = arm_current_el(env);
if (is_wp) {
CPUWatchpoint *wp = env->cpu_watchpoint[n];
if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
return false;
}
cr = env->cp15.dbgwcr[n];
if (wp->hitattrs.user) {
/*
* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
* match watchpoints as if they were accesses done at EL0, even if
* the CPU is at EL1 or higher.
*/
access_el = 0;
}
} else {
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
return false;
}
cr = env->cp15.dbgbcr[n];
}
/*
* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
* enabled and that the address and access type match; for breakpoints
* we know the address matched; check the remaining fields, including
* linked breakpoints. We rely on WCR and BCR having the same layout
* for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
* Note that some combinations of {PAC, HMC, SSC} are reserved and
* must act either like some valid combination or as if the watchpoint
* were disabled. We choose the former, and use this together with
* the fact that EL3 must always be Secure and EL2 must always be
* Non-Secure to simplify the code slightly compared to the full
* table in the ARM ARM.
*/
pac = extract64(cr, 1, 2);
hmc = extract64(cr, 13, 1);
ssc = extract64(cr, 14, 2);
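    /*
     * SSC decode, as implemented by the switch below: 0b00 matches in
     * either security state, 0b01 and 0b11 match only in Non-secure
     * state, and 0b10 matches only in Secure state (reserved
     * combinations having been folded into valid ones, as the comment
     * above explains).
     */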
switch (ssc) {
case 0:
break;
case 1:
case 3:
if (is_secure) {
return false;
}
break;
case 2:
if (!is_secure) {
return false;
}
break;
}
switch (access_el) {
case 3:
case 2:
if (!hmc) {
return false;
}
break;
case 1:
if (extract32(pac, 0, 1) == 0) {
return false;
}
break;
case 0:
if (extract32(pac, 1, 1) == 0) {
return false;
}
break;
default:
g_assert_not_reached();
}
wt = extract64(cr, 20, 1);
lbn = extract64(cr, 16, 4);
if (wt && !linked_bp_matches(cpu, lbn)) {
return false;
}
return true;
}
static bool check_watchpoints(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
int n;
/*
* If watchpoints are disabled globally or we can't take debug
* exceptions here then watchpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
|| !arm_generate_debug_exceptions(env)) {
return false;
}
for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
if (bp_wp_matches(cpu, n, true)) {
return true;
}
}
return false;
}
static bool check_breakpoints(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
int n;
/*
* If breakpoints are disabled globally or we can't take debug
* exceptions here then breakpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
|| !arm_generate_debug_exceptions(env)) {
return false;
}
for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
if (bp_wp_matches(cpu, n, false)) {
return true;
}
}
return false;
}
void HELPER(check_breakpoints)(CPUARMState *env)
{
ARMCPU *cpu = env_archcpu(env);
if (check_breakpoints(cpu)) {
HELPER(exception_internal(env, EXCP_DEBUG));
}
}
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
/*
* Called by core code when a CPU watchpoint fires; need to check if this
* is also an architectural watchpoint match.
*/
ARMCPU *cpu = ARM_CPU(cs);
return check_watchpoints(cpu);
}
void arm_debug_excp_handler(CPUState *cs)
{
/*
* Called by core code when a watchpoint or breakpoint fires;
* need to check which one and raise the appropriate exception.
*/
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
CPUWatchpoint *wp_hit = cs->watchpoint_hit;
if (wp_hit) {
if (wp_hit->flags & BP_CPU) {
bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
bool same_el = arm_debug_target_el(env) == arm_current_el(env);
cs->watchpoint_hit = NULL;
env->exception.fsr = arm_debug_exception_fsr(env);
env->exception.vaddress = wp_hit->hitaddr;
raise_exception(env, EXCP_DATA_ABORT,
syn_watchpoint(same_el, 0, wnr),
arm_debug_target_el(env));
}
} else {
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
/*
* (1) GDB breakpoints should be handled first.
* (2) Do not raise a CPU exception if no CPU breakpoint has fired,
* since singlestep is also done by generating a debug internal
* exception.
*/
if (cpu_breakpoint_test(cs, pc, BP_GDB)
|| !cpu_breakpoint_test(cs, pc, BP_CPU)) {
return;
}
env->exception.fsr = arm_debug_exception_fsr(env);
/*
* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
* values to the guest that it shouldn't be able to see at its
* exception/security level.
*/
env->exception.vaddress = 0;
raise_exception(env, EXCP_PREFETCH_ABORT,
syn_breakpoint(same_el),
arm_debug_target_el(env));
}
}
#if !defined(CONFIG_USER_ONLY)
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
/*
* In BE32 system mode, target memory is stored byteswapped (on a
* little-endian host system), and by the time we reach here (via an
* opcode helper) the addresses of subword accesses have been adjusted
* to account for that, which means that watchpoints will not match.
* Undo the adjustment here.
*/
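    /*
     * Worked example: a 1-byte guest access to address A is performed
     * at A ^ 3 in BE32 mode, and that adjusted address is what arrives
     * here; XORing with 3 again restores A, so the watchpoint
     * comparison sees the address the guest actually used.
     */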
if (arm_sctlr_b(env)) {
if (len == 1) {
addr ^= 3;
} else if (len == 2) {
addr ^= 2;
}
}
return addr;
}
#endif

target/arm/helper.c: file diff suppressed because it is too large

target/arm/m_helper.c (new file, 2679 lines)
File diff suppressed because it is too large

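Among the changes inside the suppressed m_helper.c diff is the queue item "Use _ra versions of cpu_stl_data() in v7M helpers". A minimal sketch of that pattern (the v7m_push_word helper is hypothetical; cpu_stl_data_ra() and GETPC() are the real QEMU APIs):

/*
 * Sketch: the _ra accessors carry the host return address of the
 * helper (obtained with GETPC() in the outermost helper function), so
 * a faulting store can unwind back to the guest instruction and raise
 * the exception with correct CPU state; plain cpu_stl_data() carries
 * no such unwind information.
 */
static void v7m_push_word(CPUARMState *env, uint32_t addr, uint32_t val,
                          uintptr_t ra)
{
    cpu_stl_data_ra(env, addr, val, ra); /* ra from GETPC() in the caller */
}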
target/arm/op_helper.c

@@ -831,301 +831,6 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
}
}
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
CPUARMState *env = &cpu->env;
uint64_t bcr = env->cp15.dbgbcr[lbn];
int brps = extract32(cpu->dbgdidr, 24, 4);
int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
int bt;
uint32_t contextidr;
/*
* Links to unimplemented or non-context aware breakpoints are
* CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
* as if linked to an UNKNOWN context-aware breakpoint (in which
* case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
* We choose the former.
*/
if (lbn > brps || lbn < (brps - ctx_cmps)) {
return false;
}
bcr = env->cp15.dbgbcr[lbn];
if (extract64(bcr, 0, 1) == 0) {
/* Linked breakpoint disabled : generate no events */
return false;
}
bt = extract64(bcr, 20, 4);
/*
* We match the whole register even if this is AArch32 using the
* short descriptor format (in which case it holds both PROCID and ASID),
* since we don't implement the optional v7 context ID masking.
*/
contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
switch (bt) {
case 3: /* linked context ID match */
if (arm_current_el(env) > 1) {
/* Context matches never fire in EL2 or (AArch64) EL3 */
return false;
}
return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
case 5: /* linked address mismatch (reserved in AArch64) */
case 9: /* linked VMID match (reserved if no EL2) */
case 11: /* linked context ID and VMID match (reserved if no EL2) */
default:
/*
* Links to Unlinked context breakpoints must generate no
* events; we choose to do the same for reserved values too.
*/
return false;
}
return false;
}
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
CPUARMState *env = &cpu->env;
uint64_t cr;
int pac, hmc, ssc, wt, lbn;
/*
* Note that for watchpoints the check is against the CPU security
* state, not the S/NS attribute on the offending data access.
*/
bool is_secure = arm_is_secure(env);
int access_el = arm_current_el(env);
if (is_wp) {
CPUWatchpoint *wp = env->cpu_watchpoint[n];
if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
return false;
}
cr = env->cp15.dbgwcr[n];
if (wp->hitattrs.user) {
/*
* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
* match watchpoints as if they were accesses done at EL0, even if
* the CPU is at EL1 or higher.
*/
access_el = 0;
}
} else {
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
return false;
}
cr = env->cp15.dbgbcr[n];
}
/*
* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
* enabled and that the address and access type match; for breakpoints
* we know the address matched; check the remaining fields, including
* linked breakpoints. We rely on WCR and BCR having the same layout
* for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
* Note that some combinations of {PAC, HMC, SSC} are reserved and
* must act either like some valid combination or as if the watchpoint
* were disabled. We choose the former, and use this together with
* the fact that EL3 must always be Secure and EL2 must always be
* Non-Secure to simplify the code slightly compared to the full
* table in the ARM ARM.
*/
pac = extract64(cr, 1, 2);
hmc = extract64(cr, 13, 1);
ssc = extract64(cr, 14, 2);
switch (ssc) {
case 0:
break;
case 1:
case 3:
if (is_secure) {
return false;
}
break;
case 2:
if (!is_secure) {
return false;
}
break;
}
switch (access_el) {
case 3:
case 2:
if (!hmc) {
return false;
}
break;
case 1:
if (extract32(pac, 0, 1) == 0) {
return false;
}
break;
case 0:
if (extract32(pac, 1, 1) == 0) {
return false;
}
break;
default:
g_assert_not_reached();
}
wt = extract64(cr, 20, 1);
lbn = extract64(cr, 16, 4);
if (wt && !linked_bp_matches(cpu, lbn)) {
return false;
}
return true;
}
static bool check_watchpoints(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
int n;
/*
* If watchpoints are disabled globally or we can't take debug
* exceptions here then watchpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
|| !arm_generate_debug_exceptions(env)) {
return false;
}
for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
if (bp_wp_matches(cpu, n, true)) {
return true;
}
}
return false;
}
static bool check_breakpoints(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
int n;
/*
* If breakpoints are disabled globally or we can't take debug
* exceptions here then breakpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
|| !arm_generate_debug_exceptions(env)) {
return false;
}
for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
if (bp_wp_matches(cpu, n, false)) {
return true;
}
}
return false;
}
void HELPER(check_breakpoints)(CPUARMState *env)
{
ARMCPU *cpu = env_archcpu(env);
if (check_breakpoints(cpu)) {
HELPER(exception_internal(env, EXCP_DEBUG));
}
}
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
/*
* Called by core code when a CPU watchpoint fires; need to check if this
* is also an architectural watchpoint match.
*/
ARMCPU *cpu = ARM_CPU(cs);
return check_watchpoints(cpu);
}
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
/*
* In BE32 system mode, target memory is stored byteswapped (on a
* little-endian host system), and by the time we reach here (via an
* opcode helper) the addresses of subword accesses have been adjusted
* to account for that, which means that watchpoints will not match.
* Undo the adjustment here.
*/
if (arm_sctlr_b(env)) {
if (len == 1) {
addr ^= 3;
} else if (len == 2) {
addr ^= 2;
}
}
return addr;
}
void arm_debug_excp_handler(CPUState *cs)
{
/*
* Called by core code when a watchpoint or breakpoint fires;
* need to check which one and raise the appropriate exception.
*/
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
CPUWatchpoint *wp_hit = cs->watchpoint_hit;
if (wp_hit) {
if (wp_hit->flags & BP_CPU) {
bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
bool same_el = arm_debug_target_el(env) == arm_current_el(env);
cs->watchpoint_hit = NULL;
env->exception.fsr = arm_debug_exception_fsr(env);
env->exception.vaddress = wp_hit->hitaddr;
raise_exception(env, EXCP_DATA_ABORT,
syn_watchpoint(same_el, 0, wnr),
arm_debug_target_el(env));
}
} else {
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
/*
* (1) GDB breakpoints should be handled first.
* (2) Do not raise a CPU exception if no CPU breakpoint has fired,
* since singlestep is also done by generating a debug internal
* exception.
*/
if (cpu_breakpoint_test(cs, pc, BP_GDB)
|| !cpu_breakpoint_test(cs, pc, BP_CPU)) {
return;
}
env->exception.fsr = arm_debug_exception_fsr(env);
/*
* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
* values to the guest that it shouldn't be able to see at its
* exception/security level.
*/
env->exception.vaddress = 0;
raise_exception(env, EXCP_PREFETCH_ABORT,
syn_breakpoint(same_el),
arm_debug_target_el(env));
}
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
The only way to do that in TCG is a conditional branch, which clobbers
all our temporaries. For now implement these as helper functions. */

target/arm/translate-vfp.inc.c

@@ -1971,7 +1971,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
/* Set up the operands for the next iteration */
veclen--;
-        vfp_advance_dreg(vd, delta_d);
+        vd = vfp_advance_dreg(vd, delta_d);
}
tcg_temp_free_i64(fd);

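The one-line fix above addresses an ignored-return-value bug: vfp_advance_dreg() computes the next D register of the short vector and returns it rather than modifying its argument, so dropping the result made every loop iteration write the same register. In miniature (hypothetical advance() helper):

    reg = advance(reg, delta);  /* correct: consume the returned value */
    advance(reg, delta);        /* bug: pure function, result discarded */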
target/arm/translate.c

@@ -11594,7 +11594,14 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
gen_nop_hint(s, (insn >> 4) & 0xf);
break;
}
-        /* If Then. */
+        /*
+         * IT (If-Then)
+         *
+         * Combinations of firstcond and mask which set up an 0b1111
+         * condition are UNPREDICTABLE; we take the CONSTRAINED
+         * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
+         * i.e. both meaning "execute always".
+         */
s->condexec_cond = (insn >> 4) & 0xe;
s->condexec_mask = insn & 0x1f;
/* No actual code generated for this insn, just setup state. */
@@ -12128,7 +12135,11 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
uint32_t cond = dc->condexec_cond;
-        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
+        /*
+         * Conditionally skip the insn. Note that both 0xe and 0xf mean
+         * "always"; 0xf is not "never".
+         */
+        if (cond < 0x0e) {
arm_skip_unless(dc, cond);
}
}
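Taken together, the comparison rewrite decodes the condition field as follows (a summary, not additional patch content):

/*
 * cond <  0x0e : a real condition code; emit the conditional skip
 * cond == 0x0e : AL, "always"; no skip needed
 * cond == 0x0f : the 0b1111 encoding, also treated as "always" for
 *                conditional Thumb insns (the CONSTRAINED UNPREDICTABLE
 *                choice documented in the IT comment above), never "never"
 */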