target/arm: Set PAN bit as required on exception entry
The PAN bit is preserved, or set as per SCTLR_ELx.SPAN, plus several
other conditions listed in the ARM ARM.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200208125816.14954-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 4a2696c0d4
parent 81636b70c2
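Before the diff itself, here is a minimal standalone sketch of the CPSR.PAN rule that the AArch32 hunks below implement. This is not QEMU code: the function name aarch32_entry_pan and the plain boolean parameters are hypothetical stand-ins for QEMU's CPUARMState fields, used only to show the decision logic.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical illustration (not QEMU code) of the CPSR.PAN rule on
 * exception entry to AArch32: PAN is normally preserved, but is cleared
 * when entering EL3 from non-secure state, and set when entering EL3 from
 * secure state or EL1 while SCTLR.SPAN == 0.
 */
bool aarch32_entry_pan(int new_el, bool old_pan,
                       bool from_secure, bool sctlr_span)
{
    bool pan = old_pan;               /* normally preserved */

    switch (new_el) {
    case 3:
        if (!from_secure) {
            pan = false;              /* EL3 from non-secure: clear PAN */
            break;
        }
        /* EL3 from secure state: fall through */
    case 1:
        if (!sctlr_span) {
            pan = true;               /* SPAN == 0: force PAN to 1 */
        }
        break;
    }
    return pan;
}

int main(void)
{
    /* EL3 entered from non-secure state clears PAN even if it was set. */
    printf("%d\n", aarch32_entry_pan(3, true, false, true));   /* 0 */
    /* EL1 entered with SCTLR.SPAN == 0 sets PAN. */
    printf("%d\n", aarch32_entry_pan(1, false, false, false)); /* 1 */
    return 0;
}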
@@ -8763,8 +8763,12 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                    uint32_t mask, uint32_t offset,
                                    uint32_t newpc)
 {
+    int new_el;
+
     /* Change the CPU state so as to actually take the exception. */
     switch_mode(env, new_mode);
+    new_el = arm_current_el(env);
+
     /*
      * For exceptions taken to AArch32 we must clear the SS bit in both
      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
@@ -8777,7 +8781,7 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
     /* Set new mode endianness */
     env->uncached_cpsr &= ~CPSR_E;
-    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
         env->uncached_cpsr |= CPSR_E;
     }
     /* J and IL must always be cleared for exception entry */
@@ -8788,6 +8792,25 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
         env->elr_el[2] = env->regs[15];
     } else {
+        /* CPSR.PAN is normally preserved unless ... */
+        if (cpu_isar_feature(aa64_pan, env_archcpu(env))) {
+            switch (new_el) {
+            case 3:
+                if (!arm_is_secure_below_el3(env)) {
+                    /* ... the target is EL3, from non-secure state. */
+                    env->uncached_cpsr &= ~CPSR_PAN;
+                    break;
+                }
+                /* ... the target is EL3, from secure state ... */
+                /* fall through */
+            case 1:
+                /* ... the target is EL1 and SCTLR.SPAN is 0. */
+                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
+                    env->uncached_cpsr |= CPSR_PAN;
+                }
+                break;
+            }
+        }
         /*
          * this is a lie, as there was no c1_sys on V4T/V5, but who cares
          * and we should just guard the thumb mode on V4
@@ -9050,6 +9073,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     unsigned int new_el = env->exception.target_el;
     target_ulong addr = env->cp15.vbar_el[new_el];
     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+    unsigned int old_mode;
     unsigned int cur_el = arm_current_el(env);

     /*
@@ -9129,20 +9153,43 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     }

     if (is_a64(env)) {
-        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+        old_mode = pstate_read(env);
         aarch64_save_sp(env, arm_current_el(env));
         env->elr_el[new_el] = env->pc;
     } else {
-        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
+        old_mode = cpsr_read(env);
         env->elr_el[new_el] = env->regs[15];

         aarch64_sync_32_to_64(env);

         env->condexec_bits = 0;
     }
+    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
+
     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                   env->elr_el[new_el]);

+    if (cpu_isar_feature(aa64_pan, cpu)) {
+        /* The value of PSTATE.PAN is normally preserved, except when ... */
+        new_mode |= old_mode & PSTATE_PAN;
+        switch (new_el) {
+        case 2:
+            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
+            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
+                != (HCR_E2H | HCR_TGE)) {
+                break;
+            }
+            /* fall through */
+        case 1:
+            /* ... the target is EL1 ... */
+            /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
+            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
+                new_mode |= PSTATE_PAN;
+            }
+            break;
+        }
+    }
+
     pstate_write(env, PSTATE_DAIF | new_mode);
     env->aarch64 = 1;
     aarch64_restore_sp(env, new_el);
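For the AArch64 path just above, the rule can be summarised in a similar self-contained sketch. Again this is only an illustration under simplifying assumptions, not QEMU code: the name aarch64_entry_pan and the booleans standing in for HCR_EL2.{E2H,TGE} and SCTLR_ELx.SPAN are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical illustration (not QEMU code) of the PSTATE.PAN rule on
 * exception entry to AArch64: PAN is normally preserved, but is forced to 1
 * when the target is EL1, or EL2 with HCR_EL2.{E2H,TGE} == '11', and
 * SCTLR_ELx.SPAN == 0.
 */
bool aarch64_entry_pan(int new_el, bool old_pan,
                       bool hcr_e2h, bool hcr_tge, bool sctlr_span)
{
    bool pan = old_pan;               /* normally preserved */

    switch (new_el) {
    case 2:
        if (!(hcr_e2h && hcr_tge)) {
            break;                    /* EL2 without E2H+TGE: keep old value */
        }
        /* fall through */
    case 1:
        if (!sctlr_span) {
            pan = true;               /* SPAN == 0: force PAN to 1 */
        }
        break;
    }
    return pan;
}

int main(void)
{
    /* Entry to EL1 with SPAN clear sets PAN regardless of its old value. */
    printf("%d\n", aarch64_entry_pan(1, false, false, false, false));  /* 1 */
    /* Entry to EL2 without E2H+TGE merely preserves the old value. */
    printf("%d\n", aarch64_entry_pan(2, false, true, false, false));   /* 0 */
    return 0;
}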