target/arm: Split out rebuild_hflags_a64
Create a function to compute the values of the TBFLAG_A64 bits that will
be cached.  For now, the env->hflags variable is not used, and the results
are fed back to cpu_get_tb_cpu_state.

Note that not all BTI related flags are cached, so we have to test the
BTI feature twice -- once for those bits moved out to rebuild_hflags_a64
and once for those bits that remain in cpu_get_tb_cpu_state.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20191023150057.25731-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit d4d7503ac6
parent fdd1b228c2
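Both the code being removed and the new helper assemble the per-TB flags by depositing bit-fields into a single 32-bit word with FIELD_DP32().  As a point of reference, the following is a rough, self-contained sketch of that deposit-style packing; the field names, shifts and widths are invented for illustration (the real TBFLAG_* layouts and the FIELD_DP32 macro live in QEMU's headers), so treat it as an approximation rather than the actual implementation.

    /*
     * Rough, self-contained approximation of the FIELD_DP32() packing used
     * throughout this patch.  The shift/width values below are made up for
     * illustration and are NOT the real TBFLAG_A64 layout.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EX_TBII_SHIFT   0   /* hypothetical 2-bit field */
    #define EX_TBII_LENGTH  2
    #define EX_TBID_SHIFT   2   /* hypothetical 2-bit field */
    #define EX_TBID_LENGTH  2

    /* Deposit 'val' into the [shift, shift+len) bit range of 'flags'. */
    static uint32_t deposit32_ex(uint32_t flags, int shift, int len, uint32_t val)
    {
        uint32_t mask = ((1u << len) - 1) << shift;
        return (flags & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint32_t flags = 0;

        /* Analogous to FIELD_DP32(flags, TBFLAG_A64, TBII, tbii), etc. */
        flags = deposit32_ex(flags, EX_TBII_SHIFT, EX_TBII_LENGTH, 0x3);
        flags = deposit32_ex(flags, EX_TBID_SHIFT, EX_TBID_LENGTH, 0x1);

        printf("flags = 0x%x\n", flags);   /* 0x7 with the made-up layout */
        return 0;
    }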
@@ -11070,6 +11070,71 @@ static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
     return flags;
 }
 
+static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
+                                   ARMMMUIdx mmu_idx)
+{
+    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
+    ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
+    uint32_t flags = 0;
+    uint64_t sctlr;
+    int tbii, tbid;
+
+    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
+
+    /* FIXME: ARMv8.1-VHE S2 translation regime. */
+    if (regime_el(env, stage1) < 2) {
+        ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
+        tbid = (p1.tbi << 1) | p0.tbi;
+        tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
+    } else {
+        tbid = p0.tbi;
+        tbii = tbid & !p0.tbid;
+    }
+
+    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
+    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
+
+    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+        int sve_el = sve_exception_el(env, el);
+        uint32_t zcr_len;
+
+        /*
+         * If SVE is disabled, but FP is enabled,
+         * then the effective len is 0.
+         */
+        if (sve_el != 0 && fp_el == 0) {
+            zcr_len = 0;
+        } else {
+            zcr_len = sve_zcr_len_for_el(env, el);
+        }
+        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
+        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
+    }
+
+    sctlr = arm_sctlr(env, el);
+
+    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
+        /*
+         * In order to save space in flags, we record only whether
+         * pauth is "inactive", meaning all insns are implemented as
+         * a nop, or "active" when some action must be performed.
+         * The decision of which action to take is left to a helper.
+         */
+        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
+            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
+        }
+    }
+
+    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
+        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
+            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
+        }
+    }
+
+    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *pflags)
 {
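The TBII/TBID packing added above folds the tag-byte controls of the two translation regimes into two-bit fields.  As I read the aa64_va_parameters_both(env, 0, ...) and aa64_va_parameters_both(env, -1, ...) calls, bit 0 corresponds to the low-address (TTBR0) regime and bit 1 to the high-address (TTBR1) regime, with TBII further masked so that TBI is only reported for instruction fetches where TBID does not exclude them; the patch itself does not spell this out, so take the pairing as an assumption.  Below is a minimal standalone sketch of just that arithmetic, with the ARMVAParameters lookups replaced by hypothetical booleans.

    /*
     * Minimal sketch of the TBI/TBID packing in rebuild_hflags_a64(), with
     * the ARMVAParameters lookups replaced by plain booleans.  The p0/p1
     * naming mirrors the patch; mapping p0 to TTBR0 and p1 to TTBR1 is an
     * assumption for illustration.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct va_params { bool tbi; bool tbid; };

    static void pack_tbi(struct va_params p0, struct va_params p1,
                         int *tbii, int *tbid)
    {
        /* Same expressions as the patch: one bit per translation regime. */
        *tbid = (p1.tbi << 1) | p0.tbi;
        *tbii = *tbid & ~((p1.tbid << 1) | p0.tbid);
    }

    int main(void)
    {
        /* Example: TBI enabled for both regimes, TBID set only for TTBR1. */
        struct va_params p0 = { .tbi = true, .tbid = false };
        struct va_params p1 = { .tbi = true, .tbid = true  };
        int tbii, tbid;

        pack_tbi(p0, p1, &tbii, &tbid);
        printf("tbid = 0x%x, tbii = 0x%x\n", tbid, tbii);   /* 0x3 and 0x1 */
        return 0;
    }

With those inputs the sketch reports tbid = 0x3 and tbii = 0x1, i.e. the top byte is ignored for instruction addresses only in the low-address regime, which is exactly what the mask against the TBID bits achieves.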
@@ -11079,67 +11144,9 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     uint32_t flags = 0;
 
     if (is_a64(env)) {
-        ARMCPU *cpu = env_archcpu(env);
-        uint64_t sctlr;
-
         *pc = env->pc;
-        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
-
-        /* Get control bits for tagged addresses. */
-        {
-            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
-            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
-            int tbii, tbid;
-
-            /* FIXME: ARMv8.1-VHE S2 translation regime. */
-            if (regime_el(env, stage1) < 2) {
-                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
-                tbid = (p1.tbi << 1) | p0.tbi;
-                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
-            } else {
-                tbid = p0.tbi;
-                tbii = tbid & !p0.tbid;
-            }
-
-            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
-            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
-        }
-
-        if (cpu_isar_feature(aa64_sve, cpu)) {
-            int sve_el = sve_exception_el(env, current_el);
-            uint32_t zcr_len;
-
-            /* If SVE is disabled, but FP is enabled,
-             * then the effective len is 0.
-             */
-            if (sve_el != 0 && fp_el == 0) {
-                zcr_len = 0;
-            } else {
-                zcr_len = sve_zcr_len_for_el(env, current_el);
-            }
-            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
-            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
-        }
-
-        sctlr = arm_sctlr(env, current_el);
-
-        if (cpu_isar_feature(aa64_pauth, cpu)) {
-            /*
-             * In order to save space in flags, we record only whether
-             * pauth is "inactive", meaning all insns are implemented as
-             * a nop, or "active" when some action must be performed.
-             * The decision of which action to take is left to a helper.
-             */
-            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
-                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
-            }
-        }
-
-        if (cpu_isar_feature(aa64_bti, cpu)) {
-            /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
-            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
-                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
-            }
+        flags = rebuild_hflags_a64(env, current_el, fp_el, mmu_idx);
+        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
             flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
         }
     } else {
@@ -11159,9 +11166,9 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
             flags = FIELD_DP32(flags, TBFLAG_A32,
                                XSCALE_CPAR, env->cp15.c15_cpar);
         }
-    }
 
-    flags = rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+        flags = rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+    }
 
     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
      * states defined in the ARM ARM for software singlestep: