target/arm: Change CPUArchState.aarch64 to bool

Bool is a more appropriate type for this value.
Adjust the assignments to use true/false.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Richard Henderson 2022-04-17 10:43:32 -07:00 committed by Peter Maydell
parent a3bc906f8e
commit 5322155240
5 changed files with 6 additions and 6 deletions

View File

@@ -189,7 +189,7 @@ static void arm_cpu_reset(DeviceState *dev)
     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
         /* 64 bit CPUs always start in 64 bit mode */
-        env->aarch64 = 1;
+        env->aarch64 = true;
 #if defined(CONFIG_USER_ONLY)
         env->pstate = PSTATE_MODE_EL0t;
         /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */

View File

@@ -259,7 +259,7 @@ typedef struct CPUArchState {
      * all other bits are stored in their correct places in env->pstate
      */
     uint32_t pstate;
-    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
+    bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
     /* Cached TBFLAGS state. See below for which bits are included. */
     CPUARMTBFlags hflags;

View File

@@ -952,7 +952,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
         qemu_mutex_unlock_iothread();
 
     if (!return_to_aa64) {
-        env->aarch64 = 0;
+        env->aarch64 = false;
         /* We do a raw CPSR write because aarch64_sync_64_to_32()
          * will sort the register banks out for us, and we've already
          * caught all the bad-mode cases in el_from_spsr().
@@ -975,7 +975,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
     } else {
         int tbii;
 
-        env->aarch64 = 1;
+        env->aarch64 = true;
         spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
         pstate_write(env, spsr);
         if (!arm_singlestep_active(env)) {

View File

@@ -10181,7 +10181,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     }
 
     pstate_write(env, PSTATE_DAIF | new_mode);
-    env->aarch64 = 1;
+    env->aarch64 = true;
     aarch64_restore_sp(env, new_el);
     helper_rebuild_hflags_a64(env, new_el);

View File

@@ -564,7 +564,7 @@ int hvf_arch_init_vcpu(CPUState *cpu)
     hv_return_t ret;
     int i;
 
-    env->aarch64 = 1;
+    env->aarch64 = true;
     asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));
 
     /* Allocate enough space for our sysreg sync */