bulk: Call in place single use cpu_env()
Avoid CPUArchState local variable when cpu_env() is used once.

Mechanical patch using the following Coccinelle spatch script:

    @@
    type CPUArchState;
    identifier env;
    expression cs;
    @@
     {
    -    CPUArchState *env = cpu_env(cs);
         ... when != env
    -    env
    +    cpu_env(cs)
         ... when != env
     }

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20240129164514.73104-5-philmd@linaro.org>
Signed-off-by: Thomas Huth <thuth@redhat.com>
commit 94956d7b51
parent 97e0310601
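The transformation is easiest to see on a toy function. Below is a minimal, self-contained sketch of the before/after shape the spatch rule produces; the names used here (frob_before, frob_after, handle_irq, and the stand-in CPUState/CPUArchState/cpu_env definitions) are illustrative stand-ins, not code from this patch or from QEMU's headers.

/*
 * Stand-in declarations so the sketch compiles on its own; QEMU's real
 * CPUState/CPUArchState/cpu_env() are more involved than this.
 */
typedef struct CPUArchState { int pending_irq; } CPUArchState;
typedef struct CPUState { CPUArchState env; } CPUState;

static CPUArchState *cpu_env(CPUState *cs) { return &cs->env; }
static void handle_irq(CPUArchState *env) { env->pending_irq = 0; }

/* Before: a local holds cpu_env()'s result but is read exactly once. */
static void frob_before(CPUState *cs)
{
    CPUArchState *env = cpu_env(cs);
    handle_irq(env);
}

/* After: the single use is folded in place and the local is dropped. */
static void frob_after(CPUState *cs)
{
    handle_irq(cpu_env(cs));
}

The "... when != env" constraints in the rule restrict it to this single-use case: any function that reads env more than once keeps its local variable and is left untouched.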
@@ -436,7 +436,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 static inline TranslationBlock * QEMU_DISABLE_CFI
 cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 {
-    CPUArchState *env = cpu_env(cpu);
     uintptr_t ret;
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
@@ -446,7 +445,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     }

     qemu_thread_jit_execute();
-    ret = tcg_qemu_tb_exec(env, tb_ptr);
+    ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
     cpu->neg.can_do_io = true;
     qemu_plugin_disable_mem_helpers(cpu);
     /*

@@ -323,8 +323,8 @@ void cpu_loop(CPUX86State *env)

 static void target_cpu_free(void *obj)
 {
-    CPUArchState *env = cpu_env(obj);
-    target_munmap(env->gdt.base, sizeof(uint64_t) * TARGET_GDT_ENTRIES);
+    target_munmap(cpu_env(obj)->gdt.base,
+                  sizeof(uint64_t) * TARGET_GDT_ENTRIES);
     g_free(obj);
 }

@@ -538,7 +538,6 @@ void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
 /* Purge (Insn/Data) TLB. */
 static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
 {
-    CPUHPPAState *env = cpu_env(cpu);
     vaddr start = data.target_ptr;
     vaddr end;

@@ -552,7 +551,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
     end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
     end = start + end - 1;

-    hppa_flush_tlb_range(env, start, end);
+    hppa_flush_tlb_range(cpu_env(cpu), start, end);
 }

 /* This is local to the current cpu. */

@@ -3811,8 +3811,7 @@ static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)

 #ifndef CONFIG_USER_ONLY
     if (ctx->tb_flags & PSW_C) {
-        CPUHPPAState *env = cpu_env(ctx->cs);
-        int type = hppa_artype_for_page(env, ctx->base.pc_next);
+        int type = hppa_artype_for_page(cpu_env(ctx->cs), ctx->base.pc_next);
         /* If we could not find a TLB entry, then we need to generate an
            ITLB miss exception so the kernel will provide it.
            The resulting TLB fill operation will invalidate this TB and

@@ -340,7 +340,6 @@ nvmm_get_registers(CPUState *cpu)
 static bool
 nvmm_can_take_int(CPUState *cpu)
 {
-    CPUX86State *env = cpu_env(cpu);
     AccelCPUState *qcpu = cpu->accel;
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     struct nvmm_machine *mach = get_nvmm_mach();
@@ -349,7 +348,7 @@ nvmm_can_take_int(CPUState *cpu)
         return false;
     }

-    if (qcpu->int_shadow || !(env->eflags & IF_MASK)) {
+    if (qcpu->int_shadow || !(cpu_env(cpu)->eflags & IF_MASK)) {
         struct nvmm_x64_state *state = vcpu->state;

         /* Exit on interrupt window. */
@@ -645,13 +644,12 @@ static int
 nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
                    struct nvmm_vcpu_exit *exit)
 {
-    CPUX86State *env = cpu_env(cpu);
     int ret = 0;

     bql_lock();

     if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-          (env->eflags & IF_MASK)) &&
+          (cpu_env(cpu)->eflags & IF_MASK)) &&
         !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
         cpu->halted = true;

@@ -300,7 +300,6 @@ static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
 /* X64 Extended Control Registers */
 static void whpx_set_xcrs(CPUState *cpu)
 {
-    CPUX86State *env = cpu_env(cpu);
     HRESULT hr;
     struct whpx_state *whpx = &whpx_global;
     WHV_REGISTER_VALUE xcr0;
@@ -311,7 +310,7 @@ static void whpx_set_xcrs(CPUState *cpu)
     }

     /* Only xcr0 is supported by the hypervisor currently */
-    xcr0.Reg64 = env->xcr0;
+    xcr0.Reg64 = cpu_env(cpu)->xcr0;
     hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
         whpx->partition, cpu->cpu_index, &xcr0_name, 1, &xcr0);
     if (FAILED(hr)) {
@@ -321,7 +320,6 @@ static void whpx_set_xcrs(CPUState *cpu)

 static int whpx_set_tsc(CPUState *cpu)
 {
-    CPUX86State *env = cpu_env(cpu);
     WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
     WHV_REGISTER_VALUE tsc_val;
     HRESULT hr;
@@ -345,7 +343,7 @@ static int whpx_set_tsc(CPUState *cpu)
         }
     }

-    tsc_val.Reg64 = env->tsc;
+    tsc_val.Reg64 = cpu_env(cpu)->tsc;
     hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
         whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
     if (FAILED(hr)) {
@@ -556,7 +554,6 @@ static void whpx_set_registers(CPUState *cpu, int level)

 static int whpx_get_tsc(CPUState *cpu)
 {
-    CPUX86State *env = cpu_env(cpu);
     WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
     WHV_REGISTER_VALUE tsc_val;
     HRESULT hr;
@@ -569,14 +566,13 @@ static int whpx_get_tsc(CPUState *cpu)
         return -1;
     }

-    env->tsc = tsc_val.Reg64;
+    cpu_env(cpu)->tsc = tsc_val.Reg64;
     return 0;
 }

 /* X64 Extended Control Registers */
 static void whpx_get_xcrs(CPUState *cpu)
 {
-    CPUX86State *env = cpu_env(cpu);
     HRESULT hr;
     struct whpx_state *whpx = &whpx_global;
     WHV_REGISTER_VALUE xcr0;
@@ -594,7 +590,7 @@ static void whpx_get_xcrs(CPUState *cpu)
         return;
     }

-    env->xcr0 = xcr0.Reg64;
+    cpu_env(cpu)->xcr0 = xcr0.Reg64;
 }

 static void whpx_get_registers(CPUState *cpu)
@@ -1400,8 +1396,7 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid)
 {
     if (cpu->vcpu_dirty) {
         /* The CPU registers have been modified by other parts of QEMU. */
-        CPUArchState *env = cpu_env(cpu);
-        return env->eip;
+        return cpu_env(cpu)->eip;
     } else if (exit_context_valid) {
         /*
          * The CPU registers have not been modified by neither other parts
@@ -1439,12 +1434,11 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid)

 static int whpx_handle_halt(CPUState *cpu)
 {
-    CPUX86State *env = cpu_env(cpu);
     int ret = 0;

     bql_lock();
     if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-          (env->eflags & IF_MASK)) &&
+          (cpu_env(cpu)->eflags & IF_MASK)) &&
         !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
         cpu->halted = true;

@@ -282,10 +282,9 @@ static uint64_t make_address_pc(DisasContext *ctx, uint64_t addr)

 static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
 {
-    CPULoongArchState *env = cpu_env(cs);
     DisasContext *ctx = container_of(dcbase, DisasContext, base);

-    ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next);
+    ctx->opcode = translator_ldl(cpu_env(cs), &ctx->base, ctx->base.pc_next);

     if (!decode(ctx, ctx->opcode)) {
         qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. "

@@ -2195,9 +2195,8 @@ static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)

 static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 {
-    CPURXState *env = cpu_env(cs);
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
-    ctx->env = env;
+    ctx->env = cpu_env(cs);
     ctx->tb_flags = ctx->base.tb->flags;
 }

@@ -29,9 +29,7 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                     MMUAccessType access_type,
                                     int mmu_idx, uintptr_t retaddr)
 {
-    CPUSH4State *env = cpu_env(cs);
-
-    env->tea = addr;
+    cpu_env(cs)->tea = addr;
     switch (access_type) {
     case MMU_INST_FETCH:
     case MMU_DATA_LOAD: