Rename CPUState -> CPUArchState

Scripted conversion:
  for file in *.[hc] hw/*.[hc] hw/kvm/*.[hc] linux-user/*.[hc] linux-user/m68k/*.[hc] bsd-user/*.[hc] darwin-user/*.[hc] tcg/*/*.[hc] target-*/cpu.h; do
    sed -i "s/CPUState/CPUArchState/g" $file
  done
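
As a follow-up sanity check (not part of the original commit; only a suggested spot check), the same file list can be grepped to confirm that no unconverted occurrences remain — any hit is a file the script missed:

  grep -n "CPUState" *.[hc] hw/*.[hc] hw/kvm/*.[hc] linux-user/*.[hc] \
      linux-user/m68k/*.[hc] bsd-user/*.[hc] darwin-user/*.[hc] \
      tcg/*/*.[hc] target-*/cpu.h

("CPUArchState" does not contain the substring "CPUState", so only leftovers match.)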

All occurrences of CPUArchState are expected to be replaced by QOM CPUState,
once all targets are QOM'ified and common fields have been extracted.

Signed-off-by: Andreas Färber <afaerber@suse.de>
Reviewed-by: Anthony Liguori <aliguori@us.ibm.com>
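
To make the naming split concrete, here is a minimal C sketch of where the two names are headed; the CPUX86State example and the field comments are illustrative assumptions, not the literal QEMU definitions:

  /* Per-target register file; each target keeps its own env struct and
   * (illustratively) aliases it as CPUArchState after this rename. */
  typedef struct CPUX86State {
      /* architecture-specific registers, MMU/TLB state, ... */
  } CPUX86State;
  #define CPUArchState CPUX86State

  /* Planned common QOM object: target-independent fields (e.g. cpu_index,
   * interrupt_request) are to be extracted into it once all targets are
   * QOM'ified; at that point CPUArchState users migrate to CPUState. */
  typedef struct CPUState CPUState;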
Author: Andreas Färber, 2012-03-14 01:38:32 +01:00
commit 9349b4f9fd (parent 5bfcb36ec4)
59 changed files with 419 additions and 419 deletions


@@ -70,11 +70,11 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 #endif
 /* These are no-ops because we are not threadsafe. */
-static inline void cpu_exec_start(CPUState *env)
+static inline void cpu_exec_start(CPUArchState *env)
 {
 }
-static inline void cpu_exec_end(CPUState *env)
+static inline void cpu_exec_end(CPUArchState *env)
 {
 }
@@ -713,7 +713,7 @@ static void usage(void)
 exit(1);
 }
-THREAD CPUState *thread_env;
+THREAD CPUArchState *thread_env;
 /* Assumes contents are already zeroed. */
 void init_task_state(TaskState *ts)
@@ -737,7 +737,7 @@ int main(int argc, char **argv)
 struct target_pt_regs regs1, *regs = &regs1;
 struct image_info info1, *info = &info1;
 TaskState ts1, *ts = &ts1;
-CPUState *env;
+CPUArchState *env;
 int optind;
 const char *r;
 int gdbstub_port = 0;


@@ -139,8 +139,8 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
 abi_long arg2, abi_long arg3, abi_long arg4,
 abi_long arg5, abi_long arg6);
 void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-extern THREAD CPUState *thread_env;
+extern THREAD CPUArchState *thread_env;
-void cpu_loop(CPUState *env);
+void cpu_loop(CPUArchState *env);
 char *target_strerror(int err);
 int get_osversion(void);
 void fork_start(void);
@@ -167,13 +167,13 @@ void print_openbsd_syscall_ret(int num, abi_long ret);
 extern int do_strace;
 /* signal.c */
-void process_pending_signals(CPUState *cpu_env);
+void process_pending_signals(CPUArchState *cpu_env);
 void signal_init(void);
-//int queue_signal(CPUState *env, int sig, target_siginfo_t *info);
+//int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info);
 //void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
 //void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
-long do_sigreturn(CPUState *env);
+long do_sigreturn(CPUArchState *env);
-long do_rt_sigreturn(CPUState *env);
+long do_rt_sigreturn(CPUArchState *env);
 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
 /* mmap.c */


@@ -33,6 +33,6 @@ void signal_init(void)
 {
 }
-void process_pending_signals(CPUState *cpu_env)
+void process_pending_signals(CPUArchState *cpu_env)
 {
 }


@@ -322,20 +322,20 @@ void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
 #endif
-CPUState *cpu_copy(CPUState *env);
+CPUArchState *cpu_copy(CPUArchState *env);
-CPUState *qemu_get_cpu(int cpu);
+CPUArchState *qemu_get_cpu(int cpu);
 #define CPU_DUMP_CODE 0x00010000
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
 int flags);
-void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
 int flags);
-void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
+void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
 GCC_FMT_ATTR(2, 3);
-extern CPUState *first_cpu;
+extern CPUArchState *first_cpu;
-DECLARE_TLS(CPUState *,cpu_single_env);
+DECLARE_TLS(CPUArchState *,cpu_single_env);
 #define cpu_single_env tls_var(cpu_single_env)
 /* Flags for use in ENV->INTERRUPT_PENDING.
@@ -389,23 +389,23 @@ DECLARE_TLS(CPUState *,cpu_single_env);
 | CPU_INTERRUPT_TGT_EXT_4)
 #ifndef CONFIG_USER_ONLY
-typedef void (*CPUInterruptHandler)(CPUState *, int);
+typedef void (*CPUInterruptHandler)(CPUArchState *, int);
 extern CPUInterruptHandler cpu_interrupt_handler;
-static inline void cpu_interrupt(CPUState *s, int mask)
+static inline void cpu_interrupt(CPUArchState *s, int mask)
 {
 cpu_interrupt_handler(s, mask);
 }
 #else /* USER_ONLY */
-void cpu_interrupt(CPUState *env, int mask);
+void cpu_interrupt(CPUArchState *env, int mask);
 #endif /* USER_ONLY */
-void cpu_reset_interrupt(CPUState *env, int mask);
+void cpu_reset_interrupt(CPUArchState *env, int mask);
-void cpu_exit(CPUState *s);
+void cpu_exit(CPUArchState *s);
-bool qemu_cpu_has_work(CPUState *env);
+bool qemu_cpu_has_work(CPUArchState *env);
 /* Breakpoint/watchpoint flags */
 #define BP_MEM_READ 0x01
@@ -416,26 +416,26 @@ bool qemu_cpu_has_work(CPUState *env);
 #define BP_GDB 0x10
 #define BP_CPU 0x20
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
 CPUBreakpoint **breakpoint);
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
+int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
-void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
-void cpu_breakpoint_remove_all(CPUState *env, int mask);
+void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
 int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
+int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
 target_ulong len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
-void cpu_watchpoint_remove_all(CPUState *env, int mask);
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
 #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
 #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
 #define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */
-void cpu_single_step(CPUState *env, int enabled);
+void cpu_single_step(CPUArchState *env, int enabled);
-void cpu_state_reset(CPUState *s);
+void cpu_state_reset(CPUArchState *s);
-int cpu_is_stopped(CPUState *env);
+int cpu_is_stopped(CPUArchState *env);
-void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);
+void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data);
 #define CPU_LOG_TB_OUT_ASM (1 << 0)
 #define CPU_LOG_TB_IN_ASM (1 << 1)
@@ -466,7 +466,7 @@ int cpu_str_to_log_mask(const char *str);
 /* Return the physical page corresponding to a virtual one. Use it
 only for debugging because no protection checks are done. Return -1
 if no page found. */
-target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
+target_phys_addr_t cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
 /* memory API */
@@ -508,12 +508,12 @@ extern int mem_prealloc;
 /* Set if TLB entry is an IO callback. */
 #define TLB_MMIO (1 << 5)
-void cpu_tlb_update_dirty(CPUState *env);
+void cpu_tlb_update_dirty(CPUArchState *env);
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 #endif /* !CONFIG_USER_ONLY */
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
 uint8_t *buf, int len, int is_write);
 #endif /* CPU_ALL_H */


@@ -202,7 +202,7 @@ typedef struct CPUWatchpoint {
 jmp_buf jmp_env; \
 int exception_index; \
 \
-CPUState *next_cpu; /* next CPU sharing TB cache */ \
+CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
 int cpu_index; /* CPU index (informative) */ \
 uint32_t host_tid; /* host thread ID */ \
 int numa_node; /* NUMA node this cpu is belonging to */ \


@@ -26,12 +26,12 @@ int tb_invalidated_flag;
 //#define CONFIG_DEBUG_EXEC
-bool qemu_cpu_has_work(CPUState *env)
+bool qemu_cpu_has_work(CPUArchState *env)
 {
 return cpu_has_work(env);
 }
-void cpu_loop_exit(CPUState *env)
+void cpu_loop_exit(CPUArchState *env)
 {
 env->current_tb = NULL;
 longjmp(env->jmp_env, 1);
@@ -41,7 +41,7 @@ void cpu_loop_exit(CPUState *env)
 restored in a state compatible with the CPU emulator
 */
 #if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUState *env, void *puc)
+void cpu_resume_from_signal(CPUArchState *env, void *puc)
 {
 /* XXX: restore cpu registers saved in host registers */
@@ -52,7 +52,7 @@ void cpu_resume_from_signal(CPUState *env, void *puc)
 /* Execute the code without caching the generated code. An interpreter
 could be used if available. */
-static void cpu_exec_nocache(CPUState *env, int max_cycles,
+static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
 TranslationBlock *orig_tb)
 {
 unsigned long next_tb;
@@ -79,7 +79,7 @@ static void cpu_exec_nocache(CPUState *env, int max_cycles,
 tb_free(tb);
 }
-static TranslationBlock *tb_find_slow(CPUState *env,
+static TranslationBlock *tb_find_slow(CPUArchState *env,
 target_ulong pc,
 target_ulong cs_base,
 uint64_t flags)
@@ -135,7 +135,7 @@ static TranslationBlock *tb_find_slow(CPUState *env,
 return tb;
 }
-static inline TranslationBlock *tb_find_fast(CPUState *env)
+static inline TranslationBlock *tb_find_fast(CPUArchState *env)
 {
 TranslationBlock *tb;
 target_ulong cs_base, pc;
@@ -163,7 +163,7 @@ CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
 return old_handler;
 }
-static void cpu_handle_debug_exception(CPUState *env)
+static void cpu_handle_debug_exception(CPUArchState *env)
 {
 CPUWatchpoint *wp;
@@ -181,7 +181,7 @@ static void cpu_handle_debug_exception(CPUState *env)
 volatile sig_atomic_t exit_request;
-int cpu_exec(CPUState *env)
+int cpu_exec(CPUArchState *env)
 {
 int ret, interrupt_request;
 TranslationBlock *tb;

cpus.c

@@ -58,7 +58,7 @@
 #endif /* CONFIG_LINUX */
-static CPUState *next_cpu;
+static CPUArchState *next_cpu;
 /***********************************************************/
 /* guest cycle counter */
@@ -89,7 +89,7 @@ TimersState timers_state;
 int64_t cpu_get_icount(void)
 {
 int64_t icount;
-CPUState *env = cpu_single_env;
+CPUArchState *env = cpu_single_env;
 icount = qemu_icount;
 if (env) {
@@ -339,7 +339,7 @@ void configure_icount(const char *option)
 void hw_error(const char *fmt, ...)
 {
 va_list ap;
-CPUState *env;
+CPUArchState *env;
 va_start(ap, fmt);
 fprintf(stderr, "qemu: hardware error: ");
@@ -359,7 +359,7 @@ void hw_error(const char *fmt, ...)
 void cpu_synchronize_all_states(void)
 {
-CPUState *cpu;
+CPUArchState *cpu;
 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
 cpu_synchronize_state(cpu);
@@ -368,7 +368,7 @@ void cpu_synchronize_all_states(void)
 void cpu_synchronize_all_post_reset(void)
 {
-CPUState *cpu;
+CPUArchState *cpu;
 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
 cpu_synchronize_post_reset(cpu);
@@ -377,14 +377,14 @@ void cpu_synchronize_all_post_reset(void)
 void cpu_synchronize_all_post_init(void)
 {
-CPUState *cpu;
+CPUArchState *cpu;
 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
 cpu_synchronize_post_init(cpu);
 }
 }
-int cpu_is_stopped(CPUState *env)
+int cpu_is_stopped(CPUArchState *env)
 {
 return !runstate_is_running() || env->stopped;
 }
@@ -402,7 +402,7 @@ static void do_vm_stop(RunState state)
 }
 }
-static int cpu_can_run(CPUState *env)
+static int cpu_can_run(CPUArchState *env)
 {
 if (env->stop) {
 return 0;
@@ -413,7 +413,7 @@ static int cpu_can_run(CPUState *env)
 return 1;
 }
-static bool cpu_thread_is_idle(CPUState *env)
+static bool cpu_thread_is_idle(CPUArchState *env)
 {
 if (env->stop || env->queued_work_first) {
 return false;
@@ -430,7 +430,7 @@ static bool cpu_thread_is_idle(CPUState *env)
 bool all_cpu_threads_idle(void)
 {
-CPUState *env;
+CPUArchState *env;
 for (env = first_cpu; env != NULL; env = env->next_cpu) {
 if (!cpu_thread_is_idle(env)) {
@@ -440,7 +440,7 @@ bool all_cpu_threads_idle(void)
 return true;
 }
-static void cpu_handle_guest_debug(CPUState *env)
+static void cpu_handle_guest_debug(CPUArchState *env)
 {
 gdb_set_stop_cpu(env);
 qemu_system_debug_request();
@@ -494,7 +494,7 @@ static void qemu_init_sigbus(void)
 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 }
-static void qemu_kvm_eat_signals(CPUState *env)
+static void qemu_kvm_eat_signals(CPUArchState *env)
 {
 struct timespec ts = { 0, 0 };
 siginfo_t siginfo;
@@ -537,7 +537,7 @@ static void qemu_init_sigbus(void)
 {
 }
-static void qemu_kvm_eat_signals(CPUState *env)
+static void qemu_kvm_eat_signals(CPUArchState *env)
 {
 }
 #endif /* !CONFIG_LINUX */
@@ -547,7 +547,7 @@ static void dummy_signal(int sig)
 {
 }
-static void qemu_kvm_init_cpu_signals(CPUState *env)
+static void qemu_kvm_init_cpu_signals(CPUArchState *env)
 {
 int r;
 sigset_t set;
@@ -582,7 +582,7 @@ static void qemu_tcg_init_cpu_signals(void)
 }
 #else /* _WIN32 */
-static void qemu_kvm_init_cpu_signals(CPUState *env)
+static void qemu_kvm_init_cpu_signals(CPUArchState *env)
 {
 abort();
 }
@@ -619,7 +619,7 @@ void qemu_init_cpu_loop(void)
 qemu_thread_get_self(&io_thread);
 }
-void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
+void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
 {
 struct qemu_work_item wi;
@@ -641,14 +641,14 @@ void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
 qemu_cpu_kick(env);
 while (!wi.done) {
-CPUState *self_env = cpu_single_env;
+CPUArchState *self_env = cpu_single_env;
 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
 cpu_single_env = self_env;
 }
 }
-static void flush_queued_work(CPUState *env)
+static void flush_queued_work(CPUArchState *env)
 {
 struct qemu_work_item *wi;
@@ -665,7 +665,7 @@ static void flush_queued_work(CPUState *env)
 qemu_cond_broadcast(&qemu_work_cond);
 }
-static void qemu_wait_io_event_common(CPUState *env)
+static void qemu_wait_io_event_common(CPUArchState *env)
 {
 if (env->stop) {
 env->stop = 0;
@@ -678,7 +678,7 @@ static void qemu_wait_io_event_common(CPUState *env)
 static void qemu_tcg_wait_io_event(void)
 {
-CPUState *env;
+CPUArchState *env;
 while (all_cpu_threads_idle()) {
 /* Start accounting real time to the virtual clock if the CPUs
@@ -696,7 +696,7 @@ static void qemu_tcg_wait_io_event(void)
 }
 }
-static void qemu_kvm_wait_io_event(CPUState *env)
+static void qemu_kvm_wait_io_event(CPUArchState *env)
 {
 while (cpu_thread_is_idle(env)) {
 qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
@@ -708,7 +708,7 @@ static void qemu_kvm_wait_io_event(CPUState *env)
 static void *qemu_kvm_cpu_thread_fn(void *arg)
 {
-CPUState *env = arg;
+CPUArchState *env = arg;
 int r;
 qemu_mutex_lock(&qemu_global_mutex);
@@ -745,7 +745,7 @@ static void tcg_exec_all(void);
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
-CPUState *env = arg;
+CPUArchState *env = arg;
 qemu_tcg_init_cpu_signals();
 qemu_thread_get_self(env->thread);
@@ -779,7 +779,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 return NULL;
 }
-static void qemu_cpu_kick_thread(CPUState *env)
+static void qemu_cpu_kick_thread(CPUArchState *env)
 {
 #ifndef _WIN32
 int err;
@@ -800,7 +800,7 @@ static void qemu_cpu_kick_thread(CPUState *env)
 void qemu_cpu_kick(void *_env)
 {
-CPUState *env = _env;
+CPUArchState *env = _env;
 qemu_cond_broadcast(env->halt_cond);
 if (kvm_enabled() && !env->thread_kicked) {
@@ -825,7 +825,7 @@ void qemu_cpu_kick_self(void)
 int qemu_cpu_is_self(void *_env)
 {
-CPUState *env = _env;
+CPUArchState *env = _env;
 return qemu_thread_is_self(env->thread);
 }
@@ -852,7 +852,7 @@ void qemu_mutex_unlock_iothread(void)
 static int all_vcpus_paused(void)
 {
-CPUState *penv = first_cpu;
+CPUArchState *penv = first_cpu;
 while (penv) {
 if (!penv->stopped) {
@@ -866,7 +866,7 @@ static int all_vcpus_paused(void)
 void pause_all_vcpus(void)
 {
-CPUState *penv = first_cpu;
+CPUArchState *penv = first_cpu;
 qemu_clock_enable(vm_clock, false);
 while (penv) {
@@ -899,7 +899,7 @@ void pause_all_vcpus(void)
 void resume_all_vcpus(void)
 {
-CPUState *penv = first_cpu;
+CPUArchState *penv = first_cpu;
 qemu_clock_enable(vm_clock, true);
 while (penv) {
@@ -912,7 +912,7 @@ void resume_all_vcpus(void)
 static void qemu_tcg_init_vcpu(void *_env)
 {
-CPUState *env = _env;
+CPUArchState *env = _env;
 /* share a single thread for all cpus with TCG */
 if (!tcg_cpu_thread) {
@@ -935,7 +935,7 @@ static void qemu_tcg_init_vcpu(void *_env)
 }
 }
-static void qemu_kvm_start_vcpu(CPUState *env)
+static void qemu_kvm_start_vcpu(CPUArchState *env)
 {
 env->thread = g_malloc0(sizeof(QemuThread));
 env->halt_cond = g_malloc0(sizeof(QemuCond));
@@ -949,7 +949,7 @@ static void qemu_kvm_start_vcpu(CPUState *env)
 void qemu_init_vcpu(void *_env)
 {
-CPUState *env = _env;
+CPUArchState *env = _env;
 env->nr_cores = smp_cores;
 env->nr_threads = smp_threads;
@@ -996,7 +996,7 @@ void vm_stop_force_state(RunState state)
 }
 }
-static int tcg_cpu_exec(CPUState *env)
+static int tcg_cpu_exec(CPUArchState *env)
 {
 int ret;
 #ifdef CONFIG_PROFILER
@@ -1045,7 +1045,7 @@ static void tcg_exec_all(void)
 next_cpu = first_cpu;
 }
 for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
-CPUState *env = next_cpu;
+CPUArchState *env = next_cpu;
 qemu_clock_enable(vm_clock,
 (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1065,7 +1065,7 @@ static void tcg_exec_all(void)
 void set_numa_modes(void)
 {
-CPUState *env;
+CPUArchState *env;
 int i;
 for (env = first_cpu; env != NULL; env = env->next_cpu) {
@@ -1111,7 +1111,7 @@ void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 CpuInfoList *qmp_query_cpus(Error **errp)
 {
 CpuInfoList *head = NULL, *cur_item = NULL;
-CPUState *env;
+CPUArchState *env;
 for(env = first_cpu; env != NULL; env = env->next_cpu) {
 CpuInfoList *info;
@@ -1157,7 +1157,7 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
 {
 FILE *f;
 uint32_t l;
-CPUState *env;
+CPUArchState *env;
 uint8_t buf[1024];
 if (!has_cpu) {
@@ -1232,7 +1232,7 @@ exit:
 void qmp_inject_nmi(Error **errp)
 {
 #if defined(TARGET_I386)
-CPUState *env;
+CPUArchState *env;
 for (env = first_cpu; env != NULL; env = env->next_cpu) {
 if (!env->apic_state) {


@@ -71,7 +71,7 @@ void gemu_log(const char *fmt, ...)
 va_end(ap);
 }
-int cpu_get_pic_interrupt(CPUState *env)
+int cpu_get_pic_interrupt(CPUArchState *env)
 {
 return -1;
 }
@@ -729,7 +729,7 @@ static void usage(void)
 }
 /* XXX: currently only used for async signals (see signal.c) */
-CPUState *global_env;
+CPUArchState *global_env;
 /* used to free thread contexts */
 TaskState *first_task_state;
@@ -741,7 +741,7 @@ int main(int argc, char **argv)
 const char *log_mask = NULL;
 struct target_pt_regs regs1, *regs = &regs1;
 TaskState ts1, *ts = &ts1;
-CPUState *env;
+CPUArchState *env;
 int optind;
 short use_gdbstub = 0;
 const char *r;


@@ -104,8 +104,8 @@ void qerror(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
 void write_dt(void *ptr, unsigned long addr, unsigned long limit, int flags);
-extern CPUState *global_env;
+extern CPUArchState *global_env;
-void cpu_loop(CPUState *env);
+void cpu_loop(CPUArchState *env);
 void init_paths(const char *prefix);
 const char *path(const char *pathname);
@@ -122,7 +122,7 @@ void signal_init(void);
 int queue_signal(int sig, target_siginfo_t *info);
 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
-long do_sigreturn(CPUState *env, int num);
+long do_sigreturn(CPUArchState *env, int num);
 /* machload.c */
 int mach_exec(const char * filename, char ** argv, char ** envp,


@@ -377,12 +377,12 @@ long do_sigreturn(CPUX86State *env, int num)
 #else
 static void setup_frame(int sig, struct emulated_sigaction *ka,
-void *set, CPUState *env)
+void *set, CPUArchState *env)
 {
 fprintf(stderr, "setup_frame: not implemented\n");
 }
-long do_sigreturn(CPUState *env, int num)
+long do_sigreturn(CPUArchState *env, int num)
 {
 int i = 0;
 struct target_sigcontext *scp = get_int_arg(&i, env);


@@ -52,7 +52,7 @@
 #define dh_ctype_tl target_ulong
 #define dh_ctype_ptr void *
 #define dh_ctype_void void
-#define dh_ctype_env CPUState *
+#define dh_ctype_env CPUArchState *
 #define dh_ctype(t) dh_ctype_##t
 /* We can't use glue() here because it falls foul of C preprocessor


@@ -339,7 +339,7 @@ const char *lookup_symbol(target_ulong orig_addr)
 #include "monitor.h"
 static int monitor_disas_is_physical;
-static CPUState *monitor_disas_env;
+static CPUArchState *monitor_disas_env;
 static int
 monitor_read_memory (bfd_vma memaddr, bfd_byte *myaddr, int length,
@@ -363,7 +363,7 @@ monitor_fprintf(FILE *stream, const char *fmt, ...)
 return 0;
 }
-void monitor_disas(Monitor *mon, CPUState *env,
+void monitor_disas(Monitor *mon, CPUArchState *env,
 target_ulong pc, int nb_insn, int is_physical, int flags)
 {
 int count, i;


@@ -8,7 +8,7 @@
 void disas(FILE *out, void *code, unsigned long size);
 void target_disas(FILE *out, target_ulong code, target_ulong size, int flags);
-void monitor_disas(Monitor *mon, CPUState *env,
+void monitor_disas(Monitor *mon, CPUArchState *env,
 target_ulong pc, int nb_insn, int is_physical, int flags);
 /* Look up symbol for debugging purpose. Returns "" if unknown. */


@@ -61,10 +61,10 @@
 #endif
 #if defined(AREG0)
-register CPUState *env asm(AREG0);
+register CPUArchState *env asm(AREG0);
 #else
 /* TODO: Try env = cpu_single_env. */
-extern CPUState *env;
+extern CPUArchState *env;
 #endif
 #endif /* !defined(__DYNGEN_EXEC_H__) */


@@ -76,30 +76,30 @@ extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
 #include "qemu-log.h"
-void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
+void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
-void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
-void restore_state_to_opc(CPUState *env, struct TranslationBlock *tb,
+void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
 int pc_pos);
 void cpu_gen_init(void);
-int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
+int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
 int *gen_code_size_ptr);
 int cpu_restore_state(struct TranslationBlock *tb,
-CPUState *env, unsigned long searched_pc);
+CPUArchState *env, unsigned long searched_pc);
-void cpu_resume_from_signal(CPUState *env1, void *puc);
+void cpu_resume_from_signal(CPUArchState *env1, void *puc);
-void cpu_io_recompile(CPUState *env, void *retaddr);
+void cpu_io_recompile(CPUArchState *env, void *retaddr);
-TranslationBlock *tb_gen_code(CPUState *env,
+TranslationBlock *tb_gen_code(CPUArchState *env,
 target_ulong pc, target_ulong cs_base, int flags,
 int cflags);
-void cpu_exec_init(CPUState *env);
+void cpu_exec_init(CPUArchState *env);
-void QEMU_NORETURN cpu_loop_exit(CPUState *env1);
+void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
 int page_unprotect(target_ulong address, unsigned long pc, void *puc);
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 int is_cpu_write_access);
-void tlb_flush_page(CPUState *env, target_ulong addr);
+void tlb_flush_page(CPUArchState *env, target_ulong addr);
-void tlb_flush(CPUState *env, int flush_global);
+void tlb_flush(CPUArchState *env, int flush_global);
 #if !defined(CONFIG_USER_ONLY)
-void tlb_set_page(CPUState *env, target_ulong vaddr,
+void tlb_set_page(CPUArchState *env, target_ulong vaddr,
 target_phys_addr_t paddr, int prot,
 int mmu_idx, target_ulong size);
 #endif
@@ -182,7 +182,7 @@ static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
 }
 void tb_free(TranslationBlock *tb);
-void tb_flush(CPUState *env);
+void tb_flush(CPUArchState *env);
 void tb_link_page(TranslationBlock *tb,
 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
@@ -305,7 +305,7 @@ uint64_t io_mem_read(struct MemoryRegion *mr, target_phys_addr_t addr,
 void io_mem_write(struct MemoryRegion *mr, target_phys_addr_t addr,
 uint64_t value, unsigned size);
-void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
 void *retaddr);
 #include "softmmu_defs.h"
@@ -333,15 +333,15 @@ void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
 #endif
 #if defined(CONFIG_USER_ONLY)
-static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {
 return addr;
 }
 #else
-tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr);
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
 #endif
-typedef void (CPUDebugExcpHandler)(CPUState *env);
+typedef void (CPUDebugExcpHandler)(CPUArchState *env);
 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
@@ -353,7 +353,7 @@ extern volatile sig_atomic_t exit_request;
 /* Deterministic execution requires that IO only be performed on the last
 instruction of a TB so that interrupts take effect immediately. */
-static inline int can_do_io(CPUState *env)
+static inline int can_do_io(CPUArchState *env)
 {
 if (!use_icount) {
 return 1;

exec.c

@@ -123,10 +123,10 @@ static MemoryRegion io_mem_subpage_ram;
 #endif
-CPUState *first_cpu;
+CPUArchState *first_cpu;
 /* current CPU in the current thread. It is only valid inside
 cpu_exec() */
-DEFINE_TLS(CPUState *,cpu_single_env);
+DEFINE_TLS(CPUArchState *,cpu_single_env);
 /* 0 = Do not count executed instructions.
 1 = Precise instruction counting.
 2 = Adaptive rate instruction counting. */
@@ -509,7 +509,7 @@ static target_phys_addr_t section_addr(MemoryRegionSection *section,
 }
 static void tlb_protect_code(ram_addr_t ram_addr);
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
 target_ulong vaddr);
 #define mmap_lock() do { } while(0)
 #define mmap_unlock() do { } while(0)
@@ -661,7 +661,7 @@ void cpu_exec_init_all(void)
 static int cpu_common_post_load(void *opaque, int version_id)
 {
-CPUState *env = opaque;
+CPUArchState *env = opaque;
 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
 version_id is increased. */
@@ -678,16 +678,16 @@ static const VMStateDescription vmstate_cpu_common = {
 .minimum_version_id_old = 1,
 .post_load = cpu_common_post_load,
 .fields = (VMStateField []) {
-VMSTATE_UINT32(halted, CPUState),
+VMSTATE_UINT32(halted, CPUArchState),
-VMSTATE_UINT32(interrupt_request, CPUState),
+VMSTATE_UINT32(interrupt_request, CPUArchState),
 VMSTATE_END_OF_LIST()
 }
 };
 #endif
-CPUState *qemu_get_cpu(int cpu)
+CPUArchState *qemu_get_cpu(int cpu)
 {
-CPUState *env = first_cpu;
+CPUArchState *env = first_cpu;
 while (env) {
 if (env->cpu_index == cpu)
@@ -698,9 +698,9 @@ CPUState *qemu_get_cpu(int cpu)
 return env;
 }
-void cpu_exec_init(CPUState *env)
+void cpu_exec_init(CPUArchState *env)
 {
-CPUState **penv;
+CPUArchState **penv;
 int cpu_index;
 #if defined(CONFIG_USER_ONLY)
@@ -799,9 +799,9 @@ static void page_flush_tb(void)
 /* flush all the translation blocks */
 /* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *env1)
+void tb_flush(CPUArchState *env1)
 {
-CPUState *env;
+CPUArchState *env;
 #if defined(DEBUG_FLUSH)
 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
 (unsigned long)(code_gen_ptr - code_gen_buffer),
@@ -934,7 +934,7 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
-CPUState *env;
+CPUArchState *env;
 PageDesc *p;
 unsigned int h, n1;
 tb_page_addr_t phys_pc;
@@ -1043,7 +1043,7 @@ static void build_page_bitmap(PageDesc *p)
 }
 }
-TranslationBlock *tb_gen_code(CPUState *env,
+TranslationBlock *tb_gen_code(CPUArchState *env,
 target_ulong pc, target_ulong cs_base,
 int flags, int cflags)
 {
@@ -1090,7 +1090,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 int is_cpu_write_access)
 {
 TranslationBlock *tb, *tb_next, *saved_tb;
-CPUState *env = cpu_single_env;
+CPUArchState *env = cpu_single_env;
 tb_page_addr_t tb_start, tb_end;
 PageDesc *p;
 int n;
@@ -1227,7 +1227,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
 int n;
 #ifdef TARGET_HAS_PRECISE_SMC
 TranslationBlock *current_tb = NULL;
-CPUState *env = cpu_single_env;
+CPUArchState *env = cpu_single_env;
 int current_tb_modified = 0;
 target_ulong current_pc = 0;
 target_ulong current_cs_base = 0;
@@ -1457,12 +1457,12 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
 #if defined(TARGET_HAS_ICE)
 #if defined(CONFIG_USER_ONLY)
-static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
 {
 tb_invalidate_phys_page_range(pc, pc + 1, 0);
 }
 #else
-static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
 {
 target_phys_addr_t addr;
 ram_addr_t ram_addr;
@@ -1482,19 +1482,19 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 #endif /* TARGET_HAS_ICE */
 #if defined(CONFIG_USER_ONLY)
-void cpu_watchpoint_remove_all(CPUState *env, int mask)
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
 {
 }
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
 int flags, CPUWatchpoint **watchpoint)
 {
 return -ENOSYS;
 }
 #else
 /* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
 int flags, CPUWatchpoint **watchpoint)
 {
 target_ulong len_mask = ~(len - 1);
@@ -1527,7 +1527,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
 }
 /* Remove a specific watchpoint. */
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
 int flags)
 {
 target_ulong len_mask = ~(len - 1);
@@ -1544,7 +1544,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
 }
 /* Remove a specific watchpoint by reference. */
-void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
+void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
 {
 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
@@ -1554,7 +1554,7 @@ void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
 }
 /* Remove all matching watchpoints. */
-void cpu_watchpoint_remove_all(CPUState *env, int mask)
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
 {
 CPUWatchpoint *wp, *next;
@@ -1566,7 +1566,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
 #endif
 /* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
 CPUBreakpoint **breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
@@ -1594,7 +1594,7 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
 }
 /* Remove a specific breakpoint. */
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
+int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
 {
 #if defined(TARGET_HAS_ICE)
 CPUBreakpoint *bp;
@@ -1612,7 +1612,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
 }
 /* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
@@ -1624,7 +1624,7 @@ void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
 }
 /* Remove all matching breakpoints. */
-void cpu_breakpoint_remove_all(CPUState *env, int mask)
+void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
 {
 #if defined(TARGET_HAS_ICE)
 CPUBreakpoint *bp, *next;
@@ -1638,7 +1638,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
 /* enable or disable single step mode. EXCP_DEBUG is returned by the
 CPU loop after each instruction */
-void cpu_single_step(CPUState *env, int enabled)
+void cpu_single_step(CPUArchState *env, int enabled)
 {
 #if defined(TARGET_HAS_ICE)
 if (env->singlestep_enabled != enabled) {
@@ -1694,7 +1694,7 @@ void cpu_set_log_filename(const char *filename)
 cpu_set_log(loglevel);
 }
-static void cpu_unlink_tb(CPUState *env)
+static void cpu_unlink_tb(CPUArchState *env)
 {
 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
 problem and hope the cpu will stop of its own accord. For userspace
@@ -1716,7 +1716,7 @@ static void cpu_unlink_tb(CPUState *env)
 #ifndef CONFIG_USER_ONLY
 /* mask must never be zero, except for A20 change call */
-static void tcg_handle_interrupt(CPUState *env, int mask)
+static void tcg_handle_interrupt(CPUArchState *env, int mask)
 {
 int old_mask;
@@ -1747,19 +1747,19 @@ CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
 #else /* CONFIG_USER_ONLY */
-void cpu_interrupt(CPUState *env, int mask)
+void cpu_interrupt(CPUArchState *env, int mask)
 {
 env->interrupt_request |= mask;
 cpu_unlink_tb(env);
 }
 #endif /* CONFIG_USER_ONLY */
-void cpu_reset_interrupt(CPUState *env, int mask)
+void cpu_reset_interrupt(CPUArchState *env, int mask)
 {
 env->interrupt_request &= ~mask;
 }
-void cpu_exit(CPUState *env)
+void cpu_exit(CPUArchState *env)
 {
 env->exit_request = 1;
 cpu_unlink_tb(env);
@@ -1837,7 +1837,7 @@ int cpu_str_to_log_mask(const char *str)
 return mask;
 }
-void cpu_abort(CPUState *env, const char *fmt, ...)
+void cpu_abort(CPUArchState *env, const char *fmt, ...)
 {
 va_list ap;
 va_list ap2;
@@ -1877,17 +1877,17 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
 abort();
 }
-CPUState *cpu_copy(CPUState *env)
+CPUArchState *cpu_copy(CPUArchState *env)
 {
-CPUState *new_env = cpu_init(env->cpu_model_str);
+CPUArchState *new_env = cpu_init(env->cpu_model_str);
-CPUState *next_cpu = new_env->next_cpu;
+CPUArchState *next_cpu = new_env->next_cpu;
 int cpu_index = new_env->cpu_index;
 #if defined(TARGET_HAS_ICE)
 CPUBreakpoint *bp;
 CPUWatchpoint *wp;
 #endif
-memcpy(new_env, env, sizeof(CPUState));
+memcpy(new_env, env, sizeof(CPUArchState));
 /* Preserve chaining and index. */
 new_env->next_cpu = next_cpu;
@@ -1913,7 +1913,7 @@ CPUState *cpu_copy(CPUState *env)
 #if !defined(CONFIG_USER_ONLY)
-static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
+static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
 {
 unsigned int i;
@@ -1947,7 +1947,7 @@ static CPUTLBEntry s_cputlb_empty_entry = {
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
-void tlb_flush(CPUState *env, int flush_global)
+void tlb_flush(CPUArchState *env, int flush_global)
 {
 int i;
@@ -1984,7 +1984,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 }
 }
-void tlb_flush_page(CPUState *env, target_ulong addr)
+void tlb_flush_page(CPUArchState *env, target_ulong addr)
 {
 int i;
 int mmu_idx;
@@ -2025,7 +2025,7 @@ static void tlb_protect_code(ram_addr_t ram_addr)
 /* update the TLB so that writes in physical page 'phys_addr' are no longer
 tested for self modifying code */
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
 target_ulong vaddr)
 {
 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
@@ -2047,7 +2047,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 int dirty_flags)
 {
-CPUState *env;
+CPUArchState *env;
 unsigned long length, start1;
 int i;
@@ -2102,7 +2102,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 }
 /* update the TLB according to the current state of the dirty bits */
-void cpu_tlb_update_dirty(CPUState *env)
+void cpu_tlb_update_dirty(CPUArchState *env)
 {
 int i;
 int mmu_idx;
@@ -2120,7 +2120,7 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
 /* update the TLB corresponding to virtual page vaddr
 so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
 {
 int i;
 int mmu_idx;
@@ -2133,7 +2133,7 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
 /* Our TLB does not support large pages, so remember the area covered by
 large pages and trigger a full TLB flush if these are invalidated. */
-static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
+static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
 target_ulong size)
 {
 target_ulong mask = ~(size - 1);
@@ -2174,7 +2174,7 @@ static bool is_ram_rom_romd(MemoryRegionSection *s)
 /* Add a new TLB entry. At most one entry for a given virtual address
 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 supplied size is only used by tlb_flush_page. */
-void tlb_set_page(CPUState *env, target_ulong vaddr,
+void tlb_set_page(CPUArchState *env, target_ulong vaddr,
 target_phys_addr_t paddr, int prot,
 int mmu_idx, target_ulong size)
 {
@@ -2277,11 +2277,11 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
 #else
-void tlb_flush(CPUState *env, int flush_global)
+void tlb_flush(CPUArchState *env, int flush_global)
 {
 }
-void tlb_flush_page(CPUState *env, target_ulong addr)
+void tlb_flush_page(CPUArchState *env, target_ulong addr)
 {
 }
@@ -2542,7 +2542,7 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc)
 return 0;
 }
-static inline void tlb_set_dirty(CPUState *env,
+static inline void tlb_set_dirty(CPUArchState *env,
 unsigned long addr, target_ulong vaddr)
 {
 }
@@ -3299,7 +3299,7 @@ static const MemoryRegionOps notdirty_mem_ops = {
 /* Generate a debug exception if a watchpoint has been hit. */
 static void check_watchpoint(int offset, int len_mask, int flags)
 {
-CPUState *env = cpu_single_env;
+CPUArchState *env = cpu_single_env;
 target_ulong pc, cs_base;
 TranslationBlock *tb;
 target_ulong vaddr;
@@ -3544,7 +3544,7 @@ static void core_begin(MemoryListener *listener)
 static void core_commit(MemoryListener *listener)
 {
-CPUState *env;
+CPUArchState *env;
 /* since each CPU stores ram addresses in its TLB cache, we must
 reset the modified entries */
@@ -3734,7 +3734,7 @@ MemoryRegion *get_system_io(void)
 /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
 uint8_t *buf, int len, int is_write)
 {
 int l, flags;
@@ -4440,7 +4440,7 @@ void stq_be_phys(target_phys_addr_t addr, uint64_t val)
 }
 /* virtual memory access for debug (includes writing to ROM) */
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
 uint8_t *buf, int len, int is_write)
 {
 int l;
@@ -4471,7 +4471,7 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
 /* in deterministic execution mode, instructions doing device I/Os
 must be at the end of the TB */
-void cpu_io_recompile(CPUState *env, void *retaddr)
+void cpu_io_recompile(CPUArchState *env, void *retaddr)
 {
 TranslationBlock *tb;
 uint32_t n, cflags;
@@ -4585,7 +4585,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
 /* NOTE: this function can trigger an exception */
 /* NOTE2: the returned address is not exactly the physical address: it
 is the offset relative to phys_ram_base */
-tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {
 int mmu_idx, page_index, pd;
 void *p;


@@ -42,7 +42,7 @@
 #include "kvm.h"
 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
-static inline int target_memory_rw_debug(CPUState *env, target_ulong addr,
+static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
 uint8_t *buf, int len, int is_write)
 {
 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
@@ -287,9 +287,9 @@ enum RSState {
 RS_SYSCALL,
 };
 typedef struct GDBState {
-CPUState *c_cpu; /* current CPU for step/continue ops */
+CPUArchState *c_cpu; /* current CPU for step/continue ops */
-CPUState *g_cpu; /* current CPU for other ops */
+CPUArchState *g_cpu; /* current CPU for other ops */
-CPUState *query_cpu; /* for q{f|s}ThreadInfo */
+CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
 enum RSState state; /* parsing state */
 char line_buf[MAX_PACKET_LENGTH];
 int line_buf_index;
@@ -1655,12 +1655,12 @@ static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
 #define NUM_CORE_REGS 0
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
 {
 return 0;
 }
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
 {
 return 0;
 }
@@ -1736,7 +1736,7 @@ static const char *get_feature_xml(const char *p, const char **newp)
 }
 #endif
-static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg)
+static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
 {
 GDBRegisterState *r;
@@ -1751,7 +1751,7 @@ static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg)
 return 0;
 }
-static int gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg)
+static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
 {
 GDBRegisterState *r;
@@ -1773,7 +1773,7 @@ static int gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg)
 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
 */
-void gdb_register_coprocessor(CPUState * env,
+void gdb_register_coprocessor(CPUArchState * env,
 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
 int num_regs, const char *xml, int g_pos)
 {
@@ -1820,7 +1820,7 @@ static const int xlat_gdb_type[] = {
 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
 {
-CPUState *env;
+CPUArchState *env;
 int err = 0;
 if (kvm_enabled())
@@ -1854,7 +1854,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
 {
-CPUState *env;
+CPUArchState *env;
 int err = 0;
 if (kvm_enabled())
@@ -1887,7 +1887,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
 static void gdb_breakpoint_remove_all(void)
 {
-CPUState *env;
+CPUArchState *env;
 if (kvm_enabled()) {
 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
@@ -1939,7 +1939,7 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
 #endif
 }
-static inline int gdb_id(CPUState *env)
+static inline int gdb_id(CPUArchState *env)
 {
 #if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
 return env->host_tid;
@@ -1948,9 +1948,9 @@ static inline int gdb_id(CPUState *env)
 #endif
 }
-static CPUState *find_cpu(uint32_t thread_id)
+static CPUArchState *find_cpu(uint32_t thread_id)
 {
-CPUState *env;
+CPUArchState *env;
 for (env = first_cpu; env != NULL; env = env->next_cpu) {
 if (gdb_id(env) == thread_id) {
@@ -1963,7 +1963,7 @@ static CPUState *find_cpu(uint32_t thread_id)
 static int gdb_handle_packet(GDBState *s, const char *line_buf)
 {
-CPUState *env;
+CPUArchState *env;
 const char *p;
 uint32_t thread;
 int ch, reg_size, type, res;
@@ -2383,7 +2383,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
 return RS_IDLE;
 }
-void gdb_set_stop_cpu(CPUState *env)
+void gdb_set_stop_cpu(CPUArchState *env)
 {
 gdbserver_state->c_cpu = env;
 gdbserver_state->g_cpu = env;
@@ -2393,7 +2393,7 @@ void gdb_set_stop_cpu(CPUState *env)
 static void gdb_vm_state_change(void *opaque, int running, RunState state)
 {
 GDBState *s = gdbserver_state;
-CPUState *env = s->c_cpu;
+CPUArchState *env = s->c_cpu;
 char buf[256];
 const char *type;
 int ret;
@@ -2602,7 +2602,7 @@ static void gdb_read_byte(GDBState *s, int ch)
 }
 /* Tell the remote gdb that the process has exited. */
void gdb_exit(CPUState *env, int code) void gdb_exit(CPUArchState *env, int code)
{ {
GDBState *s; GDBState *s;
char buf[4]; char buf[4];
@ -2642,7 +2642,7 @@ gdb_queuesig (void)
} }
int int
gdb_handlesig (CPUState *env, int sig) gdb_handlesig (CPUArchState *env, int sig)
{ {
GDBState *s; GDBState *s;
char buf[256]; char buf[256];
@ -2691,7 +2691,7 @@ gdb_handlesig (CPUState *env, int sig)
} }
/* Tell the remote gdb that the process has exited due to SIG. */ /* Tell the remote gdb that the process has exited due to SIG. */
void gdb_signalled(CPUState *env, int sig) void gdb_signalled(CPUArchState *env, int sig)
{ {
GDBState *s; GDBState *s;
char buf[4]; char buf[4];
@ -2787,7 +2787,7 @@ int gdbserver_start(int port)
} }
/* Disable gdb stub for child processes. */ /* Disable gdb stub for child processes. */
void gdbserver_fork(CPUState *env) void gdbserver_fork(CPUArchState *env)
{ {
GDBState *s = gdbserver_state; GDBState *s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0) if (gdbserver_fd < 0 || s->fd < 0)


@ -11,22 +11,22 @@
#define GDB_WATCHPOINT_ACCESS 4 #define GDB_WATCHPOINT_ACCESS 4
#ifdef NEED_CPU_H #ifdef NEED_CPU_H
typedef void (*gdb_syscall_complete_cb)(CPUState *env, typedef void (*gdb_syscall_complete_cb)(CPUArchState *env,
target_ulong ret, target_ulong err); target_ulong ret, target_ulong err);
void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...); void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
int use_gdb_syscalls(void); int use_gdb_syscalls(void);
void gdb_set_stop_cpu(CPUState *env); void gdb_set_stop_cpu(CPUArchState *env);
void gdb_exit(CPUState *, int); void gdb_exit(CPUArchState *, int);
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
int gdb_queuesig (void); int gdb_queuesig (void);
int gdb_handlesig (CPUState *, int); int gdb_handlesig (CPUArchState *, int);
void gdb_signalled(CPUState *, int); void gdb_signalled(CPUArchState *, int);
void gdbserver_fork(CPUState *); void gdbserver_fork(CPUArchState *);
#endif #endif
/* Get or set a register. Returns the size of the register. */ /* Get or set a register. Returns the size of the register. */
typedef int (*gdb_reg_cb)(CPUState *env, uint8_t *buf, int reg); typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
void gdb_register_coprocessor(CPUState *env, void gdb_register_coprocessor(CPUArchState *env,
gdb_reg_cb get_reg, gdb_reg_cb set_reg, gdb_reg_cb get_reg, gdb_reg_cb set_reg,
int num_regs, const char *xml, int g_pos); int num_regs, const char *xml, int g_pos);


@ -14,13 +14,13 @@ static inline void gen_icount_start(void)
icount_label = gen_new_label(); icount_label = gen_new_label();
count = tcg_temp_local_new_i32(); count = tcg_temp_local_new_i32();
tcg_gen_ld_i32(count, cpu_env, offsetof(CPUState, icount_decr.u32)); tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32));
/* This is a horrid hack to allow fixing up the value later. */ /* This is a horrid hack to allow fixing up the value later. */
icount_arg = gen_opparam_ptr + 1; icount_arg = gen_opparam_ptr + 1;
tcg_gen_subi_i32(count, count, 0xdeadbeef); tcg_gen_subi_i32(count, count, 0xdeadbeef);
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label); tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
tcg_gen_st16_i32(count, cpu_env, offsetof(CPUState, icount_decr.u16.low)); tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low));
tcg_temp_free_i32(count); tcg_temp_free_i32(count);
} }
@ -36,13 +36,13 @@ static void gen_icount_end(TranslationBlock *tb, int num_insns)
static inline void gen_io_start(void) static inline void gen_io_start(void)
{ {
TCGv_i32 tmp = tcg_const_i32(1); TCGv_i32 tmp = tcg_const_i32(1);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io)); tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
} }
static inline void gen_io_end(void) static inline void gen_io_end(void)
{ {
TCGv_i32 tmp = tcg_const_i32(0); TCGv_i32 tmp = tcg_const_i32(0);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io)); tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp);
} }


@ -190,7 +190,7 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
static void kvm_reset_vcpu(void *opaque) static void kvm_reset_vcpu(void *opaque)
{ {
CPUState *env = opaque; CPUArchState *env = opaque;
kvm_arch_reset_vcpu(env); kvm_arch_reset_vcpu(env);
} }
@ -200,7 +200,7 @@ int kvm_pit_in_kernel(void)
return kvm_state->pit_in_kernel; return kvm_state->pit_in_kernel;
} }
int kvm_init_vcpu(CPUState *env) int kvm_init_vcpu(CPUArchState *env)
{ {
KVMState *s = kvm_state; KVMState *s = kvm_state;
long mmap_size; long mmap_size;
@ -830,7 +830,7 @@ static MemoryListener kvm_memory_listener = {
.priority = 10, .priority = 10,
}; };
static void kvm_handle_interrupt(CPUState *env, int mask) static void kvm_handle_interrupt(CPUArchState *env, int mask)
{ {
env->interrupt_request |= mask; env->interrupt_request |= mask;
@ -1135,7 +1135,7 @@ static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
} }
} }
static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run) static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{ {
fprintf(stderr, "KVM internal error."); fprintf(stderr, "KVM internal error.");
if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
@ -1190,7 +1190,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
static void do_kvm_cpu_synchronize_state(void *_env) static void do_kvm_cpu_synchronize_state(void *_env)
{ {
CPUState *env = _env; CPUArchState *env = _env;
if (!env->kvm_vcpu_dirty) { if (!env->kvm_vcpu_dirty) {
kvm_arch_get_registers(env); kvm_arch_get_registers(env);
@ -1198,26 +1198,26 @@ static void do_kvm_cpu_synchronize_state(void *_env)
} }
} }
void kvm_cpu_synchronize_state(CPUState *env) void kvm_cpu_synchronize_state(CPUArchState *env)
{ {
if (!env->kvm_vcpu_dirty) { if (!env->kvm_vcpu_dirty) {
run_on_cpu(env, do_kvm_cpu_synchronize_state, env); run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
} }
} }
void kvm_cpu_synchronize_post_reset(CPUState *env) void kvm_cpu_synchronize_post_reset(CPUArchState *env)
{ {
kvm_arch_put_registers(env, KVM_PUT_RESET_STATE); kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
env->kvm_vcpu_dirty = 0; env->kvm_vcpu_dirty = 0;
} }
void kvm_cpu_synchronize_post_init(CPUState *env) void kvm_cpu_synchronize_post_init(CPUArchState *env)
{ {
kvm_arch_put_registers(env, KVM_PUT_FULL_STATE); kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
env->kvm_vcpu_dirty = 0; env->kvm_vcpu_dirty = 0;
} }
int kvm_cpu_exec(CPUState *env) int kvm_cpu_exec(CPUArchState *env)
{ {
struct kvm_run *run = env->kvm_run; struct kvm_run *run = env->kvm_run;
int ret, run_ret; int ret, run_ret;
@ -1350,7 +1350,7 @@ int kvm_vm_ioctl(KVMState *s, int type, ...)
return ret; return ret;
} }
int kvm_vcpu_ioctl(CPUState *env, int type, ...) int kvm_vcpu_ioctl(CPUArchState *env, int type, ...)
{ {
int ret; int ret;
void *arg; void *arg;
@ -1439,7 +1439,7 @@ void kvm_setup_guest_memory(void *start, size_t size)
} }
#ifdef KVM_CAP_SET_GUEST_DEBUG #ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
target_ulong pc) target_ulong pc)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
@ -1452,26 +1452,26 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
return NULL; return NULL;
} }
int kvm_sw_breakpoints_active(CPUState *env) int kvm_sw_breakpoints_active(CPUArchState *env)
{ {
return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints); return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
} }
struct kvm_set_guest_debug_data { struct kvm_set_guest_debug_data {
struct kvm_guest_debug dbg; struct kvm_guest_debug dbg;
CPUState *env; CPUArchState *env;
int err; int err;
}; };
static void kvm_invoke_set_guest_debug(void *data) static void kvm_invoke_set_guest_debug(void *data)
{ {
struct kvm_set_guest_debug_data *dbg_data = data; struct kvm_set_guest_debug_data *dbg_data = data;
CPUState *env = dbg_data->env; CPUArchState *env = dbg_data->env;
dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg); dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
} }
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{ {
struct kvm_set_guest_debug_data data; struct kvm_set_guest_debug_data data;
@ -1487,11 +1487,11 @@ int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
return data.err; return data.err;
} }
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type) target_ulong len, int type)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
CPUState *env; CPUArchState *env;
int err; int err;
if (type == GDB_BREAKPOINT_SW) { if (type == GDB_BREAKPOINT_SW) {
@ -1532,11 +1532,11 @@ int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
return 0; return 0;
} }
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type) target_ulong len, int type)
{ {
struct kvm_sw_breakpoint *bp; struct kvm_sw_breakpoint *bp;
CPUState *env; CPUArchState *env;
int err; int err;
if (type == GDB_BREAKPOINT_SW) { if (type == GDB_BREAKPOINT_SW) {
@ -1573,11 +1573,11 @@ int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
return 0; return 0;
} }
void kvm_remove_all_breakpoints(CPUState *current_env) void kvm_remove_all_breakpoints(CPUArchState *current_env)
{ {
struct kvm_sw_breakpoint *bp, *next; struct kvm_sw_breakpoint *bp, *next;
KVMState *s = current_env->kvm_state; KVMState *s = current_env->kvm_state;
CPUState *env; CPUArchState *env;
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) { if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
@ -1598,29 +1598,29 @@ void kvm_remove_all_breakpoints(CPUState *current_env)
#else /* !KVM_CAP_SET_GUEST_DEBUG */ #else /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{ {
return -EINVAL; return -EINVAL;
} }
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type) target_ulong len, int type)
{ {
return -EINVAL; return -EINVAL;
} }
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type) target_ulong len, int type)
{ {
return -EINVAL; return -EINVAL;
} }
void kvm_remove_all_breakpoints(CPUState *current_env) void kvm_remove_all_breakpoints(CPUArchState *current_env)
{ {
} }
#endif /* !KVM_CAP_SET_GUEST_DEBUG */ #endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset) int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{ {
struct kvm_signal_mask *sigmask; struct kvm_signal_mask *sigmask;
int r; int r;
@ -1690,7 +1690,7 @@ int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
return 0; return 0;
} }
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr) int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
{ {
return kvm_arch_on_sigbus_vcpu(env, code, addr); return kvm_arch_on_sigbus_vcpu(env, code, addr);
} }


@ -22,7 +22,7 @@ int kvm_pit_in_kernel(void)
} }
int kvm_init_vcpu(CPUState *env) int kvm_init_vcpu(CPUArchState *env)
{ {
return -ENOSYS; return -ENOSYS;
} }
@ -46,19 +46,19 @@ void kvm_flush_coalesced_mmio_buffer(void)
{ {
} }
void kvm_cpu_synchronize_state(CPUState *env) void kvm_cpu_synchronize_state(CPUArchState *env)
{ {
} }
void kvm_cpu_synchronize_post_reset(CPUState *env) void kvm_cpu_synchronize_post_reset(CPUArchState *env)
{ {
} }
void kvm_cpu_synchronize_post_init(CPUState *env) void kvm_cpu_synchronize_post_init(CPUArchState *env)
{ {
} }
int kvm_cpu_exec(CPUState *env) int kvm_cpu_exec(CPUArchState *env)
{ {
abort (); abort ();
} }
@ -87,29 +87,29 @@ void kvm_setup_guest_memory(void *start, size_t size)
{ {
} }
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{ {
return -ENOSYS; return -ENOSYS;
} }
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type) target_ulong len, int type)
{ {
return -EINVAL; return -EINVAL;
} }
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type) target_ulong len, int type)
{ {
return -EINVAL; return -EINVAL;
} }
void kvm_remove_all_breakpoints(CPUState *current_env) void kvm_remove_all_breakpoints(CPUArchState *current_env)
{ {
} }
#ifndef _WIN32 #ifndef _WIN32
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset) int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{ {
abort(); abort();
} }
@ -125,7 +125,7 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign)
return -ENOSYS; return -ENOSYS;
} }
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr) int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
{ {
return 1; return 1;
} }

kvm.h

@ -61,9 +61,9 @@ int kvm_has_gsi_routing(void);
int kvm_allows_irq0_override(void); int kvm_allows_irq0_override(void);
#ifdef NEED_CPU_H #ifdef NEED_CPU_H
int kvm_init_vcpu(CPUState *env); int kvm_init_vcpu(CPUArchState *env);
int kvm_cpu_exec(CPUState *env); int kvm_cpu_exec(CPUArchState *env);
#if !defined(CONFIG_USER_ONLY) #if !defined(CONFIG_USER_ONLY)
void kvm_setup_guest_memory(void *start, size_t size); void kvm_setup_guest_memory(void *start, size_t size);
@ -73,19 +73,19 @@ int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
void kvm_flush_coalesced_mmio_buffer(void); void kvm_flush_coalesced_mmio_buffer(void);
#endif #endif
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type); target_ulong len, int type);
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
target_ulong len, int type); target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUState *current_env); void kvm_remove_all_breakpoints(CPUArchState *current_env);
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap); int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
#ifndef _WIN32 #ifndef _WIN32
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset); int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset);
#endif #endif
int kvm_pit_in_kernel(void); int kvm_pit_in_kernel(void);
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr); int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
int kvm_on_sigbus(int code, void *addr); int kvm_on_sigbus(int code, void *addr);
/* internal API */ /* internal API */
@ -98,20 +98,20 @@ int kvm_ioctl(KVMState *s, int type, ...);
int kvm_vm_ioctl(KVMState *s, int type, ...); int kvm_vm_ioctl(KVMState *s, int type, ...);
int kvm_vcpu_ioctl(CPUState *env, int type, ...); int kvm_vcpu_ioctl(CPUArchState *env, int type, ...);
/* Arch specific hooks */ /* Arch specific hooks */
extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run); void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run);
void kvm_arch_post_run(CPUState *env, struct kvm_run *run); void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run);
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run); int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run);
int kvm_arch_process_async_events(CPUState *env); int kvm_arch_process_async_events(CPUArchState *env);
int kvm_arch_get_registers(CPUState *env); int kvm_arch_get_registers(CPUArchState *env);
/* state subset only touched by the VCPU itself during runtime */ /* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE 1 #define KVM_PUT_RUNTIME_STATE 1
@ -120,15 +120,15 @@ int kvm_arch_get_registers(CPUState *env);
/* full state set, modified during initialization or on vmload */ /* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3 #define KVM_PUT_FULL_STATE 3
int kvm_arch_put_registers(CPUState *env, int level); int kvm_arch_put_registers(CPUArchState *env, int level);
int kvm_arch_init(KVMState *s); int kvm_arch_init(KVMState *s);
int kvm_arch_init_vcpu(CPUState *env); int kvm_arch_init_vcpu(CPUArchState *env);
void kvm_arch_reset_vcpu(CPUState *env); void kvm_arch_reset_vcpu(CPUArchState *env);
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr); int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
int kvm_arch_on_sigbus(int code, void *addr); int kvm_arch_on_sigbus(int code, void *addr);
void kvm_arch_init_irq_routing(KVMState *s); void kvm_arch_init_irq_routing(KVMState *s);
@ -153,14 +153,14 @@ struct kvm_sw_breakpoint {
QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint); QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
target_ulong pc); target_ulong pc);
int kvm_sw_breakpoints_active(CPUState *env); int kvm_sw_breakpoints_active(CPUArchState *env);
int kvm_arch_insert_sw_breakpoint(CPUState *current_env, int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env,
struct kvm_sw_breakpoint *bp); struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *current_env, int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env,
struct kvm_sw_breakpoint *bp); struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr, int kvm_arch_insert_hw_breakpoint(target_ulong addr,
target_ulong len, int type); target_ulong len, int type);
@ -168,35 +168,35 @@ int kvm_arch_remove_hw_breakpoint(target_ulong addr,
target_ulong len, int type); target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void); void kvm_arch_remove_all_hw_breakpoints(void);
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg); void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg);
bool kvm_arch_stop_on_emulation_error(CPUState *env); bool kvm_arch_stop_on_emulation_error(CPUArchState *env);
int kvm_check_extension(KVMState *s, unsigned int extension); int kvm_check_extension(KVMState *s, unsigned int extension);
uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function, uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
uint32_t index, int reg); uint32_t index, int reg);
void kvm_cpu_synchronize_state(CPUState *env); void kvm_cpu_synchronize_state(CPUArchState *env);
void kvm_cpu_synchronize_post_reset(CPUState *env); void kvm_cpu_synchronize_post_reset(CPUArchState *env);
void kvm_cpu_synchronize_post_init(CPUState *env); void kvm_cpu_synchronize_post_init(CPUArchState *env);
/* generic hooks - to be moved/refactored once there are more users */ /* generic hooks - to be moved/refactored once there are more users */
static inline void cpu_synchronize_state(CPUState *env) static inline void cpu_synchronize_state(CPUArchState *env)
{ {
if (kvm_enabled()) { if (kvm_enabled()) {
kvm_cpu_synchronize_state(env); kvm_cpu_synchronize_state(env);
} }
} }
static inline void cpu_synchronize_post_reset(CPUState *env) static inline void cpu_synchronize_post_reset(CPUArchState *env)
{ {
if (kvm_enabled()) { if (kvm_enabled()) {
kvm_cpu_synchronize_post_reset(env); kvm_cpu_synchronize_post_reset(env);
} }
} }
static inline void cpu_synchronize_post_init(CPUState *env) static inline void cpu_synchronize_post_init(CPUArchState *env)
{ {
if (kvm_enabled()) { if (kvm_enabled()) {
kvm_cpu_synchronize_post_init(env); kvm_cpu_synchronize_post_init(env);


@ -1044,7 +1044,7 @@ static inline void bswap_sym(struct elf_sym *sym) { }
#endif #endif
#ifdef USE_ELF_CORE_DUMP #ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUState *); static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */ #endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias); static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
@ -1930,7 +1930,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
* from given cpu into just specified register set. Prototype is: * from given cpu into just specified register set. Prototype is:
* *
* static void elf_core_copy_regs(taret_elf_gregset_t *regs, * static void elf_core_copy_regs(taret_elf_gregset_t *regs,
* const CPUState *env); * const CPUArchState *env);
* *
* Parameters: * Parameters:
* regs - copy register values into here (allocated and zeroed by caller) * regs - copy register values into here (allocated and zeroed by caller)
@ -2054,8 +2054,8 @@ static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t); static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *); static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *); static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUState *); static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
static void fill_thread_info(struct elf_note_info *, const CPUState *); static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
static int core_dump_filename(const TaskState *, char *, size_t); static int core_dump_filename(const TaskState *, char *, size_t);
static int dump_write(int, const void *, size_t); static int dump_write(int, const void *, size_t);
@ -2448,7 +2448,7 @@ static int write_note(struct memelfnote *men, int fd)
return (0); return (0);
} }
static void fill_thread_info(struct elf_note_info *info, const CPUState *env) static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{ {
TaskState *ts = (TaskState *)env->opaque; TaskState *ts = (TaskState *)env->opaque;
struct elf_thread_status *ets; struct elf_thread_status *ets;
@ -2466,10 +2466,10 @@ static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
} }
static int fill_note_info(struct elf_note_info *info, static int fill_note_info(struct elf_note_info *info,
long signr, const CPUState *env) long signr, const CPUArchState *env)
{ {
#define NUMNOTES 3 #define NUMNOTES 3
CPUState *cpu = NULL; CPUArchState *cpu = NULL;
TaskState *ts = (TaskState *)env->opaque; TaskState *ts = (TaskState *)env->opaque;
int i; int i;
@ -2595,7 +2595,7 @@ static int write_note_info(struct elf_note_info *info, int fd)
* handler (provided that target process haven't registered * handler (provided that target process haven't registered
* handler for that) that does the dump when signal is received. * handler for that) that does the dump when signal is received.
*/ */
static int elf_core_dump(int signr, const CPUState *env) static int elf_core_dump(int signr, const CPUArchState *env)
{ {
const TaskState *ts = (const TaskState *)env->opaque; const TaskState *ts = (const TaskState *)env->opaque;
struct vm_area_struct *vma = NULL; struct vm_area_struct *vma = NULL;


@ -146,7 +146,7 @@ static inline void exclusive_idle(void)
Must only be called from outside cpu_arm_exec. */ Must only be called from outside cpu_arm_exec. */
static inline void start_exclusive(void) static inline void start_exclusive(void)
{ {
CPUState *other; CPUArchState *other;
pthread_mutex_lock(&exclusive_lock); pthread_mutex_lock(&exclusive_lock);
exclusive_idle(); exclusive_idle();
@ -172,7 +172,7 @@ static inline void end_exclusive(void)
} }
/* Wait for exclusive ops to finish, and begin cpu execution. */ /* Wait for exclusive ops to finish, and begin cpu execution. */
static inline void cpu_exec_start(CPUState *env) static inline void cpu_exec_start(CPUArchState *env)
{ {
pthread_mutex_lock(&exclusive_lock); pthread_mutex_lock(&exclusive_lock);
exclusive_idle(); exclusive_idle();
@ -181,7 +181,7 @@ static inline void cpu_exec_start(CPUState *env)
} }
/* Mark cpu as not executing, and release pending exclusive ops. */ /* Mark cpu as not executing, and release pending exclusive ops. */
static inline void cpu_exec_end(CPUState *env) static inline void cpu_exec_end(CPUArchState *env)
{ {
pthread_mutex_lock(&exclusive_lock); pthread_mutex_lock(&exclusive_lock);
env->running = 0; env->running = 0;
@ -206,11 +206,11 @@ void cpu_list_unlock(void)
} }
#else /* if !CONFIG_USE_NPTL */ #else /* if !CONFIG_USE_NPTL */
/* These are no-ops because we are not threadsafe. */ /* These are no-ops because we are not threadsafe. */
static inline void cpu_exec_start(CPUState *env) static inline void cpu_exec_start(CPUArchState *env)
{ {
} }
static inline void cpu_exec_end(CPUState *env) static inline void cpu_exec_end(CPUArchState *env)
{ {
} }
@ -2888,7 +2888,7 @@ void cpu_loop(CPUS390XState *env)
#endif /* TARGET_S390X */ #endif /* TARGET_S390X */
THREAD CPUState *thread_env; THREAD CPUArchState *thread_env;
void task_settid(TaskState *ts) void task_settid(TaskState *ts)
{ {
@ -3277,7 +3277,7 @@ int main(int argc, char **argv, char **envp)
struct image_info info1, *info = &info1; struct image_info info1, *info = &info1;
struct linux_binprm bprm; struct linux_binprm bprm;
TaskState *ts; TaskState *ts;
CPUState *env; CPUArchState *env;
int optind; int optind;
char **target_environ, **wrk; char **target_environ, **wrk;
char **target_argv; char **target_argv;


@ -171,7 +171,7 @@ struct linux_binprm {
char **argv; char **argv;
char **envp; char **envp;
char * filename; /* Name of binary */ char * filename; /* Name of binary */
int (*core_dump)(int, const CPUState *); /* coredump routine */ int (*core_dump)(int, const CPUArchState *); /* coredump routine */
}; };
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
@ -196,8 +196,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8); abi_long arg8);
void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2); void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
extern THREAD CPUState *thread_env; extern THREAD CPUArchState *thread_env;
void cpu_loop(CPUState *env); void cpu_loop(CPUArchState *env);
char *target_strerror(int err); char *target_strerror(int err);
int get_osversion(void); int get_osversion(void);
void fork_start(void); void fork_start(void);
@ -219,15 +219,15 @@ void print_syscall_ret(int num, abi_long arg1);
extern int do_strace; extern int do_strace;
/* signal.c */ /* signal.c */
void process_pending_signals(CPUState *cpu_env); void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void); void signal_init(void);
int queue_signal(CPUState *env, int sig, target_siginfo_t *info); int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
int target_to_host_signal(int sig); int target_to_host_signal(int sig);
int host_to_target_signal(int sig); int host_to_target_signal(int sig);
long do_sigreturn(CPUState *env); long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUState *env); long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
#ifdef TARGET_I386 #ifdef TARGET_I386


@ -347,7 +347,7 @@ void signal_init(void)
/* signal queue handling */ /* signal queue handling */
static inline struct sigqueue *alloc_sigqueue(CPUState *env) static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
{ {
TaskState *ts = env->opaque; TaskState *ts = env->opaque;
struct sigqueue *q = ts->first_free; struct sigqueue *q = ts->first_free;
@ -357,7 +357,7 @@ static inline struct sigqueue *alloc_sigqueue(CPUState *env)
return q; return q;
} }
static inline void free_sigqueue(CPUState *env, struct sigqueue *q) static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
{ {
TaskState *ts = env->opaque; TaskState *ts = env->opaque;
q->next = ts->first_free; q->next = ts->first_free;
@ -415,7 +415,7 @@ static void QEMU_NORETURN force_sig(int target_sig)
/* queue a signal so that it will be send to the virtual CPU as soon /* queue a signal so that it will be send to the virtual CPU as soon
as possible */ as possible */
int queue_signal(CPUState *env, int sig, target_siginfo_t *info) int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{ {
TaskState *ts = env->opaque; TaskState *ts = env->opaque;
struct emulated_sigtable *k; struct emulated_sigtable *k;
@ -5214,25 +5214,25 @@ long do_rt_sigreturn(CPUAlphaState *env)
#else #else
static void setup_frame(int sig, struct target_sigaction *ka, static void setup_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUState *env) target_sigset_t *set, CPUArchState *env)
{ {
fprintf(stderr, "setup_frame: not implemented\n"); fprintf(stderr, "setup_frame: not implemented\n");
} }
static void setup_rt_frame(int sig, struct target_sigaction *ka, static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info, target_siginfo_t *info,
target_sigset_t *set, CPUState *env) target_sigset_t *set, CPUArchState *env)
{ {
fprintf(stderr, "setup_rt_frame: not implemented\n"); fprintf(stderr, "setup_rt_frame: not implemented\n");
} }
long do_sigreturn(CPUState *env) long do_sigreturn(CPUArchState *env)
{ {
fprintf(stderr, "do_sigreturn: not implemented\n"); fprintf(stderr, "do_sigreturn: not implemented\n");
return -TARGET_ENOSYS; return -TARGET_ENOSYS;
} }
long do_rt_sigreturn(CPUState *env) long do_rt_sigreturn(CPUArchState *env)
{ {
fprintf(stderr, "do_rt_sigreturn: not implemented\n"); fprintf(stderr, "do_rt_sigreturn: not implemented\n");
return -TARGET_ENOSYS; return -TARGET_ENOSYS;
@ -5240,7 +5240,7 @@ long do_rt_sigreturn(CPUState *env)
#endif #endif
void process_pending_signals(CPUState *cpu_env) void process_pending_signals(CPUArchState *cpu_env)
{ {
int sig; int sig;
abi_ulong handler; abi_ulong handler;


@ -3955,7 +3955,7 @@ static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct { typedef struct {
CPUState *env; CPUArchState *env;
pthread_mutex_t mutex; pthread_mutex_t mutex;
pthread_cond_t cond; pthread_cond_t cond;
pthread_t thread; pthread_t thread;
@ -3968,7 +3968,7 @@ typedef struct {
static void *clone_func(void *arg) static void *clone_func(void *arg)
{ {
new_thread_info *info = arg; new_thread_info *info = arg;
CPUState *env; CPUArchState *env;
TaskState *ts; TaskState *ts;
env = info->env; env = info->env;
@ -3998,7 +3998,7 @@ static void *clone_func(void *arg)
static int clone_func(void *arg) static int clone_func(void *arg)
{ {
CPUState *env = arg; CPUArchState *env = arg;
cpu_loop(env); cpu_loop(env);
/* never exits */ /* never exits */
return 0; return 0;
@ -4007,13 +4007,13 @@ static int clone_func(void *arg)
/* do_fork() Must return host values and target errnos (unlike most /* do_fork() Must return host values and target errnos (unlike most
do_*() functions). */ do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp, static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
abi_ulong parent_tidptr, target_ulong newtls, abi_ulong parent_tidptr, target_ulong newtls,
abi_ulong child_tidptr) abi_ulong child_tidptr)
{ {
int ret; int ret;
TaskState *ts; TaskState *ts;
CPUState *new_env; CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL) #if defined(CONFIG_USE_NPTL)
unsigned int nptl_flags; unsigned int nptl_flags;
sigset_t sigmask; sigset_t sigmask;
@ -4640,7 +4640,7 @@ int get_osversion(void)
static int open_self_maps(void *cpu_env, int fd) static int open_self_maps(void *cpu_env, int fd)
{ {
TaskState *ts = ((CPUState *)cpu_env)->opaque; TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
(unsigned long long)ts->info->stack_limit, (unsigned long long)ts->info->stack_limit,
@ -4653,7 +4653,7 @@ static int open_self_maps(void *cpu_env, int fd)
static int open_self_stat(void *cpu_env, int fd) static int open_self_stat(void *cpu_env, int fd)
{ {
TaskState *ts = ((CPUState *)cpu_env)->opaque; TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
abi_ulong start_stack = ts->info->start_stack; abi_ulong start_stack = ts->info->start_stack;
int i; int i;
@ -4678,7 +4678,7 @@ static int open_self_stat(void *cpu_env, int fd)
static int open_self_auxv(void *cpu_env, int fd) static int open_self_auxv(void *cpu_env, int fd)
{ {
TaskState *ts = ((CPUState *)cpu_env)->opaque; TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
abi_ulong auxv = ts->info->saved_auxv; abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len; abi_ulong len = ts->info->auxv_len;
char *ptr; char *ptr;
@ -4784,13 +4784,13 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
be disabling signals. */ be disabling signals. */
if (first_cpu->next_cpu) { if (first_cpu->next_cpu) {
TaskState *ts; TaskState *ts;
CPUState **lastp; CPUArchState **lastp;
CPUState *p; CPUArchState *p;
cpu_list_lock(); cpu_list_lock();
lastp = &first_cpu; lastp = &first_cpu;
p = first_cpu; p = first_cpu;
while (p && p != (CPUState *)cpu_env) { while (p && p != (CPUArchState *)cpu_env) {
lastp = &p->next_cpu; lastp = &p->next_cpu;
p = p->next_cpu; p = p->next_cpu;
} }
@ -4801,7 +4801,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
/* Remove the CPU from the list. */ /* Remove the CPU from the list. */
*lastp = p->next_cpu; *lastp = p->next_cpu;
cpu_list_unlock(); cpu_list_unlock();
ts = ((CPUState *)cpu_env)->opaque; ts = ((CPUArchState *)cpu_env)->opaque;
if (ts->child_tidptr) { if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr); put_user_u32(0, ts->child_tidptr);
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
@ -6091,7 +6091,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break; break;
case TARGET_NR_mprotect: case TARGET_NR_mprotect:
{ {
TaskState *ts = ((CPUState *)cpu_env)->opaque; TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
/* Special hack to detect libc making the stack executable. */ /* Special hack to detect libc making the stack executable. */
if ((arg3 & PROT_GROWSDOWN) if ((arg3 & PROT_GROWSDOWN)
&& arg1 >= ts->info->stack_limit && arg1 >= ts->info->stack_limit
@ -7076,7 +7076,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
defined(TARGET_M68K) || defined(TARGET_S390X) defined(TARGET_M68K) || defined(TARGET_S390X)
ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
break; break;
#else #else
goto unimplemented; goto unimplemented;


@ -156,7 +156,7 @@ struct Monitor {
int outbuf_index; int outbuf_index;
ReadLineState *rs; ReadLineState *rs;
MonitorControl *mc; MonitorControl *mc;
CPUState *mon_cpu; CPUArchState *mon_cpu;
BlockDriverCompletionFunc *password_completion_cb; BlockDriverCompletionFunc *password_completion_cb;
void *password_opaque; void *password_opaque;
#ifdef CONFIG_DEBUG_MONITOR #ifdef CONFIG_DEBUG_MONITOR
@ -742,7 +742,7 @@ CommandInfoList *qmp_query_commands(Error **errp)
/* set the current CPU defined by the user */ /* set the current CPU defined by the user */
int monitor_set_cpu(int cpu_index) int monitor_set_cpu(int cpu_index)
{ {
CPUState *env; CPUArchState *env;
for(env = first_cpu; env != NULL; env = env->next_cpu) { for(env = first_cpu; env != NULL; env = env->next_cpu) {
if (env->cpu_index == cpu_index) { if (env->cpu_index == cpu_index) {
@ -753,7 +753,7 @@ int monitor_set_cpu(int cpu_index)
return -1; return -1;
} }
static CPUState *mon_get_cpu(void) static CPUArchState *mon_get_cpu(void)
{ {
if (!cur_mon->mon_cpu) { if (!cur_mon->mon_cpu) {
monitor_set_cpu(0); monitor_set_cpu(0);
@ -769,7 +769,7 @@ int monitor_get_cpu_index(void)
static void do_info_registers(Monitor *mon) static void do_info_registers(Monitor *mon)
{ {
CPUState *env; CPUArchState *env;
env = mon_get_cpu(); env = mon_get_cpu();
#ifdef TARGET_I386 #ifdef TARGET_I386
cpu_dump_state(env, (FILE *)mon, monitor_fprintf, cpu_dump_state(env, (FILE *)mon, monitor_fprintf,
@ -806,7 +806,7 @@ static void do_info_history(Monitor *mon)
/* XXX: not implemented in other targets */ /* XXX: not implemented in other targets */
static void do_info_cpu_stats(Monitor *mon) static void do_info_cpu_stats(Monitor *mon)
{ {
CPUState *env; CPUArchState *env;
env = mon_get_cpu(); env = mon_get_cpu();
cpu_dump_statistics(env, (FILE *)mon, &monitor_fprintf, 0); cpu_dump_statistics(env, (FILE *)mon, &monitor_fprintf, 0);
@ -987,7 +987,7 @@ static void monitor_printc(Monitor *mon, int c)
static void memory_dump(Monitor *mon, int count, int format, int wsize, static void memory_dump(Monitor *mon, int count, int format, int wsize,
target_phys_addr_t addr, int is_physical) target_phys_addr_t addr, int is_physical)
{ {
CPUState *env; CPUArchState *env;
int l, line_size, i, max_digits, len; int l, line_size, i, max_digits, len;
uint8_t buf[16]; uint8_t buf[16];
uint64_t v; uint64_t v;
@ -1547,7 +1547,7 @@ static void print_pte(Monitor *mon, target_phys_addr_t addr,
pte & PG_RW_MASK ? 'W' : '-'); pte & PG_RW_MASK ? 'W' : '-');
} }
static void tlb_info_32(Monitor *mon, CPUState *env) static void tlb_info_32(Monitor *mon, CPUArchState *env)
{ {
unsigned int l1, l2; unsigned int l1, l2;
uint32_t pgd, pde, pte; uint32_t pgd, pde, pte;
@ -1575,7 +1575,7 @@ static void tlb_info_32(Monitor *mon, CPUState *env)
} }
} }
static void tlb_info_pae32(Monitor *mon, CPUState *env) static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{ {
unsigned int l1, l2, l3; unsigned int l1, l2, l3;
uint64_t pdpe, pde, pte; uint64_t pdpe, pde, pte;
@ -1615,7 +1615,7 @@ static void tlb_info_pae32(Monitor *mon, CPUState *env)
} }
#ifdef TARGET_X86_64 #ifdef TARGET_X86_64
static void tlb_info_64(Monitor *mon, CPUState *env) static void tlb_info_64(Monitor *mon, CPUArchState *env)
{ {
uint64_t l1, l2, l3, l4; uint64_t l1, l2, l3, l4;
uint64_t pml4e, pdpe, pde, pte; uint64_t pml4e, pdpe, pde, pte;
@ -1674,7 +1674,7 @@ static void tlb_info_64(Monitor *mon, CPUState *env)
static void tlb_info(Monitor *mon) static void tlb_info(Monitor *mon)
{ {
CPUState *env; CPUArchState *env;
env = mon_get_cpu(); env = mon_get_cpu();
@ -1719,7 +1719,7 @@ static void mem_print(Monitor *mon, target_phys_addr_t *pstart,
} }
} }
static void mem_info_32(Monitor *mon, CPUState *env) static void mem_info_32(Monitor *mon, CPUArchState *env)
{ {
unsigned int l1, l2; unsigned int l1, l2;
int prot, last_prot; int prot, last_prot;
@ -1760,7 +1760,7 @@ static void mem_info_32(Monitor *mon, CPUState *env)
mem_print(mon, &start, &last_prot, (target_phys_addr_t)1 << 32, 0); mem_print(mon, &start, &last_prot, (target_phys_addr_t)1 << 32, 0);
} }
static void mem_info_pae32(Monitor *mon, CPUState *env) static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{ {
unsigned int l1, l2, l3; unsigned int l1, l2, l3;
int prot, last_prot; int prot, last_prot;
@ -1817,7 +1817,7 @@ static void mem_info_pae32(Monitor *mon, CPUState *env)
#ifdef TARGET_X86_64 #ifdef TARGET_X86_64
static void mem_info_64(Monitor *mon, CPUState *env) static void mem_info_64(Monitor *mon, CPUArchState *env)
{ {
int prot, last_prot; int prot, last_prot;
uint64_t l1, l2, l3, l4; uint64_t l1, l2, l3, l4;
@ -1897,7 +1897,7 @@ static void mem_info_64(Monitor *mon, CPUState *env)
static void mem_info(Monitor *mon) static void mem_info(Monitor *mon)
{ {
CPUState *env; CPUArchState *env;
env = mon_get_cpu(); env = mon_get_cpu();
@ -1936,7 +1936,7 @@ static void print_tlb(Monitor *mon, int idx, tlb_t *tlb)
static void tlb_info(Monitor *mon) static void tlb_info(Monitor *mon)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
int i; int i;
monitor_printf (mon, "ITLB:\n"); monitor_printf (mon, "ITLB:\n");
@ -1952,7 +1952,7 @@ static void tlb_info(Monitor *mon)
#if defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_XTENSA) #if defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_XTENSA)
static void tlb_info(Monitor *mon) static void tlb_info(Monitor *mon)
{ {
CPUState *env1 = mon_get_cpu(); CPUArchState *env1 = mon_get_cpu();
dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1); dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
} }
@ -1966,7 +1966,7 @@ static void do_info_mtree(Monitor *mon)
static void do_info_numa(Monitor *mon) static void do_info_numa(Monitor *mon)
{ {
int i; int i;
CPUState *env; CPUArchState *env;
monitor_printf(mon, "%d nodes\n", nb_numa_nodes); monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
for (i = 0; i < nb_numa_nodes; i++) { for (i = 0; i < nb_numa_nodes; i++) {
@ -2173,7 +2173,7 @@ static void do_acl_remove(Monitor *mon, const QDict *qdict)
#if defined(TARGET_I386) #if defined(TARGET_I386)
static void do_inject_mce(Monitor *mon, const QDict *qdict) static void do_inject_mce(Monitor *mon, const QDict *qdict)
{ {
CPUState *cenv; CPUArchState *cenv;
int cpu_index = qdict_get_int(qdict, "cpu_index"); int cpu_index = qdict_get_int(qdict, "cpu_index");
int bank = qdict_get_int(qdict, "bank"); int bank = qdict_get_int(qdict, "bank");
uint64_t status = qdict_get_int(qdict, "status"); uint64_t status = qdict_get_int(qdict, "status");
@ -2625,7 +2625,7 @@ typedef struct MonitorDef {
#if defined(TARGET_I386) #if defined(TARGET_I386)
static target_long monitor_get_pc (const struct MonitorDef *md, int val) static target_long monitor_get_pc (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return env->eip + env->segs[R_CS].base; return env->eip + env->segs[R_CS].base;
} }
#endif #endif
@ -2633,7 +2633,7 @@ static target_long monitor_get_pc (const struct MonitorDef *md, int val)
#if defined(TARGET_PPC) #if defined(TARGET_PPC)
static target_long monitor_get_ccr (const struct MonitorDef *md, int val) static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
unsigned int u; unsigned int u;
int i; int i;
@ -2646,31 +2646,31 @@ static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
static target_long monitor_get_msr (const struct MonitorDef *md, int val) static target_long monitor_get_msr (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return env->msr; return env->msr;
} }
static target_long monitor_get_xer (const struct MonitorDef *md, int val) static target_long monitor_get_xer (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return env->xer; return env->xer;
} }
static target_long monitor_get_decr (const struct MonitorDef *md, int val) static target_long monitor_get_decr (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return cpu_ppc_load_decr(env); return cpu_ppc_load_decr(env);
} }
static target_long monitor_get_tbu (const struct MonitorDef *md, int val) static target_long monitor_get_tbu (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return cpu_ppc_load_tbu(env); return cpu_ppc_load_tbu(env);
} }
static target_long monitor_get_tbl (const struct MonitorDef *md, int val) static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return cpu_ppc_load_tbl(env); return cpu_ppc_load_tbl(env);
} }
#endif #endif
@ -2679,7 +2679,7 @@ static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
#ifndef TARGET_SPARC64 #ifndef TARGET_SPARC64
static target_long monitor_get_psr (const struct MonitorDef *md, int val) static target_long monitor_get_psr (const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return cpu_get_psr(env); return cpu_get_psr(env);
} }
@ -2687,7 +2687,7 @@ static target_long monitor_get_psr (const struct MonitorDef *md, int val)
static target_long monitor_get_reg(const struct MonitorDef *md, int val) static target_long monitor_get_reg(const struct MonitorDef *md, int val)
{ {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
return env->regwptr[val]; return env->regwptr[val];
} }
#endif #endif
@ -3019,7 +3019,7 @@ static int get_monitor_def(target_long *pval, const char *name)
if (md->get_value) { if (md->get_value) {
*pval = md->get_value(md, md->offset); *pval = md->get_value(md, md->offset);
} else { } else {
CPUState *env = mon_get_cpu(); CPUArchState *env = mon_get_cpu();
ptr = (uint8_t *)env + md->offset; ptr = (uint8_t *)env + md->offset;
switch(md->type) { switch(md->type) {
case MD_I32: case MD_I32:


@ -34,7 +34,7 @@
#pragma GCC poison TARGET_PAGE_BITS #pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN #pragma GCC poison TARGET_PAGE_ALIGN
#pragma GCC poison CPUState #pragma GCC poison CPUArchState
#pragma GCC poison env #pragma GCC poison env
#pragma GCC poison lduw_phys #pragma GCC poison lduw_phys


@ -7,14 +7,14 @@
* This code is licensed under the GPL * This code is licensed under the GPL
*/ */
static inline uint32_t softmmu_tget32(CPUState *env, uint32_t addr) static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr)
{ {
uint32_t val; uint32_t val;
cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0); cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0);
return tswap32(val); return tswap32(val);
} }
static inline uint32_t softmmu_tget8(CPUState *env, uint32_t addr) static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr)
{ {
uint8_t val; uint8_t val;
@ -26,7 +26,7 @@ static inline uint32_t softmmu_tget8(CPUState *env, uint32_t addr)
#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; }) #define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
#define get_user_ual(arg, p) get_user_u32(arg, p) #define get_user_ual(arg, p) get_user_u32(arg, p)
static inline void softmmu_tput32(CPUState *env, uint32_t addr, uint32_t val) static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val)
{ {
val = tswap32(val); val = tswap32(val);
cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1); cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1);
@ -34,7 +34,7 @@ static inline void softmmu_tput32(CPUState *env, uint32_t addr, uint32_t val)
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; }) #define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
#define put_user_ual(arg, p) put_user_u32(arg, p) #define put_user_ual(arg, p) put_user_u32(arg, p)
static void *softmmu_lock_user(CPUState *env, uint32_t addr, uint32_t len, static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len,
int copy) int copy)
{ {
uint8_t *p; uint8_t *p;
@ -45,7 +45,7 @@ static void *softmmu_lock_user(CPUState *env, uint32_t addr, uint32_t len,
return p; return p;
} }
#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy) #define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
static char *softmmu_lock_user_string(CPUState *env, uint32_t addr) static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr)
{ {
char *p; char *p;
char *s; char *s;
@ -60,7 +60,7 @@ static char *softmmu_lock_user_string(CPUState *env, uint32_t addr)
return s; return s;
} }
#define lock_user_string(p) softmmu_lock_user_string(env, p) #define lock_user_string(p) softmmu_lock_user_string(env, p)
static void softmmu_unlock_user(CPUState *env, void *p, target_ulong addr, static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
target_ulong len) target_ulong len)
{ {
if (len) if (len)


@ -25,7 +25,7 @@
#define TARGET_LONG_BITS 64 #define TARGET_LONG_BITS 64
#define CPUState struct CPUAlphaState #define CPUArchState struct CPUAlphaState
#include "cpu-defs.h" #include "cpu-defs.h"


@ -23,7 +23,7 @@
#define ELF_MACHINE EM_ARM #define ELF_MACHINE EM_ARM
#define CPUState struct CPUARMState #define CPUArchState struct CPUARMState
#include "config.h" #include "config.h"
#include "qemu-common.h" #include "qemu-common.h"


@ -25,7 +25,7 @@
#define TARGET_LONG_BITS 32 #define TARGET_LONG_BITS 32
#define CPUState struct CPUCRISState #define CPUArchState struct CPUCRISState
#include "cpu-defs.h" #include "cpu-defs.h"


@ -42,7 +42,7 @@
#define ELF_MACHINE EM_386 #define ELF_MACHINE EM_386
#endif #endif
#define CPUState struct CPUX86State #define CPUArchState struct CPUX86State
#include "cpu-defs.h" #include "cpu-defs.h"


@ -22,7 +22,7 @@
#define TARGET_LONG_BITS 32 #define TARGET_LONG_BITS 32
#define CPUState struct CPULM32State #define CPUArchState struct CPULM32State
#include "config.h" #include "config.h"
#include "qemu-common.h" #include "qemu-common.h"


@ -22,7 +22,7 @@
#define TARGET_LONG_BITS 32 #define TARGET_LONG_BITS 32
#define CPUState struct CPUM68KState #define CPUArchState struct CPUM68KState
#include "config.h" #include "config.h"
#include "qemu-common.h" #include "qemu-common.h"


@ -24,7 +24,7 @@
#define TARGET_LONG_BITS 32 #define TARGET_LONG_BITS 32
#define CPUState struct CPUMBState #define CPUArchState struct CPUMBState
#include "cpu-defs.h" #include "cpu-defs.h"
#include "softfloat.h" #include "softfloat.h"


@ -7,7 +7,7 @@
#define ELF_MACHINE EM_MIPS #define ELF_MACHINE EM_MIPS
#define CPUState struct CPUMIPSState #define CPUArchState struct CPUMIPSState
#include "config.h" #include "config.h"
#include "qemu-common.h" #include "qemu-common.h"


@ -71,7 +71,7 @@
#endif /* defined (TARGET_PPC64) */ #endif /* defined (TARGET_PPC64) */
#define CPUState struct CPUPPCState #define CPUArchState struct CPUPPCState
#include "cpu-defs.h" #include "cpu-defs.h"


@ -26,7 +26,7 @@
#define ELF_MACHINE EM_S390 #define ELF_MACHINE EM_S390
#define CPUState struct CPUS390XState #define CPUArchState struct CPUS390XState
#include "cpu-defs.h" #include "cpu-defs.h"
#define TARGET_PAGE_BITS 12 #define TARGET_PAGE_BITS 12


@ -37,7 +37,7 @@
#define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R) #define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R)
#define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R) #define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R)
#define CPUState struct CPUSH4State #define CPUArchState struct CPUSH4State
#include "cpu-defs.h" #include "cpu-defs.h"


@ -23,7 +23,7 @@
# endif # endif
#endif #endif
#define CPUState struct CPUSPARCState #define CPUArchState struct CPUSPARCState
#include "cpu-defs.h" #include "cpu-defs.h"


@ -18,7 +18,7 @@
#define ELF_MACHINE EM_UNICORE32 #define ELF_MACHINE EM_UNICORE32
#define CPUState struct CPUUniCore32State #define CPUArchState struct CPUUniCore32State
#include "config.h" #include "config.h"
#include "qemu-common.h" #include "qemu-common.h"


@@ -31,7 +31,7 @@
 #define TARGET_LONG_BITS 32
 #define ELF_MACHINE EM_XTENSA
-#define CPUState struct CPUXtensaState
+#define CPUArchState struct CPUXtensaState
 #include "config.h"
 #include "qemu-common.h"


@@ -990,10 +990,10 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                     TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
     /* In the
-     * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
+     * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_read))]
      * below, the offset is likely to exceed 12 bits if mem_index != 0 and
      * not exceed otherwise, so use an
-     * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+     * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
      * before.
      */
     if (mem_index)
@@ -1001,7 +1001,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
                         (mem_index << (TLB_SHIFT & 1)) |
                         ((16 - (TLB_SHIFT >> 1)) << 8));
     tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_read));
+                    offsetof(CPUArchState, tlb_table[0][0].addr_read));
     tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                     TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
     /* Check alignment. */
@@ -1012,12 +1012,12 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     /* XXX: possibly we could use a block data load or writeback in
      * the first access. */
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
+                    offsetof(CPUArchState, tlb_table[0][0].addr_read) + 4);
     tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                     TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
 # endif
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addend));
+                    offsetof(CPUArchState, tlb_table[0][0].addend));
     switch (opc) {
     case 0:
@@ -1210,10 +1210,10 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                     TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
     /* In the
-     * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
+     * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_write))]
      * below, the offset is likely to exceed 12 bits if mem_index != 0 and
      * not exceed otherwise, so use an
-     * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+     * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
      * before.
      */
     if (mem_index)
@@ -1221,7 +1221,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
                         (mem_index << (TLB_SHIFT & 1)) |
                         ((16 - (TLB_SHIFT >> 1)) << 8));
     tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_write));
+                    offsetof(CPUArchState, tlb_table[0][0].addr_write));
     tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                     TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
     /* Check alignment. */
@@ -1232,12 +1232,12 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     /* XXX: possibly we could use a block data load or writeback in
      * the first access. */
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
+                    offsetof(CPUArchState, tlb_table[0][0].addr_write) + 4);
     tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                     TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
 # endif
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addend));
+                    offsetof(CPUArchState, tlb_table[0][0].addend));
     switch (opc) {
     case 0:
@@ -1797,7 +1797,7 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
     tcg_add_target_add_op_defs(arm_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 }


@@ -1040,13 +1040,13 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     lab1 = gen_new_label();
     lab2 = gen_new_label();
-    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
+    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
     offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                               opc & 3, lab1, offset);
     /* TLB Hit. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
-               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
+               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
     tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
     tcg_out_branch(s, lab2, 1);
@@ -1155,13 +1155,13 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     lab1 = gen_new_label();
     lab2 = gen_new_label();
-    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
+    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
     offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                               opc, lab1, offset);
     /* TLB Hit. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
-               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
+               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
     /* There are no indexed stores, so we must do this addition explitly.
        Careful to avoid R20, which is used for the bswaps to follow. */


@@ -1031,7 +1031,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
     tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0,
-                             offsetof(CPUState, tlb_table[mem_index][0])
+                             offsetof(CPUArchState, tlb_table[mem_index][0])
                              + which);
     /* cmp 0(r1), r0 */


@@ -1479,8 +1479,8 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     /* Read the TLB entry */
     tcg_out_qemu_tlb(s, addr_reg, s_bits,
-                     offsetof(CPUState, tlb_table[mem_index][0].addr_read),
-                     offsetof(CPUState, tlb_table[mem_index][0].addend));
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addend));
     /* P6 is the fast path, and P7 the slow path */
     tcg_out_bundle(s, mLX,
@@ -1570,8 +1570,8 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
 #endif
     tcg_out_qemu_tlb(s, addr_reg, opc,
-                     offsetof(CPUState, tlb_table[mem_index][0].addr_write),
-                     offsetof(CPUState, tlb_table[mem_index][0].addend));
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
+                     offsetof(CPUArchState, tlb_table[mem_index][0].addend));
     /* P6 is the fast path, and P7 the slow path */
     tcg_out_bundle(s, mLX,
@@ -2368,6 +2368,6 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
     tcg_add_target_add_op_defs(ia64_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 }


@@ -827,7 +827,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
     tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
     tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
     tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
-                    offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_meml);
+                    offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + addr_meml);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
     tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl);
@@ -837,7 +837,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
     tcg_out_nop(s);
     tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
-                    offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_memh);
+                    offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + addr_memh);
     label1_ptr = s->code_ptr;
     tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT);
@@ -893,7 +893,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
     reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr);
     tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0,
-                    offsetof(CPUState, tlb_table[mem_index][0].addend));
+                    offsetof(CPUArchState, tlb_table[mem_index][0].addend));
     tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, addr_regl);
 #else
     if (GUEST_BASE == (int16_t)GUEST_BASE) {
@@ -1013,7 +1013,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
     tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
     tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
     tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
-                    offsetof(CPUState, tlb_table[mem_index][0].addr_write) + addr_meml);
+                    offsetof(CPUArchState, tlb_table[mem_index][0].addr_write) + addr_meml);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
     tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl);
@@ -1023,7 +1023,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
     tcg_out_nop(s);
     tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0,
-                    offsetof(CPUState, tlb_table[mem_index][0].addr_write) + addr_memh);
+                    offsetof(CPUArchState, tlb_table[mem_index][0].addr_write) + addr_memh);
     label1_ptr = s->code_ptr;
     tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT);
@@ -1080,7 +1080,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
     reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr);
     tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0,
-                    offsetof(CPUState, tlb_table[mem_index][0].addend));
+                    offsetof(CPUArchState, tlb_table[mem_index][0].addend));
     tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, addr_regl);
 #else
     if (GUEST_BASE == (int16_t)GUEST_BASE) {
@@ -1529,6 +1529,6 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
     tcg_add_target_add_op_defs(mips_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 }


@@ -564,7 +564,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
     tcg_out32 (s, (LWZU
                    | RT (r1)
                    | RA (r0)
-                   | offsetof (CPUState, tlb_table[mem_index][0].addr_read)
+                   | offsetof (CPUArchState, tlb_table[mem_index][0].addr_read)
                    )
         );
     tcg_out32 (s, (RLWINM
@@ -760,7 +760,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
     tcg_out32 (s, (LWZU
                    | RT (r1)
                    | RA (r0)
-                   | offsetof (CPUState, tlb_table[mem_index][0].addr_write)
+                   | offsetof (CPUArchState, tlb_table[mem_index][0].addr_write)
                    )
         );
     tcg_out32 (s, (RLWINM


@@ -635,7 +635,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
     rbase = 0;
     tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
-                      offsetof (CPUState, tlb_table[mem_index][0].addr_read));
+                      offsetof (CPUArchState, tlb_table[mem_index][0].addr_read));
     tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
@@ -782,7 +782,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
     rbase = 0;
     tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
-                      offsetof (CPUState, tlb_table[mem_index][0].addr_write));
+                      offsetof (CPUArchState, tlb_table[mem_index][0].addr_write));
     tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);


@@ -1439,9 +1439,9 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
     tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
     if (is_store) {
-        ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
+        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
     } else {
-        ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
+        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
     }
     assert(ofs < 0x80000);
@@ -1515,7 +1515,7 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
     *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
                          (unsigned long)label1_ptr) >> 1;
-    ofs = offsetof(CPUState, tlb_table[mem_index][0].addend);
+    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
     assert(ofs < 0x80000);
     tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs);
@@ -2293,7 +2293,7 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
     tcg_add_target_add_op_defs(s390_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 }


@@ -776,7 +776,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
     tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
     /* add arg1, x, arg1 */
-    tcg_out_addi(s, arg1, offsetof(CPUState,
+    tcg_out_addi(s, arg1, offsetof(CPUArchState,
                           tlb_table[mem_index][0].addr_read));
     /* add env, arg1, arg1 */
@@ -988,7 +988,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
     tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
     /* add arg1, x, arg1 */
-    tcg_out_addi(s, arg1, offsetof(CPUState,
+    tcg_out_addi(s, arg1, offsetof(CPUArchState,
                           tlb_table[mem_index][0].addr_write));
     /* add env, arg1, arg1 */


@@ -891,7 +891,7 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_clear(s->reserved_regs);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
     tcg_add_target_add_op_defs(tcg_target_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 }


@@ -154,7 +154,7 @@ typedef enum {
 void tci_disas(uint8_t opc);
-unsigned long tcg_qemu_tb_exec(CPUState *env, uint8_t *tb_ptr);
+unsigned long tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
 #define tcg_qemu_tb_exec tcg_qemu_tb_exec
 static inline void flush_icache_range(tcg_target_ulong start,

tci.c

@@ -52,7 +52,7 @@ typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
 /* TCI can optionally use a global register variable for env. */
 #if !defined(AREG0)
-CPUState *env;
+CPUArchState *env;
 #endif
 /* Targets which don't use GETPC also don't need tci_tb_ptr
@@ -429,7 +429,7 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
 }
 /* Interpret pseudo code in tb. */
-unsigned long tcg_qemu_tb_exec(CPUState *cpustate, uint8_t *tb_ptr)
+unsigned long tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr)
 {
     unsigned long next_tb = 0;


@@ -51,7 +51,7 @@ void cpu_gen_init(void)
    '*gen_code_size_ptr' contains the size of the generated code (host
    code).
 */
-int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
+int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
 {
     TCGContext *s = &tcg_ctx;
     uint8_t *gen_code_buf;
@@ -109,7 +109,7 @@ int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
 /* The cpu state corresponding to 'searched_pc' is restored.
  */
 int cpu_restore_state(TranslationBlock *tb,
-                      CPUState *env, unsigned long searched_pc)
+                      CPUArchState *env, unsigned long searched_pc)
 {
     TCGContext *s = &tcg_ctx;
     int j;


@@ -38,7 +38,7 @@
 //#define DEBUG_SIGNAL
-static void exception_action(CPUState *env1)
+static void exception_action(CPUArchState *env1)
 {
 #if defined(TARGET_I386)
     raise_exception_err_env(env1, env1->exception_index, env1->error_code);
@@ -50,7 +50,7 @@ static void exception_action(CPUState *env1)
 /* exit the current TB from a signal handler. The host registers are
    restored in a state compatible with the CPU emulator
  */
-void cpu_resume_from_signal(CPUState *env1, void *puc)
+void cpu_resume_from_signal(CPUArchState *env1, void *puc)
 {
 #ifdef __linux__
     struct ucontext *uc = puc;


@@ -530,14 +530,14 @@ static MemoryListener xen_memory_listener = {
 static void xen_reset_vcpu(void *opaque)
 {
-    CPUState *env = opaque;
+    CPUArchState *env = opaque;
     env->halted = 1;
 }
 void xen_vcpu_init(void)
 {
-    CPUState *first_cpu;
+    CPUArchState *first_cpu;
     if ((first_cpu = qemu_get_cpu(0))) {
         qemu_register_reset(xen_reset_vcpu, first_cpu);