Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210205' into staging

TCGCPUOps cleanups (claudio)
tcg/s390 compare fix (phil)
tcg/aarch64 rotli_vec fix
tcg/tci cleanups and fixes

# gpg: Signature made Fri 05 Feb 2021 22:55:10 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210205: (46 commits)
  accel: introduce AccelCPUClass extending CPUClass
  accel: replace struct CpusAccel with AccelOpsClass
  accel: extend AccelState and AccelClass to user-mode
  cpu: tcg_ops: move to tcg-cpu-ops.h, keep a pointer in CPUClass
  cpu: move debug_check_watchpoint to tcg_ops
  cpu: move adjust_watchpoint_address to tcg_ops
  physmem: make watchpoint checking code TCG-only
  cpu: move do_unaligned_access to tcg_ops
  cpu: move cc->transaction_failed to tcg_ops
  cpu: move cc->do_interrupt to tcg_ops
  target/arm: do not use cc->do_interrupt for KVM directly
  cpu: Move debug_excp_handler to tcg_ops
  cpu: Move tlb_fill to tcg_ops
  cpu: Move cpu_exec_* to tcg_ops
  cpu: Move synchronize_from_tb() to tcg_ops
  accel/tcg: split TCG-only code from cpu_exec_realizefn
  target/riscv: remove CONFIG_TCG, as it is always TCG
  cpu: Introduce TCGCpuOperations struct
  tcg/tci: Remove TCG_CONST
  tcg/tci: Fix TCG_REG_R4 misusage
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2021-02-05 22:59:12 +00:00
commit 5b19cb63d9
108 changed files with 1565 additions and 1065 deletions
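
Most of the hunks below repeat one mechanical pattern: TCG-only hooks are moved out of CPUClass into a per-target TCGCPUOps structure, and every call site switches from cc-><hook> to cc->tcg_ops-><hook>. The following sketch condenses that pattern so the diff is easier to follow. It is an illustration only, not code from the commit: the demo_* names are invented and the two structs are trimmed stand-ins for the real definitions shown in the hw/core/cpu.h and tcg-cpu-ops.h hunks further down.

/* Minimal compilable sketch of the CPUClass -> TCGCPUOps split.
 * All demo_* identifiers and the trimmed-down structs are hypothetical;
 * the real definitions appear in the hunks below. */
#include <stddef.h>
#include <stdio.h>

typedef struct CPUState CPUState;            /* opaque stand-in */

struct TCGCPUOps {                           /* subset of the real struct */
    void (*initialize)(void);                /* was cc->tcg_initialize    */
    void (*do_interrupt)(CPUState *cpu);     /* was cc->do_interrupt      */
};

typedef struct CPUClass {
    /* the individual TCG hooks are gone; only this pointer remains,
     * and it stays NULL when TCG is not compiled in */
    struct TCGCPUOps *tcg_ops;
} CPUClass;

static void demo_translate_init(void)        { puts("translator initialized"); }
static void demo_do_interrupt(CPUState *cs)  { (void)cs; puts("interrupt taken"); }

static struct TCGCPUOps demo_tcg_ops = {
    .initialize   = demo_translate_init,
    .do_interrupt = demo_do_interrupt,
};

int main(void)
{
    CPUClass cc = { .tcg_ops = &demo_tcg_ops };  /* done in foo_cpu_class_init() */

    /* call sites change from cc->do_interrupt(cpu) to: */
    cc.tcg_ops->initialize();
    cc.tcg_ops->do_interrupt(NULL);
    return 0;
}

The same shape appears verbatim in the target/alpha and target/arm hunks near the end of this page.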


@ -129,6 +129,7 @@ F: include/exec/helper*.h
F: include/exec/tb-hash.h
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
F: include/hw/core/tcg-cpu-ops.h
FPU emulation
M: Aurelien Jarno <aurelien@aurel32.net>
@ -437,8 +438,10 @@ Overall
M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/sysemu/accel.h
F: accel/accel.c
F: include/qemu/accel.h
F: include/sysemu/accel-ops.h
F: include/hw/core/accel-cpu.h
F: accel/accel-*.c
F: accel/Makefile.objs
F: accel/stubs/Makefile.objs

accel/accel-common.c (new file, 105 lines)

@ -0,0 +1,105 @@
/*
* QEMU accel class, components common to system emulation and user mode
*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (c) 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "cpu.h"
#include "hw/core/accel-cpu.h"
#ifndef CONFIG_USER_ONLY
#include "accel-softmmu.h"
#endif /* !CONFIG_USER_ONLY */
static const TypeInfo accel_type = {
.name = TYPE_ACCEL,
.parent = TYPE_OBJECT,
.class_size = sizeof(AccelClass),
.instance_size = sizeof(AccelState),
};
/* Lookup AccelClass from opt_name. Returns NULL if not found */
AccelClass *accel_find(const char *opt_name)
{
char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
AccelClass *ac = ACCEL_CLASS(object_class_by_name(class_name));
g_free(class_name);
return ac;
}
static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
{
CPUClass *cc = CPU_CLASS(klass);
AccelCPUClass *accel_cpu = opaque;
cc->accel_cpu = accel_cpu;
if (accel_cpu->cpu_class_init) {
accel_cpu->cpu_class_init(cc);
}
}
/* initialize the arch-specific accel CpuClass interfaces */
static void accel_init_cpu_interfaces(AccelClass *ac)
{
const char *ac_name; /* AccelClass name */
char *acc_name; /* AccelCPUClass name */
ObjectClass *acc; /* AccelCPUClass */
ac_name = object_class_get_name(OBJECT_CLASS(ac));
g_assert(ac_name != NULL);
acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
acc = object_class_by_name(acc_name);
g_free(acc_name);
if (acc) {
object_class_foreach(accel_init_cpu_int_aux,
CPU_RESOLVING_TYPE, false, acc);
}
}
void accel_init_interfaces(AccelClass *ac)
{
#ifndef CONFIG_USER_ONLY
accel_init_ops_interfaces(ac);
#endif /* !CONFIG_USER_ONLY */
accel_init_cpu_interfaces(ac);
}
static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU,
.parent = TYPE_OBJECT,
.abstract = true,
.class_size = sizeof(AccelCPUClass),
};
static void register_accel_types(void)
{
type_register_static(&accel_type);
type_register_static(&accel_cpu_type);
}
type_init(register_accel_types);


@ -1,5 +1,5 @@
/*
* QEMU System Emulator, accelerator interfaces
* QEMU accel class, system emulation components
*
* Copyright (c) 2003-2008 Fabrice Bellard
* Copyright (c) 2014 Red Hat Inc.
@ -24,27 +24,11 @@
*/
#include "qemu/osdep.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qom/object.h"
#include "sysemu/cpus.h"
static const TypeInfo accel_type = {
.name = TYPE_ACCEL,
.parent = TYPE_OBJECT,
.class_size = sizeof(AccelClass),
.instance_size = sizeof(AccelState),
};
/* Lookup AccelClass from opt_name. Returns NULL if not found */
AccelClass *accel_find(const char *opt_name)
{
char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
AccelClass *ac = ACCEL_CLASS(object_class_by_name(class_name));
g_free(class_name);
return ac;
}
#include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms)
{
@ -77,9 +61,40 @@ void accel_setup_post(MachineState *ms)
}
}
static void register_accel_types(void)
/* initialize the arch-independent accel operation interfaces */
void accel_init_ops_interfaces(AccelClass *ac)
{
type_register_static(&accel_type);
const char *ac_name;
char *ops_name;
AccelOpsClass *ops;
ac_name = object_class_get_name(OBJECT_CLASS(ac));
g_assert(ac_name != NULL);
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
ops = ACCEL_OPS_CLASS(object_class_by_name(ops_name));
g_free(ops_name);
/*
* all accelerators need to define ops, providing at least a mandatory
* non-NULL create_vcpu_thread operation.
*/
g_assert(ops != NULL);
if (ops->ops_init) {
ops->ops_init(ops);
}
cpus_register_accel(ops);
}
type_init(register_accel_types);
static const TypeInfo accel_ops_type_info = {
.name = TYPE_ACCEL_OPS,
.parent = TYPE_OBJECT,
.abstract = true,
.class_size = sizeof(AccelOpsClass),
};
static void accel_softmmu_register_types(void)
{
type_register_static(&accel_ops_type_info);
}
type_init(accel_softmmu_register_types);

accel/accel-softmmu.h (new file, 15 lines)

@ -0,0 +1,15 @@
/*
* QEMU System Emulation accel internal functions
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef ACCEL_SOFTMMU_H
#define ACCEL_SOFTMMU_H
void accel_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SOFTMMU_H */

accel/accel-user.c (new file, 24 lines)

@ -0,0 +1,24 @@
/*
* QEMU accel class, user-mode components
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/accel.h"
AccelState *current_accel(void)
{
static AccelState *accel;
if (!accel) {
AccelClass *ac = accel_find("tcg");
g_assert(ac != NULL);
accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));
}
return accel;
}


@ -74,11 +74,27 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
cpu, QEMU_THREAD_JOINABLE);
}
const CpusAccel kvm_cpus = {
.create_vcpu_thread = kvm_start_vcpu_thread,
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
.synchronize_post_reset = kvm_cpu_synchronize_post_reset,
.synchronize_post_init = kvm_cpu_synchronize_post_init,
.synchronize_state = kvm_cpu_synchronize_state,
.synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm,
ops->create_vcpu_thread = kvm_start_vcpu_thread;
ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset;
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
ops->synchronize_state = kvm_cpu_synchronize_state;
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
}
static const TypeInfo kvm_accel_ops_type = {
.name = ACCEL_OPS_NAME("kvm"),
.parent = TYPE_ACCEL_OPS,
.class_init = kvm_accel_ops_class_init,
.abstract = true,
};
static void kvm_accel_ops_register_types(void)
{
type_register_static(&kvm_accel_ops_type);
}
type_init(kvm_accel_ops_register_types);


@ -2256,8 +2256,6 @@ static int kvm_init(MachineState *ms)
ret = ram_block_discard_disable(true);
assert(!ret);
}
cpus_register_accel(&kvm_cpus);
return 0;
err:


@ -12,8 +12,6 @@
#include "sysemu/cpus.h"
extern const CpusAccel kvm_cpus;
int kvm_init_vcpu(CPUState *cpu, Error **errp);
int kvm_cpu_exec(CPUState *cpu);
void kvm_destroy_vcpu(CPUState *cpu);


@ -1,7 +1,7 @@
kvm_ss = ss.source_set()
kvm_ss.add(files(
'kvm-all.c',
'kvm-cpus.c',
'kvm-accel-ops.c',
))
kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c'))


@ -1,4 +1,6 @@
softmmu_ss.add(files('accel.c'))
specific_ss.add(files('accel-common.c'))
softmmu_ss.add(files('accel-softmmu.c'))
user_ss.add(files('accel-user.c'))
subdir('qtest')
subdir('kvm')


@ -17,7 +17,7 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/qtest.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
@ -25,14 +25,8 @@
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
const CpusAccel qtest_cpus = {
.create_vcpu_thread = dummy_start_vcpu_thread,
.get_virtual_clock = qtest_get_virtual_clock,
};
static int qtest_init_accel(MachineState *ms)
{
cpus_register_accel(&qtest_cpus);
return 0;
}
@ -52,9 +46,26 @@ static const TypeInfo qtest_accel_type = {
.class_init = qtest_accel_class_init,
};
static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = dummy_start_vcpu_thread;
ops->get_virtual_clock = qtest_get_virtual_clock;
};
static const TypeInfo qtest_accel_ops_type = {
.name = ACCEL_OPS_NAME("qtest"),
.parent = TYPE_ACCEL_OPS,
.class_init = qtest_accel_ops_class_init,
.abstract = true,
};
static void qtest_type_init(void)
{
type_register_static(&qtest_accel_type);
type_register_static(&qtest_accel_ops_type);
}
type_init(qtest_type_init);


@ -21,6 +21,7 @@
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
@ -213,8 +214,8 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
TARGET_FMT_lx "] %s\n",
last_tb->tc.ptr, last_tb->pc,
lookup_symbol(last_tb->pc));
if (cc->synchronize_from_tb) {
cc->synchronize_from_tb(cpu, last_tb);
if (cc->tcg_ops->synchronize_from_tb) {
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
} else {
assert(cc->set_pc);
cc->set_pc(cpu, last_tb->pc);
@ -262,8 +263,8 @@ static void cpu_exec_enter(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->cpu_exec_enter) {
cc->cpu_exec_enter(cpu);
if (cc->tcg_ops->cpu_exec_enter) {
cc->tcg_ops->cpu_exec_enter(cpu);
}
}
@ -271,8 +272,8 @@ static void cpu_exec_exit(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->cpu_exec_exit) {
cc->cpu_exec_exit(cpu);
if (cc->tcg_ops->cpu_exec_exit) {
cc->tcg_ops->cpu_exec_exit(cpu);
}
}
@ -512,8 +513,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
}
}
if (cc->debug_excp_handler) {
cc->debug_excp_handler(cpu);
if (cc->tcg_ops->debug_excp_handler) {
cc->tcg_ops->debug_excp_handler(cpu);
}
}
@ -547,7 +548,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->do_interrupt(cpu);
cc->tcg_ops->do_interrupt(cpu);
#endif
*ret = cpu->exception_index;
cpu->exception_index = -1;
@ -556,7 +557,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (replay_exception()) {
CPUClass *cc = CPU_GET_CLASS(cpu);
qemu_mutex_lock_iothread();
cc->do_interrupt(cpu);
cc->tcg_ops->do_interrupt(cpu);
qemu_mutex_unlock_iothread();
cpu->exception_index = -1;
@ -655,8 +656,8 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
if (cc->cpu_exec_interrupt &&
cc->cpu_exec_interrupt(cpu, interrupt_request)) {
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
replay_interrupt();
}
@ -828,6 +829,34 @@ int cpu_exec(CPUState *cpu)
return ret;
}
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
static bool tcg_target_initialized;
CPUClass *cc = CPU_GET_CLASS(cpu);
if (!tcg_target_initialized) {
cc->tcg_ops->initialize();
tcg_target_initialized = true;
}
tlb_init(cpu);
qemu_plugin_vcpu_init_hook(cpu);
#ifndef CONFIG_USER_ONLY
tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
}
/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
qemu_plugin_vcpu_exit_hook(cpu);
tlb_destroy(cpu);
}
#ifndef CONFIG_USER_ONLY
void dump_drift_info(void)


@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
@ -1305,10 +1306,37 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
* This is not a probe, so only valid return is success; failure
* should result in exception + longjmp to the cpu loop.
*/
ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
access_type, mmu_idx, false, retaddr);
assert(ok);
}
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}
static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response,
uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (!cpu->ignore_memory_transaction_failures &&
cc->tcg_ops->do_transaction_failed) {
cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
access_type, mmu_idx, attrs,
response, retaddr);
}
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
@ -1576,8 +1604,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
CPUState *cs = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cs);
if (!cc->tlb_fill(cs, addr, fault_size, access_type,
mmu_idx, nonfault, retaddr)) {
if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
return TLB_INVALID_MASK;


@ -1,5 +1,6 @@
tcg_ss = ss.source_set()
tcg_ss.add(files(
'tcg-all.c',
'cpu-exec-common.c',
'cpu-exec.c',
'tcg-runtime-gvec.c',
@ -13,10 +14,9 @@ tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c'), libdl])
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
'tcg-all.c',
'cputlb.c',
'tcg-cpus.c',
'tcg-cpus-mttcg.c',
'tcg-cpus-icount.c',
'tcg-cpus-rr.c'
'tcg-accel-ops.c',
'tcg-accel-ops-mttcg.c',
'tcg-accel-ops-icount.c',
'tcg-accel-ops-rr.c'
))


@ -32,9 +32,9 @@
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-cpus.h"
#include "tcg-cpus-icount.h"
#include "tcg-cpus-rr.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-icount.h"
#include "tcg-accel-ops-rr.h"
static int64_t icount_get_limit(void)
{
@ -93,7 +93,7 @@ void icount_prepare_for_run(CPUState *cpu)
/*
* These should always be cleared by icount_process_data after
* each vCPU execution. However u16.high can be raised
* asynchronously by cpu_exit/cpu_interrupt/tcg_cpus_handle_interrupt
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
*/
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);
@ -125,23 +125,14 @@ void icount_process_data(CPUState *cpu)
replay_mutex_unlock();
}
static void icount_handle_interrupt(CPUState *cpu, int mask)
void icount_handle_interrupt(CPUState *cpu, int mask)
{
int old_mask = cpu->interrupt_request;
tcg_cpus_handle_interrupt(cpu, mask);
tcg_handle_interrupt(cpu, mask);
if (qemu_cpu_is_self(cpu) &&
!cpu->can_do_io
&& (mask & ~old_mask) != 0) {
cpu_abort(cpu, "Raised interrupt while not in I/O function");
}
}
const CpusAccel tcg_cpus_icount = {
.create_vcpu_thread = rr_start_vcpu_thread,
.kick_vcpu_thread = rr_kick_vcpu_thread,
.handle_interrupt = icount_handle_interrupt,
.get_virtual_clock = icount_get,
.get_elapsed_ticks = icount_get,
};


@ -14,4 +14,6 @@ void icount_handle_deadline(void);
void icount_prepare_for_run(CPUState *cpu);
void icount_process_data(CPUState *cpu);
void icount_handle_interrupt(CPUState *cpu, int mask);
#endif /* TCG_CPUS_ICOUNT_H */


@ -32,7 +32,8 @@
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-cpus.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
/*
* In the multi-threaded case each vCPU has its own thread. The TLS
@ -103,12 +104,12 @@ static void *mttcg_cpu_thread_fn(void *arg)
return NULL;
}
static void mttcg_kick_vcpu_thread(CPUState *cpu)
void mttcg_kick_vcpu_thread(CPUState *cpu)
{
cpu_exit(cpu);
}
static void mttcg_start_vcpu_thread(CPUState *cpu)
void mttcg_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
@ -131,10 +132,3 @@ static void mttcg_start_vcpu_thread(CPUState *cpu)
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}
const CpusAccel tcg_cpus_mttcg = {
.create_vcpu_thread = mttcg_start_vcpu_thread,
.kick_vcpu_thread = mttcg_kick_vcpu_thread,
.handle_interrupt = tcg_cpus_handle_interrupt,
};


@ -0,0 +1,19 @@
/*
* QEMU TCG Multi Threaded vCPUs implementation
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef TCG_CPUS_MTTCG_H
#define TCG_CPUS_MTTCG_H
/* kick MTTCG vCPU thread */
void mttcg_kick_vcpu_thread(CPUState *cpu);
/* start an mttcg vCPU thread */
void mttcg_start_vcpu_thread(CPUState *cpu);
#endif /* TCG_CPUS_MTTCG_H */


@ -32,9 +32,9 @@
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-cpus.h"
#include "tcg-cpus-rr.h"
#include "tcg-cpus-icount.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
/* Kick all RR vCPUs */
void rr_kick_vcpu_thread(CPUState *unused)
@ -296,10 +296,3 @@ void rr_start_vcpu_thread(CPUState *cpu)
cpu->created = true;
}
}
const CpusAccel tcg_cpus_rr = {
.create_vcpu_thread = rr_start_vcpu_thread,
.kick_vcpu_thread = rr_kick_vcpu_thread,
.handle_interrupt = tcg_cpus_handle_interrupt,
};


@ -34,7 +34,10 @@
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "tcg-cpus.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
/* common functionality among all TCG variants */
@ -64,7 +67,7 @@ int tcg_cpus_exec(CPUState *cpu)
}
/* mask must never be zero, except for A20 change call */
void tcg_cpus_handle_interrupt(CPUState *cpu, int mask)
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
@ -80,3 +83,43 @@ void tcg_cpus_handle_interrupt(CPUState *cpu, int mask)
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
}
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
if (qemu_tcg_mttcg_enabled()) {
ops->create_vcpu_thread = mttcg_start_vcpu_thread;
ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
ops->handle_interrupt = tcg_handle_interrupt;
} else if (icount_enabled()) {
ops->create_vcpu_thread = rr_start_vcpu_thread;
ops->kick_vcpu_thread = rr_kick_vcpu_thread;
ops->handle_interrupt = icount_handle_interrupt;
ops->get_virtual_clock = icount_get;
ops->get_elapsed_ticks = icount_get;
} else {
ops->create_vcpu_thread = rr_start_vcpu_thread;
ops->kick_vcpu_thread = rr_kick_vcpu_thread;
ops->handle_interrupt = tcg_handle_interrupt;
}
}
static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->ops_init = tcg_accel_ops_init;
}
static const TypeInfo tcg_accel_ops_type = {
.name = ACCEL_OPS_NAME("tcg"),
.parent = TYPE_ACCEL_OPS,
.class_init = tcg_accel_ops_class_init,
.abstract = true,
};
static void tcg_accel_ops_register_types(void)
{
type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);


@ -14,12 +14,8 @@
#include "sysemu/cpus.h"
extern const CpusAccel tcg_cpus_mttcg;
extern const CpusAccel tcg_cpus_icount;
extern const CpusAccel tcg_cpus_rr;
void tcg_cpus_destroy(CPUState *cpu);
int tcg_cpus_exec(CPUState *cpu);
void tcg_cpus_handle_interrupt(CPUState *cpu, int mask);
void tcg_handle_interrupt(CPUState *cpu, int mask);
#endif /* TCG_CPUS_H */


@ -30,9 +30,8 @@
#include "tcg/tcg.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/boards.h"
#include "qemu/accel.h"
#include "qapi/qapi-builtin-visit.h"
#include "tcg-cpus.h"
struct TCGState {
AccelState parent_obj;
@ -97,7 +96,7 @@ static void tcg_accel_instance_init(Object *obj)
s->mttcg_enabled = default_mttcg_enabled();
/* If debugging enabled, default "auto on", otherwise off. */
#ifdef CONFIG_DEBUG_TCG
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
s->splitwx_enabled = -1;
#else
s->splitwx_enabled = 0;
@ -114,17 +113,15 @@ static int tcg_init(MachineState *ms)
mttcg_enabled = s->mttcg_enabled;
/*
* Initialize TCG regions
* Initialize TCG regions only for softmmu.
*
* This needs to be done later for user mode, because the prologue
* generation needs to be delayed so that GUEST_BASE is already set.
*/
#ifndef CONFIG_USER_ONLY
tcg_region_init();
#endif /* !CONFIG_USER_ONLY */
if (mttcg_enabled) {
cpus_register_accel(&tcg_cpus_mttcg);
} else if (icount_enabled()) {
cpus_register_accel(&tcg_cpus_icount);
} else {
cpus_register_accel(&tcg_cpus_rr);
}
return 0;
}


@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
@ -187,7 +188,8 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
clear_helper_retaddr();
cc = CPU_GET_CLASS(cpu);
cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
cc->tcg_ops->tlb_fill(cpu, address, 0, access_type,
MMU_USER_IDX, false, pc);
g_assert_not_reached();
}
@ -217,8 +219,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
} else {
CPUState *cpu = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tlb_fill(cpu, addr, fault_size, access_type,
MMU_USER_IDX, false, ra);
cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
MMU_USER_IDX, false, ra);
g_assert_not_reached();
}
}


@ -15,7 +15,7 @@
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "sysemu/runstate.h"
@ -154,10 +154,6 @@ static void xen_setup_post(MachineState *ms, AccelState *accel)
}
}
const CpusAccel xen_cpus = {
.create_vcpu_thread = dummy_start_vcpu_thread,
};
static int xen_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@ -185,9 +181,6 @@ static int xen_init(MachineState *ms)
* opt out of system RAM being allocated by generic code
*/
mc->default_ram_id = NULL;
cpus_register_accel(&xen_cpus);
return 0;
}
@ -222,9 +215,24 @@ static const TypeInfo xen_accel_type = {
.class_init = xen_accel_class_init,
};
static void xen_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = dummy_start_vcpu_thread;
}
static const TypeInfo xen_accel_ops_type = {
.name = ACCEL_OPS_NAME("xen"),
.parent = TYPE_ACCEL_OPS,
.class_init = xen_accel_ops_class_init,
.abstract = true,
};
static void xen_type_init(void)
{
type_register_static(&xen_accel_type);
type_register_static(&xen_accel_ops_type);
}
type_init(xen_type_init);


@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/units.h"
#include "qemu/accel.h"
#include "sysemu/tcg.h"
#include "qemu-version.h"
#include <machine/trap.h>
@ -908,10 +909,14 @@ int main(int argc, char **argv)
#endif
}
/* init tcg before creating CPUs and to get qemu_host_page_size */
tcg_exec_init(0, false);
cpu_type = parse_cpu_option(cpu_model);
/* init tcg before creating CPUs and to get qemu_host_page_size */
{
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
ac->init_machine(NULL);
accel_init_interfaces(ac);
}
cpu = cpu_create(cpu_type);
env = cpu->env_ptr;
#if defined(TARGET_SPARC) || defined(TARGET_PPC)

configure (5 changed lines)

@ -1110,9 +1110,9 @@ for opt do
;;
--enable-whpx) whpx="enabled"
;;
--disable-tcg-interpreter) tcg_interpreter="true"
--disable-tcg-interpreter) tcg_interpreter="false"
;;
--enable-tcg-interpreter) tcg_interpreter="false"
--enable-tcg-interpreter) tcg_interpreter="true"
;;
--disable-cap-ng) cap_ng="disabled"
;;
@ -6417,6 +6417,7 @@ NINJA=$ninja $meson setup \
-Dvhost_user_blk_server=$vhost_user_blk_server \
-Dfuse=$fuse -Dfuse_lseek=$fuse_lseek -Dguest_agent_msi=$guest_agent_msi \
$(if test "$default_features" = no; then echo "-Dauto_features=disabled"; fi) \
-Dtcg_interpreter=$tcg_interpreter \
$cross_arg \
"$PWD" "$source_path"

cpu.c (66 changed lines)

@ -124,12 +124,34 @@ const VMStateDescription vmstate_cpu_common = {
};
#endif
void cpu_exec_unrealizefn(CPUState *cpu)
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
tlb_destroy(cpu);
cpu_list_remove(cpu);
cpu_list_add(cpu);
#ifdef CONFIG_TCG
/* NB: errp parameter is unused currently */
if (tcg_enabled()) {
tcg_exec_realizefn(cpu, errp);
}
#endif /* CONFIG_TCG */
#ifdef CONFIG_USER_ONLY
assert(cc->vmsd == NULL);
#else
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
}
#endif /* CONFIG_USER_ONLY */
}
void cpu_exec_unrealizefn(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef CONFIG_USER_ONLY
assert(cc->vmsd == NULL);
@ -140,8 +162,15 @@ void cpu_exec_unrealizefn(CPUState *cpu)
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
}
tcg_iommu_free_notifier_list(cpu);
#endif
#ifdef CONFIG_TCG
/* NB: errp parameter is unused currently */
if (tcg_enabled()) {
tcg_exec_unrealizefn(cpu);
}
#endif /* CONFIG_TCG */
cpu_list_remove(cpu);
}
void cpu_exec_initfn(CPUState *cpu)
@ -156,35 +185,6 @@ void cpu_exec_initfn(CPUState *cpu)
#endif
}
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
static bool tcg_target_initialized;
cpu_list_add(cpu);
if (tcg_enabled() && !tcg_target_initialized) {
tcg_target_initialized = true;
cc->tcg_initialize();
}
tlb_init(cpu);
qemu_plugin_vcpu_init_hook(cpu);
#ifdef CONFIG_USER_ONLY
assert(cc->vmsd == NULL);
#else /* !CONFIG_USER_ONLY */
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
}
tcg_iommu_init_notifier_list(cpu);
#endif
}
const char *parse_cpu_option(const char *cpu_option)
{
ObjectClass *oc;


@ -186,19 +186,15 @@ static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
return 0;
}
static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
/* If no extra check is required, QEMU watchpoint match can be considered
* as an architectural match.
*/
return true;
}
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
return target_words_bigendian();
}
/*
* XXX the following #if is always true because this is a common_ss
* module, so target CONFIG_* is never defined.
*/
#if !defined(CONFIG_USER_ONLY)
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
@ -340,9 +336,9 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp)
static void cpu_common_unrealizefn(DeviceState *dev)
{
CPUState *cpu = CPU(dev);
/* NOTE: latest generic point before the cpu is fully unrealized */
trace_fini_vcpu(cpu);
qemu_plugin_vcpu_exit_hook(cpu);
cpu_exec_unrealizefn(cpu);
}
@ -379,11 +375,6 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
return cpu->cpu_index;
}
static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
return addr;
}
static Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
/* Create a memory property for softmmu CPU object,
@ -416,8 +407,6 @@ static void cpu_class_init(ObjectClass *klass, void *data)
k->gdb_read_register = cpu_common_gdb_read_register;
k->gdb_write_register = cpu_common_gdb_write_register;
k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
dc->realize = cpu_common_realizefn;
dc->unrealize = cpu_common_unrealizefn;


@ -53,6 +53,9 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/help_option.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
enum jazz_model_e {
JAZZ_MAGNUM,
@ -116,6 +119,8 @@ static const MemoryRegionOps dma_dummy_ops = {
#define MAGNUM_BIOS_SIZE_MAX 0x7e000
#define MAGNUM_BIOS_SIZE \
(BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX)
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static void (*real_do_transaction_failed)(CPUState *cpu, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
@ -137,6 +142,7 @@ static void mips_jazz_do_transaction_failed(CPUState *cs, hwaddr physaddr,
(*real_do_transaction_failed)(cs, physaddr, addr, size, access_type,
mmu_idx, attrs, response, retaddr);
}
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
static void mips_jazz_init(MachineState *machine,
enum jazz_model_e jazz_model)
@ -205,8 +211,10 @@ static void mips_jazz_init(MachineState *machine,
* memory region that catches all memory accesses, as we do on Malta.
*/
cc = CPU_GET_CLASS(cpu);
real_do_transaction_failed = cc->do_transaction_failed;
cc->do_transaction_failed = mips_jazz_do_transaction_failed;
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
real_do_transaction_failed = cc->tcg_ops->do_transaction_failed;
cc->tcg_ops->do_transaction_failed = mips_jazz_do_transaction_failed;
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
/* allocate RAM */
memory_region_add_subregion(address_space, 0, machine->ram);


@ -410,19 +410,26 @@ static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
}
#ifdef CONFIG_TCG
/* accel/tcg/cpu-exec.c */
void dump_drift_info(void);
/* accel/tcg/translate-all.c */
void dump_exec_info(void);
void dump_opcount_info(void);
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
#ifdef CONFIG_TCG
/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);
void tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
#endif /* CONFIG_TCG */
/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
void *ptr, target_ulong len, bool is_write);
int cpu_exec(CPUState *cpu);
/**
* cpu_set_cpustate_pointers(cpu)
* @cpu: The cpu object


@ -25,9 +25,6 @@
#include "qemu/host-utils.h"
#include "qemu/thread.h"
#ifdef CONFIG_TCG
#include "tcg-target.h"
#endif
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif


@ -544,7 +544,7 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \


@ -6,7 +6,7 @@
#include "exec/memory.h"
#include "sysemu/hostmem.h"
#include "sysemu/blockdev.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "qapi/qapi-types-machine.h"
#include "qemu/module.h"
#include "qom/object.h"


@ -0,0 +1,38 @@
/*
* Accelerator interface, specializes CPUClass
* This header is used only by target-specific code.
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef ACCEL_CPU_H
#define ACCEL_CPU_H
/*
* This header is used to define new accelerator-specific target-specific
* accelerator cpu subclasses.
* It uses CPU_RESOLVING_TYPE, so this is clearly target-specific.
*
* Do not try to use for any other purpose than the implementation of new
* subclasses in target/, or the accel implementation itself in accel/
*/
#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
typedef struct AccelCPUClass AccelCPUClass;
DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
typedef struct AccelCPUClass {
/*< private >*/
ObjectClass parent_class;
/*< public >*/
void (*cpu_class_init)(CPUClass *cc);
void (*cpu_instance_init)(CPUState *cpu);
void (*cpu_realizefn)(CPUState *cpu, Error **errp);
} AccelCPUClass;
#endif /* ACCEL_CPU_H */
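
A note on how this header is meant to be consumed: accel_init_cpu_interfaces() in accel-common.c (near the top of this diff) finds the subclass purely by name, by concatenating the accelerator class name with CPU_RESOLVING_TYPE. A hypothetical target-side registration could look like the sketch below. Every demo_* identifier is invented, the concrete per-target instances are not part of the hunks quoted on this page, and ACCEL_CLASS_NAME("tcg") expanding to "tcg-accel" is an assumption taken from qemu/accel.h, so treat this purely as a reading aid.

/* Hypothetical target-side registration (illustrative only).
 * With CPU_RESOLVING_TYPE == "x86_64-cpu", ACCEL_CPU_NAME("tcg") expands to
 * "tcg-accel-x86_64-cpu", which is exactly the string that
 * accel_init_cpu_interfaces() builds from the "tcg-accel" class name plus
 * "-" CPU_RESOLVING_TYPE, so the class is found and cpu_class_init runs. */
static void demo_tcg_cpu_init_cpuclass(CPUClass *cc)
{
    /* per-accelerator tweaks to the CPU class would go here */
}

static void demo_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = demo_tcg_cpu_init_cpuclass;
}

static const TypeInfo demo_tcg_cpu_accel_type = {
    .name = ACCEL_CPU_NAME("tcg"),          /* "tcg-accel-x86_64-cpu" */
    .parent = TYPE_ACCEL_CPU,
    .class_init = demo_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void demo_tcg_cpu_accel_register_types(void)
{
    type_register_static(&demo_tcg_cpu_accel_type);
}
type_init(demo_tcg_cpu_accel_register_types);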


@ -76,6 +76,12 @@ typedef struct CPUWatchpoint CPUWatchpoint;
struct TranslationBlock;
/* see tcg-cpu-ops.h */
struct TCGCPUOps;
/* see accel-cpu.h */
struct AccelCPUClass;
/**
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
@ -83,11 +89,6 @@ struct TranslationBlock;
* @parse_features: Callback to parse command line arguments.
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
* @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling.
* @do_unaligned_access: Callback for unaligned access handling, if
* the target defines #TARGET_ALIGNED_ONLY.
* @do_transaction_failed: Callback for handling failed memory transactions
* (ie bus faults or external aborts; not MMU faults)
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
* runtime configurable endianness is currently big-endian. Non-configurable
* CPUs can use the default implementation of this method. This method should
@ -106,19 +107,6 @@ struct TranslationBlock;
* If the target behaviour here is anything other than "set
* the PC register to the value passed in" then the target must
* also implement the synchronize_from_tb hook.
* @synchronize_from_tb: Callback for synchronizing state from a TCG
* #TranslationBlock. This is called when we abandon execution
* of a TB before starting it, and must set all parts of the CPU
* state which the previous TB in the chain may not have updated.
* This always includes at least the program counter; some targets
* will need to do more. If this hook is not implemented then the
* default is to call @set_pc(tb->pc).
* @tlb_fill: Callback for handling a softmmu tlb miss or user-only
* address fault. For system mode, if the access is valid, call
* tlb_set_page and return true; if the access is invalid, and
* probe is true, return false; otherwise raise an exception and
* do not return. For user-only mode, always raise an exception
* and do not return.
* @get_phys_page_debug: Callback for obtaining a physical address.
* @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
* associated memory transaction attributes to use for the access.
@ -128,9 +116,6 @@ struct TranslationBlock;
* a memory access with the specified memory transaction attributes.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
* @debug_check_watchpoint: Callback: return true if the architectural
* watchpoint whose address has matched should really fire.
* @debug_excp_handler: Callback for handling debug exceptions.
* @write_elf64_note: Callback for writing a CPU-specific ELF note to a
* 64-bit VM coredump.
* @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
@ -149,9 +134,6 @@ struct TranslationBlock;
* @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
* gdb stub. Returns a pointer to the XML contents for the specified XML file
* or NULL if the CPU doesn't have a dynamically generated content for it.
* @cpu_exec_enter: Callback for cpu_exec preparation.
* @cpu_exec_exit: Callback for cpu_exec cleanup.
* @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
* @disas_set_info: Setup architecture specific components of disassembly info
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
@ -170,14 +152,6 @@ struct CPUClass {
int reset_dump_flags;
bool (*has_work)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr);
bool (*virtio_is_big_endian)(CPUState *cpu);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
uint8_t *buf, int len, bool is_write);
@ -189,19 +163,12 @@ struct CPUClass {
void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu,
const struct TranslationBlock *tb);
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
void (*debug_excp_handler)(CPUState *cpu);
int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
int cpuid, void *opaque);
@ -216,18 +183,17 @@ struct CPUClass {
const char *gdb_core_xml_file;
gchar * (*gdb_arch_name)(CPUState *cpu);
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
void (*cpu_exec_enter)(CPUState *cpu);
void (*cpu_exec_exit)(CPUState *cpu);
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
void (*tcg_initialize)(void);
const char *deprecation_note;
/* Keep non-pointer data at the end to minimize holes. */
int gdb_num_core_regs;
bool gdb_stop_before_watchpoint;
struct AccelCPUClass *accel_cpu;
/* when TCG is not available, this pointer is NULL */
struct TCGCPUOps *tcg_ops;
};
/*
@ -858,36 +824,6 @@ CPUState *cpu_by_arch_id(int64_t id);
void cpu_interrupt(CPUState *cpu, int mask);
#ifdef NEED_CPU_H
#ifdef CONFIG_SOFTMMU
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}
static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response,
uintptr_t retaddr)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
mmu_idx, attrs, response, retaddr);
}
}
#endif
#endif /* NEED_CPU_H */
/**
* cpu_set_pc:
* @cpu: The CPU to set the program counter for.
@ -1112,6 +1048,8 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
/* $(top_srcdir)/cpu.c */
void cpu_exec_initfn(CPUState *cpu);
void cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);


@ -0,0 +1,97 @@
/*
* TCG CPU-specific operations
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef TCG_CPU_OPS_H
#define TCG_CPU_OPS_H
#include "hw/core/cpu.h"
struct TCGCPUOps {
/**
* @initialize: Initialize TCG state
*
* Called when the first CPU is realized.
*/
void (*initialize)(void);
/**
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
*
* This is called when we abandon execution of a TB before starting it,
* and must set all parts of the CPU state which the previous TB in the
* chain may not have updated.
* By default, when this is NULL, a call is made to @set_pc(tb->pc).
*
* If more state needs to be restored, the target must implement a
* function to restore all the state, and register it here.
*/
void (*synchronize_from_tb)(CPUState *cpu,
const struct TranslationBlock *tb);
/** @cpu_exec_enter: Callback for cpu_exec preparation */
void (*cpu_exec_enter)(CPUState *cpu);
/** @cpu_exec_exit: Callback for cpu_exec cleanup */
void (*cpu_exec_exit)(CPUState *cpu);
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @do_interrupt: Callback for interrupt handling.
*
* note that this is in general SOFTMMU only, but it actually isn't
* because of an x86 hack (accel/tcg/cpu-exec.c), so we cannot put it
* in the SOFTMMU section in general.
*/
void (*do_interrupt)(CPUState *cpu);
/**
* @tlb_fill: Handle a softmmu tlb miss or user-only address fault
*
* For system mode, if the access is valid, call tlb_set_page
* and return true; if the access is invalid, and probe is
* true, return false; otherwise raise an exception and do
* not return. For user-only mode, always raise an exception
* and do not return.
*/
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
/** @debug_excp_handler: Callback for handling debug exceptions */
void (*debug_excp_handler)(CPUState *cpu);
#ifdef NEED_CPU_H
#ifdef CONFIG_SOFTMMU
/**
* @do_transaction_failed: Callback for handling failed memory transactions
* (ie bus faults or external aborts; not MMU faults)
*/
void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr);
/**
* @do_unaligned_access: Callback for unaligned access handling
*/
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
/**
* @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
*/
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
/**
* @debug_check_watchpoint: return true if the architectural
* watchpoint whose address has matched should really fire, used by ARM
*/
bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
#endif /* CONFIG_SOFTMMU */
#endif /* NEED_CPU_H */
};
#endif /* TCG_CPU_OPS_H */


@ -20,8 +20,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef HW_ACCEL_H
#define HW_ACCEL_H
#ifndef QEMU_ACCEL_H
#define QEMU_ACCEL_H
#include "qom/object.h"
#include "exec/hwaddr.h"
@ -37,8 +37,8 @@ typedef struct AccelClass {
/*< public >*/
const char *name;
#ifndef CONFIG_USER_ONLY
int (*init_machine)(MachineState *ms);
#ifndef CONFIG_USER_ONLY
void (*setup_post)(MachineState *ms, AccelState *accel);
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
@ -67,11 +67,15 @@ typedef struct AccelClass {
OBJECT_GET_CLASS(AccelClass, (obj), TYPE_ACCEL)
AccelClass *accel_find(const char *opt_name);
AccelState *current_accel(void);
void accel_init_interfaces(AccelClass *ac);
#ifndef CONFIG_USER_ONLY
int accel_init_machine(AccelState *accel, MachineState *ms);
/* Called just before os_setup_post (ie just before drop OS privs) */
void accel_setup_post(MachineState *ms);
#endif /* !CONFIG_USER_ONLY */
AccelState *current_accel(void);
#endif
#endif /* QEMU_ACCEL_H */


@ -0,0 +1,45 @@
/*
* Accelerator OPS, used for cpus.c module
*
* Copyright 2021 SUSE LLC
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef ACCEL_OPS_H
#define ACCEL_OPS_H
#include "qom/object.h"
#define ACCEL_OPS_SUFFIX "-ops"
#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX
#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS)
typedef struct AccelOpsClass AccelOpsClass;
DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
/* cpus.c operations interface */
struct AccelOpsClass {
/*< private >*/
ObjectClass parent_class;
/*< public >*/
/* initialization function called when accel is chosen */
void (*ops_init)(AccelOpsClass *ops);
void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
void (*kick_vcpu_thread)(CPUState *cpu);
void (*synchronize_post_reset)(CPUState *cpu);
void (*synchronize_post_init)(CPUState *cpu);
void (*synchronize_state)(CPUState *cpu);
void (*synchronize_pre_loadvm)(CPUState *cpu);
void (*handle_interrupt)(CPUState *cpu, int mask);
int64_t (*get_virtual_clock)(void);
int64_t (*get_elapsed_ticks)(void);
};
#endif /* ACCEL_OPS_H */
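
A detail that is easy to miss when reading accel-softmmu.c above: an AccelClass is tied to its AccelOpsClass purely by this naming convention; there is no explicit registration call linking the two. The short standalone check below spells out the string arithmetic. TYPE_ACCEL and ACCEL_CLASS_NAME are reproduced from include/qemu/accel.h from memory (they are not part of the hunks on this page), so treat those two defines as assumptions.

/* Standalone illustration of the name-based lookup in accel_init_ops_interfaces(). */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define TYPE_ACCEL           "accel"                   /* assumed, from qemu/accel.h */
#define ACCEL_CLASS_NAME(a)  (a "-" TYPE_ACCEL)        /* assumed: "kvm" -> "kvm-accel" */

#define ACCEL_OPS_SUFFIX     "-ops"                    /* quoted from accel-ops.h above */
#define TYPE_ACCEL_OPS       "accel" ACCEL_OPS_SUFFIX  /* "accel-ops" */
#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS) /* "kvm" -> "kvm-accel-ops" */

int main(void)
{
    char built[64];

    /* accel_init_ops_interfaces() builds "<AccelClass name>" + "-ops" ... */
    snprintf(built, sizeof(built), "%s" ACCEL_OPS_SUFFIX, ACCEL_CLASS_NAME("kvm"));

    /* ... which must match the name each accelerator registers its ops class
     * under, e.g. kvm-accel-ops.c: .name = ACCEL_OPS_NAME("kvm") */
    assert(strcmp(built, ACCEL_OPS_NAME("kvm")) == 0);
    printf("%s\n", built);    /* prints: kvm-accel-ops */
    return 0;
}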


@ -2,30 +2,14 @@
#define QEMU_CPUS_H
#include "qemu/timer.h"
#include "sysemu/accel-ops.h"
/* cpus.c */
/* register accel-specific operations */
void cpus_register_accel(const AccelOpsClass *i);
/* CPU execution threads */
/* accel/dummy-cpus.c */
typedef struct CpusAccel {
void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY */
void (*kick_vcpu_thread)(CPUState *cpu);
void (*synchronize_post_reset)(CPUState *cpu);
void (*synchronize_post_init)(CPUState *cpu);
void (*synchronize_state)(CPUState *cpu);
void (*synchronize_pre_loadvm)(CPUState *cpu);
void (*handle_interrupt)(CPUState *cpu, int mask);
int64_t (*get_virtual_clock)(void);
int64_t (*get_elapsed_ticks)(void);
} CpusAccel;
/* register accel-specific cpus interface implementation */
void cpus_register_accel(const CpusAccel *i);
/* Create a dummy vcpu for CpusAccel->create_vcpu_thread */
/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */
void dummy_start_vcpu_thread(CPUState *);
/* interface available for cpus accelerator threads */


@ -13,7 +13,7 @@
#ifndef HVF_H
#define HVF_H
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "qom/object.h"
#ifdef CONFIG_HVF


@ -17,7 +17,7 @@
#include "qemu/queue.h"
#include "hw/core/cpu.h"
#include "exec/memattrs.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "qom/object.h"
#ifdef NEED_CPU_H


@ -10,7 +10,7 @@
#define QEMU_KVM_INT_H
#include "exec/memory.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/kvm.h"
typedef struct KVMSlot


@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/units.h"
#include "qemu/accel.h"
#include "sysemu/tcg.h"
#include "qemu-version.h"
#include <sys/syscall.h>
@ -701,8 +702,12 @@ int main(int argc, char **argv, char **envp)
cpu_type = parse_cpu_option(cpu_model);
/* init tcg before creating CPUs and to get qemu_host_page_size */
tcg_exec_init(0, false);
{
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
ac->init_machine(NULL);
accel_init_interfaces(ac);
}
cpu = cpu_create(cpu_type);
env = cpu->env_ptr;
cpu_reset(cpu);


@ -128,7 +128,7 @@ void hw_error(const char *fmt, ...)
/*
* The chosen accelerator is supposed to register this.
*/
static const CpusAccel *cpus_accel;
static const AccelOpsClass *cpus_accel;
void cpu_synchronize_all_states(void)
{
@ -594,11 +594,11 @@ void cpu_remove_sync(CPUState *cpu)
qemu_mutex_lock_iothread();
}
void cpus_register_accel(const CpusAccel *ca)
void cpus_register_accel(const AccelOpsClass *ops)
{
assert(ca != NULL);
assert(ca->create_vcpu_thread != NULL); /* mandatory */
cpus_accel = ca;
assert(ops != NULL);
assert(ops->create_vcpu_thread != NULL); /* mandatory */
cpus_accel = ops;
}
void qemu_init_vcpu(CPUState *cpu)
@ -618,7 +618,7 @@ void qemu_init_vcpu(CPUState *cpu)
cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
/* accelerators all implement the CpusAccel interface */
/* accelerators all implement the AccelOpsClass */
g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
cpus_accel->create_vcpu_thread(cpu);


@ -32,7 +32,7 @@
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"


@ -24,6 +24,11 @@
#include "qemu/cutils.h"
#include "qemu/cacheflush.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
@ -840,6 +845,7 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
}
}
#ifdef CONFIG_TCG
/* Return true if this watchpoint address matches the specified
* access (ie the address range covered by the watchpoint overlaps
* partially or completely with the address range covered by the
@ -873,6 +879,80 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
return ret;
}
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
qemu_mutex_lock_iothread();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
qemu_mutex_unlock_iothread();
return;
}
if (cc->tcg_ops->adjust_watchpoint_address) {
/* this is currently used only by ARM BE32 */
addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
}
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)
&& (wp->flags & flags)) {
if (replay_running_debug()) {
/*
* Don't process the watchpoints when we are
* in a reverse debugging operation.
*/
replay_breakpoint();
return;
}
if (flags == BP_MEM_READ) {
wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
}
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint &&
!cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
mmap_lock();
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
mmap_unlock();
if (ra) {
cpu_restore_state(cpu, ra, true);
}
cpu_loop_exit_noexc(cpu);
}
}
} else {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
#endif /* CONFIG_TCG */
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
@ -2359,75 +2439,6 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
return block->offset + offset;
}
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
qemu_mutex_lock_iothread();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
qemu_mutex_unlock_iothread();
return;
}
addr = cc->adjust_watchpoint_address(cpu, addr, len);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)
&& (wp->flags & flags)) {
if (replay_running_debug()) {
/*
* Don't process the watchpoints when we are
* in a reverse debugging operation.
*/
replay_breakpoint();
return;
}
if (flags == BP_MEM_READ) {
wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
}
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
if (wp->flags & BP_CPU &&
!cc->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
mmap_lock();
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
mmap_unlock();
if (ra) {
cpu_restore_state(cpu, ra, true);
}
cpu_loop_exit_noexc(cpu);
}
}
} else {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, void *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,


@ -20,7 +20,7 @@
#include "exec/ioport.h"
#include "exec/memory.h"
#include "hw/irq.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/cpu-timers.h"
#include "qemu/config-file.h"
#include "qemu/option.h"


@ -44,7 +44,7 @@
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "hw/usb.h"
#include "hw/isa/isa.h"
#include "hw/scsi/scsi.h"
@ -1726,7 +1726,8 @@ static bool object_create_early(const char *type, QemuOpts *opts)
return false;
}
/* Allocation of large amounts of memory may delay
/*
* Allocation of large amounts of memory may delay
* chardev initialization for too long, and trigger timeouts
* on software that waits for a monitor socket to be created
* (e.g. libvirt).
@ -3497,7 +3498,7 @@ void qemu_init(int argc, char **argv, char **envp)
*
* Machine compat properties: object_set_machine_compat_props().
* Accelerator compat props: object_set_accelerator_compat_props(),
* called from configure_accelerator().
* called from do_configure_accelerator().
*/
machine_class = MACHINE_GET_CLASS(current_machine);
@ -3519,6 +3520,8 @@ void qemu_init(int argc, char **argv, char **envp)
if (cpu_option) {
current_machine->cpu_type = parse_cpu_option(cpu_option);
}
/* NB: for machine none cpu_type could STILL be NULL here! */
accel_init_interfaces(ACCEL_GET_CLASS(current_machine->accelerator));
qemu_resolve_machine_memdev();
parse_numa_opts(current_machine);


@ -206,6 +206,20 @@ static void alpha_cpu_initfn(Object *obj)
#endif
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.tlb_fill = alpha_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void alpha_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -217,22 +231,17 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = alpha_cpu_class_by_name;
cc->has_work = alpha_cpu_has_work;
cc->do_interrupt = alpha_cpu_do_interrupt;
cc->cpu_exec_interrupt = alpha_cpu_exec_interrupt;
cc->dump_state = alpha_cpu_dump_state;
cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register;
cc->tlb_fill = alpha_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = alpha_cpu_do_transaction_failed;
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_alpha_cpu;
#endif
cc->disas_set_info = alpha_cpu_disas_set_info;
cc->tcg_initialize = alpha_translate_init;
cc->tcg_ops = &alpha_tcg_ops;
cc->gdb_num_core_regs = 67;
}
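
The alpha hunk above shows the refactor pattern that every per-target hunk below repeats: TCG-only hooks (initialize, do_interrupt, tlb_fill, and so on) move out of CPUClass into a per-target static struct TCGCPUOps, and the class keeps a single tcg_ops pointer to it. The following is only a rough, self-contained sketch of that shape, using made-up Toy* names rather than QEMU's real types; it illustrates the pattern and is not code from this series:

#include <stdio.h>

/* Toy illustration only -- hypothetical names, not part of this commit. */

typedef struct ToyCPUState ToyCPUState;

typedef struct ToyTCGOps {
    void (*initialize)(void);
    void (*do_interrupt)(ToyCPUState *cpu);
} ToyTCGOps;

typedef struct ToyCPUClass {
    const char *name;
    void (*set_pc)(ToyCPUState *cpu, unsigned long pc);  /* non-TCG hook stays on the class */
    const ToyTCGOps *tcg_ops;                             /* TCG-only hooks behind one pointer */
} ToyCPUClass;

struct ToyCPUState {
    const ToyCPUClass *cc;
    unsigned long pc;
};

static void toy_translate_init(void) { puts("translator init"); }
static void toy_do_interrupt(ToyCPUState *cpu) { printf("interrupt at pc=0x%lx\n", cpu->pc); }
static void toy_set_pc(ToyCPUState *cpu, unsigned long pc) { cpu->pc = pc; }

/* Counterpart of the per-target "static struct TCGCPUOps xxx_tcg_ops = { ... }". */
static const ToyTCGOps toy_tcg_ops = {
    .initialize   = toy_translate_init,
    .do_interrupt = toy_do_interrupt,
};

/* Counterpart of xxx_cpu_class_init(): one pointer assignment replaces the
 * scattered per-callback assignments removed in the hunks above. */
static void toy_cpu_class_init(ToyCPUClass *cc)
{
    cc->name = "toy";
    cc->set_pc = toy_set_pc;
    cc->tcg_ops = &toy_tcg_ops;
}

int main(void)
{
    ToyCPUClass cc;
    ToyCPUState cpu = { .cc = &cc };

    toy_cpu_class_init(&cc);
    cc.tcg_ops->initialize();
    cc.set_pc(&cpu, 0x100);
    cpu.cc->tcg_ops->do_interrupt(&cpu);  /* callers go through cc->tcg_ops->... */
    return 0;
}

The payoff visible in the rest of the diff is that the CONFIG_TCG guards in each class_init collapse around one cc->tcg_ops assignment instead of a dozen separate callback assignments.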


@ -26,6 +26,9 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
@ -54,8 +57,9 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
}
}
static void arm_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@ -70,6 +74,7 @@ static void arm_cpu_synchronize_from_tb(CPUState *cs,
env->regs[15] = tb->pc;
}
}
#endif /* CONFIG_TCG */
static bool arm_cpu_has_work(CPUState *cs)
{
@ -588,7 +593,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
found:
cs->exception_index = excp_idx;
env->exception.target_el = target_el;
cc->do_interrupt(cs);
cc->tcg_ops->do_interrupt(cs);
return true;
}
@ -2240,6 +2245,24 @@ static gchar *arm_gdb_arch_name(CPUState *cs)
return g_strdup("arm");
}
#ifdef CONFIG_TCG
static struct TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
#if !defined(CONFIG_USER_ONLY)
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
.adjust_watchpoint_address = arm_adjust_watchpoint_address,
.debug_check_watchpoint = arm_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
@ -2254,14 +2277,11 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = arm_cpu_class_by_name;
cc->has_work = arm_cpu_has_work;
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
cc->asidx_from_attrs = arm_asidx_from_attrs;
cc->vmsd = &vmstate_arm_cpu;
@ -2275,17 +2295,10 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
cc->debug_excp_handler = arm_debug_excp_handler;
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
#if !defined(CONFIG_USER_ONLY)
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
#endif
cc->tcg_ops = &arm_tcg_ops;
#endif /* CONFIG_TCG */
}
#ifdef CONFIG_KVM


@ -21,6 +21,9 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "qemu/module.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
@ -805,7 +808,6 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
cc->gdb_num_core_regs = 34;


@ -10,11 +10,15 @@
#include "qemu/osdep.h"
#include "cpu.h"
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
#ifdef CONFIG_TCG
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
@ -33,11 +37,12 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (interrupt_request & CPU_INTERRUPT_HARD
&& (armv7m_nvic_can_take_pending_exception(env->nvic))) {
cs->exception_index = EXCP_IRQ;
cc->do_interrupt(cs);
cc->tcg_ops->do_interrupt(cs);
ret = true;
}
return ret;
}
#endif /* CONFIG_TCG */
static void arm926_initfn(Object *obj)
{
@ -658,17 +663,34 @@ static void pxa270c5_initfn(Object *obj)
cpu->reset_sctlr = 0x00000078;
}
#ifdef CONFIG_TCG
static struct TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
#if !defined(CONFIG_USER_ONLY)
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
.adjust_watchpoint_address = arm_adjust_watchpoint_address,
.debug_check_watchpoint = arm_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
static void arm_v7m_class_init(ObjectClass *oc, void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
acc->info = data;
#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_v7m_cpu_do_interrupt;
#endif
#ifdef CONFIG_TCG
cc->tcg_ops = &arm_v7m_tcg_ops;
#endif /* CONFIG_TCG */
cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
cc->gdb_core_xml_file = "arm-m-profile.xml";
}


@ -9969,6 +9969,10 @@ static void handle_semihosting(CPUState *cs)
* Do any appropriate logging, handle PSCI calls, and then hand off
* to the AArch64-entry or AArch32-entry function depending on the
* target exception level's register width.
*
* Note: this is used for both TCG (as the do_interrupt tcg op),
* and KVM to re-inject guest debug exceptions, and to
* inject a Synchronous-External-Abort.
*/
void arm_cpu_do_interrupt(CPUState *cs)
{


@ -171,6 +171,12 @@ static inline int r14_bank_number(int mode)
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
const struct TranslationBlock *tb);
#endif /* CONFIG_TCG */
enum arm_fprounding {
FPROUNDING_TIEEVEN,
FPROUNDING_POSINF,


@ -946,7 +946,6 @@ static void kvm_inject_arm_sea(CPUState *c)
{
ARMCPU *cpu = ARM_CPU(c);
CPUARMState *env = &cpu->env;
CPUClass *cc = CPU_GET_CLASS(c);
uint32_t esr;
bool same_el;
@ -962,7 +961,7 @@ static void kvm_inject_arm_sea(CPUState *c)
env->exception.syndrome = esr;
cc->do_interrupt(c);
arm_cpu_do_interrupt(c);
}
#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
@ -1493,7 +1492,6 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
int hsr_ec = syn_get_ec(debug_exit->hsr);
ARMCPU *cpu = ARM_CPU(cs);
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = &cpu->env;
/* Ensure PC is synchronised */
@ -1547,7 +1545,7 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
env->exception.vaddress = debug_exit->far;
env->exception.target_el = 1;
qemu_mutex_lock_iothread();
cc->do_interrupt(cs);
arm_cpu_do_interrupt(cs);
qemu_mutex_unlock_iothread();
return false;


@ -184,6 +184,19 @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, "\n");
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps avr_tcg_ops = {
.initialize = avr_cpu_tcg_init,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.tlb_fill = avr_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = avr_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void avr_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -198,21 +211,17 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = avr_cpu_class_by_name;
cc->has_work = avr_cpu_has_work;
cc->do_interrupt = avr_cpu_do_interrupt;
cc->cpu_exec_interrupt = avr_cpu_exec_interrupt;
cc->dump_state = avr_cpu_dump_state;
cc->set_pc = avr_cpu_set_pc;
cc->memory_rw_debug = avr_cpu_memory_rw_debug;
cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
cc->tlb_fill = avr_cpu_tlb_fill;
cc->vmsd = &vms_avr_cpu;
cc->disas_set_info = avr_cpu_disas_set_info;
cc->tcg_initialize = avr_cpu_tcg_init;
cc->synchronize_from_tb = avr_cpu_synchronize_from_tb;
cc->gdb_read_register = avr_cpu_gdb_read_register;
cc->gdb_write_register = avr_cpu_gdb_write_register;
cc->gdb_num_core_regs = 35;
cc->gdb_core_xml_file = "avr-cpu.xml";
cc->tcg_ops = &avr_tcg_ops;
}
/*


@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"
@ -34,7 +35,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (interrupt_request & CPU_INTERRUPT_RESET) {
if (cpu_interrupts_enabled(env)) {
cs->exception_index = EXCP_RESET;
cc->do_interrupt(cs);
cc->tcg_ops->do_interrupt(cs);
cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
@ -45,7 +46,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
int index = ctz32(env->intsrc);
cs->exception_index = EXCP_INT(index);
cc->do_interrupt(cs);
cc->tcg_ops->do_interrupt(cs);
env->intsrc &= env->intsrc - 1; /* clear the interrupt */
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;


@ -193,15 +193,36 @@ static void cris_cpu_initfn(Object *obj)
#endif
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps crisv10_tcg_ops = {
.initialize = cris_initialize_crisv10_tcg,
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = crisv10_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static struct TCGCPUOps crisv32_tcg_ops = {
.initialize = cris_initialize_tcg,
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = cris_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
ccc->vr = 8;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
cc->tcg_initialize = cris_initialize_crisv10_tcg;
cc->tcg_ops = &crisv10_tcg_ops;
}
static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
@ -210,9 +231,8 @@ static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
ccc->vr = 9;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
cc->tcg_initialize = cris_initialize_crisv10_tcg;
cc->tcg_ops = &crisv10_tcg_ops;
}
static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
@ -221,9 +241,8 @@ static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
ccc->vr = 10;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
cc->tcg_initialize = cris_initialize_crisv10_tcg;
cc->tcg_ops = &crisv10_tcg_ops;
}
static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
@ -232,9 +251,8 @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
ccc->vr = 11;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
cc->tcg_initialize = cris_initialize_crisv10_tcg;
cc->tcg_ops = &crisv10_tcg_ops;
}
static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
@ -243,16 +261,17 @@ static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
ccc->vr = 17;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
cc->tcg_initialize = cris_initialize_crisv10_tcg;
cc->tcg_ops = &crisv10_tcg_ops;
}
static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
ccc->vr = 32;
cc->tcg_ops = &crisv32_tcg_ops;
}
static void cris_cpu_class_init(ObjectClass *oc, void *data)
@ -268,13 +287,10 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = cris_cpu_class_by_name;
cc->has_work = cris_cpu_has_work;
cc->do_interrupt = cris_cpu_do_interrupt;
cc->cpu_exec_interrupt = cris_cpu_exec_interrupt;
cc->dump_state = cris_cpu_dump_state;
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
cc->tlb_fill = cris_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_cris_cpu;
@ -284,7 +300,6 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = cris_disas_set_info;
cc->tcg_initialize = cris_initialize_tcg;
}
#define DEFINE_CRIS_CPU_TYPE(cpu_model, initfn) \


@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "mmu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
@ -299,7 +300,7 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
&& (env->pregs[PR_CCS] & I_FLAG)
&& !env->locked_irq) {
cs->exception_index = EXCP_IRQ;
cc->do_interrupt(cs);
cc->tcg_ops->do_interrupt(cs);
ret = true;
}
if (interrupt_request & CPU_INTERRUPT_NMI) {
@ -311,7 +312,7 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
if ((env->pregs[PR_CCS] & m_flag_archval)) {
cs->exception_index = EXCP_NMI;
cc->do_interrupt(cs);
cc->tcg_ops->do_interrupt(cs);
ret = true;
}
}


@ -71,6 +71,7 @@ static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
info->print_insn = print_insn_hppa;
}
#ifndef CONFIG_USER_ONLY
static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
@ -87,6 +88,7 @@ static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
cpu_loop_exit_restore(cs, retaddr);
}
#endif /* CONFIG_USER_ONLY */
static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
{
@ -129,6 +131,20 @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
return object_class_by_name(TYPE_HPPA_CPU);
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.tlb_fill = hppa_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void hppa_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -140,23 +156,17 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = hppa_cpu_class_by_name;
cc->has_work = hppa_cpu_has_work;
cc->do_interrupt = hppa_cpu_do_interrupt;
cc->cpu_exec_interrupt = hppa_cpu_exec_interrupt;
cc->dump_state = hppa_cpu_dump_state;
cc->set_pc = hppa_cpu_set_pc;
cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
cc->tlb_fill = hppa_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_hppa_cpu;
#endif
cc->do_unaligned_access = hppa_cpu_do_unaligned_access;
cc->disas_set_info = hppa_cpu_disas_set_info;
cc->tcg_initialize = hppa_translate_init;
cc->gdb_num_core_regs = 128;
cc->tcg_ops = &hppa_tcg_ops;
}
static const TypeInfo hppa_cpu_type_info = {


@ -26,7 +26,7 @@
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "hax-cpus.h"
#include "hax-accel-ops.h"
static void *hax_cpu_thread_fn(void *arg)
{
@ -74,12 +74,29 @@ static void hax_start_vcpu_thread(CPUState *cpu)
#endif
}
const CpusAccel hax_cpus = {
.create_vcpu_thread = hax_start_vcpu_thread,
.kick_vcpu_thread = hax_kick_vcpu_thread,
static void hax_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
.synchronize_post_reset = hax_cpu_synchronize_post_reset,
.synchronize_post_init = hax_cpu_synchronize_post_init,
.synchronize_state = hax_cpu_synchronize_state,
.synchronize_pre_loadvm = hax_cpu_synchronize_pre_loadvm,
ops->create_vcpu_thread = hax_start_vcpu_thread;
ops->kick_vcpu_thread = hax_kick_vcpu_thread;
ops->synchronize_post_reset = hax_cpu_synchronize_post_reset;
ops->synchronize_post_init = hax_cpu_synchronize_post_init;
ops->synchronize_state = hax_cpu_synchronize_state;
ops->synchronize_pre_loadvm = hax_cpu_synchronize_pre_loadvm;
}
static const TypeInfo hax_accel_ops_type = {
.name = ACCEL_OPS_NAME("hax"),
.parent = TYPE_ACCEL_OPS,
.class_init = hax_accel_ops_class_init,
.abstract = true,
};
static void hax_accel_ops_register_types(void)
{
type_register_static(&hax_accel_ops_type);
}
type_init(hax_accel_ops_register_types);
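
This hax hunk, like the hvf and whpx hunks further down, replaces the old const CpusAccel callback table with an AccelOpsClass: a class_init fills in the hooks, and the class is registered as a QOM type named through ACCEL_OPS_NAME(), so hax_accel_init() no longer has to call cpus_register_accel(). The sketch below is only a loose, self-contained illustration of the underlying idea (register a named ops table once, look it up by name later), with made-up Toy* names instead of QEMU's QOM machinery:

#include <stdio.h>
#include <string.h>

/* Toy illustration only -- hypothetical names, not part of this commit. */

typedef struct ToyAccelOps {
    const char *name;                    /* e.g. "toyaccel-accel-ops" */
    void (*create_vcpu_thread)(int cpu);
    void (*kick_vcpu_thread)(int cpu);
} ToyAccelOps;

#define MAX_OPS 8
static ToyAccelOps registry[MAX_OPS];
static int num_ops;

/* Counterpart of type_register_static(): record the ops class up front. */
static void toy_register_ops(ToyAccelOps ops)
{
    if (num_ops < MAX_OPS) {
        registry[num_ops++] = ops;
    }
}

/* Counterpart of resolving the class via its ACCEL_OPS_NAME() at accel init,
 * instead of the accelerator pushing its table via cpus_register_accel(). */
static const ToyAccelOps *toy_lookup_ops(const char *name)
{
    for (int i = 0; i < num_ops; i++) {
        if (strcmp(registry[i].name, name) == 0) {
            return &registry[i];
        }
    }
    return NULL;
}

static void toyaccel_create_vcpu_thread(int cpu) { printf("vcpu %d thread\n", cpu); }
static void toyaccel_kick_vcpu_thread(int cpu)   { printf("kick vcpu %d\n", cpu); }

int main(void)
{
    /* "type_init": register the ops class once at startup. */
    toy_register_ops((ToyAccelOps){
        .name = "toyaccel-accel-ops",
        .create_vcpu_thread = toyaccel_create_vcpu_thread,
        .kick_vcpu_thread = toyaccel_kick_vcpu_thread,
    });

    /* Later, generic code selects the ops by name and drives vCPUs through it. */
    const ToyAccelOps *ops = toy_lookup_ops("toyaccel-accel-ops");
    if (ops) {
        ops->create_vcpu_thread(0);
        ops->kick_vcpu_thread(0);
    }
    return 0;
}

In the series itself the lookup side lives in the common accel code (compare the accel_init_interfaces() call added to vl.c above), so each accelerator only provides the type seen in this hunk.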


@ -12,8 +12,6 @@
#include "sysemu/cpus.h"
extern const CpusAccel hax_cpus;
#include "hax-interface.h"
#include "hax-i386.h"


@ -28,12 +28,12 @@
#include "exec/address-spaces.h"
#include "qemu-common.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "hw/boards.h"
#include "hax-cpus.h"
#include "hax-accel-ops.h"
#define DEBUG_HAX 0
@ -364,9 +364,6 @@ static int hax_accel_init(MachineState *ms)
!ret ? "working" : "not working",
!ret ? "fast virt" : "emulation");
}
if (ret == 0) {
cpus_register_accel(&hax_cpus);
}
return ret;
}


@ -13,7 +13,7 @@
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hax-cpus.h"
#include "hax-accel-ops.h"
#include "qemu/queue.h"
#define DEBUG_HAX_MEM 0


@ -15,7 +15,7 @@
#include <sys/ioctl.h>
#include "sysemu/cpus.h"
#include "hax-cpus.h"
#include "hax-accel-ops.h"
hax_fd hax_mod_open(void)
{


@ -12,7 +12,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "hax-cpus.h"
#include "hax-accel-ops.h"
/*
* return 0 when success, -1 when driver not loaded,


@ -23,7 +23,7 @@
#include <winioctl.h>
#include <windef.h>
#include "hax-cpus.h"
#include "hax-accel-ops.h"
#define HAX_INVALID_FD INVALID_HANDLE_VALUE


@ -1,7 +1,7 @@
i386_softmmu_ss.add(when: 'CONFIG_HAX', if_true: files(
'hax-all.c',
'hax-mem.c',
'hax-cpus.c',
'hax-accel-ops.c',
))
i386_softmmu_ss.add(when: ['CONFIG_HAX', 'CONFIG_POSIX'], if_true: files('hax-posix.c'))
i386_softmmu_ss.add(when: ['CONFIG_HAX', 'CONFIG_WIN32'], if_true: files('hax-windows.c'))


@ -55,7 +55,7 @@
#include "target/i386/cpu.h"
#include "qemu/guest-random.h"
#include "hvf-cpus.h"
#include "hvf-accel-ops.h"
/*
* The HVF-specific vCPU thread function. This one should only run when the host
@ -121,11 +121,26 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
cpu, QEMU_THREAD_JOINABLE);
}
const CpusAccel hvf_cpus = {
.create_vcpu_thread = hvf_start_vcpu_thread,
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
.synchronize_post_reset = hvf_cpu_synchronize_post_reset,
.synchronize_post_init = hvf_cpu_synchronize_post_init,
.synchronize_state = hvf_cpu_synchronize_state,
.synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm,
ops->create_vcpu_thread = hvf_start_vcpu_thread;
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
ops->synchronize_state = hvf_cpu_synchronize_state;
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
};
static const TypeInfo hvf_accel_ops_type = {
.name = ACCEL_OPS_NAME("hvf"),
.parent = TYPE_ACCEL_OPS,
.class_init = hvf_accel_ops_class_init,
.abstract = true,
};
static void hvf_accel_ops_register_types(void)
{
type_register_static(&hvf_accel_ops_type);
}
type_init(hvf_accel_ops_register_types);


@ -12,8 +12,6 @@
#include "sysemu/cpus.h"
extern const CpusAccel hvf_cpus;
int hvf_init_vcpu(CPUState *);
int hvf_vcpu_exec(CPUState *);
void hvf_cpu_synchronize_state(CPUState *);


@ -16,7 +16,7 @@
#ifndef HVF_I386_H
#define HVF_I386_H
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/hvf.h"
#include "cpu.h"
#include "x86.h"


@ -69,10 +69,10 @@
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"
#include "hvf-cpus.h"
#include "hvf-accel-ops.h"
HVFState *hvf_state;
@ -887,7 +887,6 @@ static int hvf_accel_init(MachineState *ms)
hvf_state = s;
memory_listener_register(&hvf_memory_listener, &address_space_memory);
cpus_register_accel(&hvf_cpus);
return 0;
}


@ -1,6 +1,6 @@
i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
'hvf.c',
'hvf-cpus.c',
'hvf-accel-ops.c',
'x86.c',
'x86_cpuid.c',
'x86_decode.c',


@ -28,7 +28,7 @@
#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"
// TODO: taskswitch handling


@ -32,7 +32,7 @@
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "hvf-cpus.h"
#include "hvf-accel-ops.h"
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
SegmentCache *qseg, bool is_tr)


@ -57,16 +57,22 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
cpu->env.eip = tb->pc - tb->cs_base;
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps x86_tcg_ops = {
.initialize = tcg_x86_init,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.cpu_exec_enter = x86_cpu_exec_enter,
.cpu_exec_exit = x86_cpu_exec_exit,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
.do_interrupt = x86_cpu_do_interrupt,
.tlb_fill = x86_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.debug_excp_handler = breakpoint_handler,
#endif /* !CONFIG_USER_ONLY */
};
void tcg_cpu_common_class_init(CPUClass *cc)
{
cc->do_interrupt = x86_cpu_do_interrupt;
cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
cc->tcg_initialize = tcg_x86_init;
cc->tlb_fill = x86_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
#endif
cc->tcg_ops = &x86_tcg_ops;
}


@ -1,5 +1,5 @@
i386_softmmu_ss.add(when: 'CONFIG_WHPX', if_true: files(
'whpx-all.c',
'whpx-apic.c',
'whpx-cpus.c',
'whpx-accel-ops.c',
))


@ -16,7 +16,7 @@
#include "sysemu/whpx.h"
#include "whpx-internal.h"
#include "whpx-cpus.h"
#include "whpx-accel-ops.h"
static void *whpx_cpu_thread_fn(void *arg)
{
@ -83,12 +83,29 @@ static void whpx_kick_vcpu_thread(CPUState *cpu)
}
}
const CpusAccel whpx_cpus = {
.create_vcpu_thread = whpx_start_vcpu_thread,
.kick_vcpu_thread = whpx_kick_vcpu_thread,
static void whpx_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
.synchronize_post_reset = whpx_cpu_synchronize_post_reset,
.synchronize_post_init = whpx_cpu_synchronize_post_init,
.synchronize_state = whpx_cpu_synchronize_state,
.synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm,
ops->create_vcpu_thread = whpx_start_vcpu_thread;
ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
ops->synchronize_state = whpx_cpu_synchronize_state;
ops->synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm;
}
static const TypeInfo whpx_accel_ops_type = {
.name = ACCEL_OPS_NAME("whpx"),
.parent = TYPE_ACCEL_OPS,
.class_init = whpx_accel_ops_class_init,
.abstract = true,
};
static void whpx_accel_ops_register_types(void)
{
type_register_static(&whpx_accel_ops_type);
}
type_init(whpx_accel_ops_register_types);


@ -12,8 +12,6 @@
#include "sysemu/cpus.h"
extern const CpusAccel whpx_cpus;
int whpx_init_vcpu(CPUState *cpu);
int whpx_vcpu_exec(CPUState *cpu);
void whpx_destroy_vcpu(CPUState *cpu);


@ -13,7 +13,7 @@
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu-common.h"
#include "sysemu/accel.h"
#include "qemu/accel.h"
#include "sysemu/whpx.h"
#include "sysemu/cpus.h"
#include "sysemu/runstate.h"
@ -28,8 +28,11 @@
#include "migration/blocker.h"
#include <winerror.h>
#include "whpx-cpus.h"
#include "whpx-internal.h"
#include "whpx-accel-ops.h"
#include <WinHvPlatform.h>
#include <WinHvEmulation.h>
#define HYPERV_APIC_BUS_FREQUENCY (200000000ULL)
@ -1846,8 +1849,6 @@ static int whpx_accel_init(MachineState *ms)
whpx_memory_init();
cpus_register_accel(&whpx_cpus);
printf("Windows Hypervisor Platform accelerator is operational\n");
return 0;


@ -210,6 +210,19 @@ static ObjectClass *lm32_cpu_class_by_name(const char *cpu_model)
return oc;
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps lm32_tcg_ops = {
.initialize = lm32_translate_init,
.cpu_exec_interrupt = lm32_cpu_exec_interrupt,
.tlb_fill = lm32_cpu_tlb_fill,
.debug_excp_handler = lm32_debug_excp_handler,
#ifndef CONFIG_USER_ONLY
.do_interrupt = lm32_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void lm32_cpu_class_init(ObjectClass *oc, void *data)
{
LM32CPUClass *lcc = LM32_CPU_CLASS(oc);
@ -222,22 +235,18 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = lm32_cpu_class_by_name;
cc->has_work = lm32_cpu_has_work;
cc->do_interrupt = lm32_cpu_do_interrupt;
cc->cpu_exec_interrupt = lm32_cpu_exec_interrupt;
cc->dump_state = lm32_cpu_dump_state;
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register;
cc->tlb_fill = lm32_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu;
#endif
cc->gdb_num_core_regs = 32 + 7;
cc->gdb_stop_before_watchpoint = true;
cc->debug_excp_handler = lm32_debug_excp_handler;
cc->disas_set_info = lm32_cpu_disas_set_info;
cc->tcg_initialize = lm32_translate_init;
cc->tcg_ops = &lm32_tcg_ops;
}
#define DEFINE_LM32_CPU_TYPE(cpu_model, initfn) \


@ -453,6 +453,19 @@ static const VMStateDescription vmstate_m68k_cpu = {
};
#endif
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.tlb_fill = m68k_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
};
static void m68k_cpu_class_init(ObjectClass *c, void *data)
{
M68kCPUClass *mcc = M68K_CPU_CLASS(c);
@ -465,22 +478,18 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->class_by_name = m68k_cpu_class_by_name;
cc->has_work = m68k_cpu_has_work;
cc->do_interrupt = m68k_cpu_do_interrupt;
cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
cc->tlb_fill = m68k_cpu_tlb_fill;
#if defined(CONFIG_SOFTMMU)
cc->do_transaction_failed = m68k_cpu_transaction_failed;
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_m68k_cpu;
#endif
cc->disas_set_info = m68k_cpu_disas_set_info;
cc->tcg_initialize = m68k_tcg_init;
cc->gdb_num_core_regs = 18;
cc->tcg_ops = &m68k_tcg_ops;
}
static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data)


@ -352,6 +352,21 @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
return object_class_by_name(TYPE_MICROBLAZE_CPU);
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.tlb_fill = mb_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
.do_unaligned_access = mb_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void mb_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -364,17 +379,13 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = mb_cpu_class_by_name;
cc->has_work = mb_cpu_has_work;
cc->do_interrupt = mb_cpu_do_interrupt;
cc->do_unaligned_access = mb_cpu_do_unaligned_access;
cc->cpu_exec_interrupt = mb_cpu_exec_interrupt;
cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
cc->synchronize_from_tb = mb_cpu_synchronize_from_tb;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;
cc->tlb_fill = mb_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = mb_cpu_transaction_failed;
cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
dc->vmsd = &vmstate_mb_cpu;
#endif
@ -382,7 +393,7 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_num_core_regs = 32 + 27;
cc->disas_set_info = mb_disas_set_info;
cc->tcg_initialize = mb_tcg_init;
cc->tcg_ops = &mb_tcg_ops;
}
static const TypeInfo mb_cpu_type_info = {


@ -257,6 +257,7 @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value)
}
}
#ifdef CONFIG_TCG
static void mips_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@ -267,6 +268,7 @@ static void mips_cpu_synchronize_from_tb(CPUState *cs,
env->hflags &= ~MIPS_HFLAG_BMASK;
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}
#endif /* CONFIG_TCG */
static bool mips_cpu_has_work(CPUState *cs)
{
@ -661,6 +663,26 @@ static Property mips_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
/*
* NB: cannot be const, as some elements are changed for specific
* mips hardware (see hw/mips/jazz.c).
*/
static struct TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.tlb_fill = mips_cpu_tlb_fill,
#if !defined(CONFIG_USER_ONLY)
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
.do_unaligned_access = mips_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
static void mips_cpu_class_init(ObjectClass *c, void *data)
{
MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
@ -674,27 +696,20 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->class_by_name = mips_cpu_class_by_name;
cc->has_work = mips_cpu_has_work;
cc->do_interrupt = mips_cpu_do_interrupt;
cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = mips_cpu_do_transaction_failed;
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_mips_cpu;
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = mips_tcg_init;
cc->tlb_fill = mips_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = 73;
cc->gdb_stop_before_watchpoint = true;
#ifdef CONFIG_TCG
cc->tcg_ops = &mips_tcg_ops;
#endif /* CONFIG_TCG */
}
static const TypeInfo mips_cpu_type_info = {


@ -94,6 +94,17 @@ static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model)
return oc;
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps moxie_tcg_ops = {
.initialize = moxie_translate_init,
.tlb_fill = moxie_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = moxie_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void moxie_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -107,16 +118,14 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = moxie_cpu_class_by_name;
cc->has_work = moxie_cpu_has_work;
cc->do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
cc->tlb_fill = moxie_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_moxie_cpu;
#endif
cc->disas_set_info = moxie_cpu_disas_set_info;
cc->tcg_initialize = moxie_translate_init;
cc->tcg_ops = &moxie_tcg_ops;
}
static void moxielite_initfn(Object *obj)


@ -207,6 +207,18 @@ static Property nios2_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps nios2_tcg_ops = {
.initialize = nios2_tcg_init,
.cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.tlb_fill = nios2_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = nios2_cpu_do_interrupt,
.do_unaligned_access = nios2_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void nios2_cpu_class_init(ObjectClass *oc, void *data)
{
@ -221,20 +233,16 @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = nios2_cpu_class_by_name;
cc->has_work = nios2_cpu_has_work;
cc->do_interrupt = nios2_cpu_do_interrupt;
cc->cpu_exec_interrupt = nios2_cpu_exec_interrupt;
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
cc->tlb_fill = nios2_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
#endif
cc->gdb_read_register = nios2_cpu_gdb_read_register;
cc->gdb_write_register = nios2_cpu_gdb_write_register;
cc->gdb_num_core_regs = 49;
cc->tcg_initialize = nios2_tcg_init;
cc->tcg_ops = &nios2_tcg_ops;
}
static const TypeInfo nios2_cpu_type_info = {


@ -174,6 +174,18 @@ static void openrisc_any_initfn(Object *obj)
| (IMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.tlb_fill = openrisc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
{
OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc);
@ -186,20 +198,17 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = openrisc_cpu_class_by_name;
cc->has_work = openrisc_cpu_has_work;
cc->do_interrupt = openrisc_cpu_do_interrupt;
cc->cpu_exec_interrupt = openrisc_cpu_exec_interrupt;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
cc->tlb_fill = openrisc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_openrisc_cpu;
#endif
cc->gdb_num_core_regs = 32 + 3;
cc->tcg_initialize = openrisc_translate_init;
cc->disas_set_info = openrisc_disas_set_info;
cc->tcg_ops = &openrisc_tcg_ops;
}
/* Sort alphabetically by type name, except for "any". */


@ -10700,6 +10700,7 @@ static void ppc_cpu_reset(DeviceState *dev)
}
#ifndef CONFIG_USER_ONLY
static bool ppc_cpu_is_big_endian(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@ -10710,6 +10711,7 @@ static bool ppc_cpu_is_big_endian(CPUState *cs)
return !msr_le;
}
#ifdef CONFIG_TCG
static void ppc_cpu_exec_enter(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@ -10731,7 +10733,9 @@ static void ppc_cpu_exec_exit(CPUState *cs)
vhc->cpu_exec_exit(cpu->vhyp, cpu);
}
}
#endif
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
static void ppc_cpu_instance_init(Object *obj)
{
@ -10823,6 +10827,23 @@ static Property ppc_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.tlb_fill = ppc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
.do_unaligned_access = ppc_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
static void ppc_cpu_class_init(ObjectClass *oc, void *data)
{
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@ -10841,14 +10862,11 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = ppc_cpu_class_by_name;
cc->has_work = ppc_cpu_has_work;
cc->do_interrupt = ppc_cpu_do_interrupt;
cc->cpu_exec_interrupt = ppc_cpu_exec_interrupt;
cc->dump_state = ppc_cpu_dump_state;
cc->dump_statistics = ppc_cpu_dump_statistics;
cc->set_pc = ppc_cpu_set_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
@ -10877,18 +10895,13 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->virtio_is_big_endian = ppc_cpu_is_big_endian;
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = ppc_translate_init;
cc->tlb_fill = ppc_cpu_tlb_fill;
#endif
#ifndef CONFIG_USER_ONLY
cc->cpu_exec_enter = ppc_cpu_exec_enter;
cc->cpu_exec_exit = ppc_cpu_exec_exit;
#endif
cc->disas_set_info = ppc_disas_set_info;
dc->fw_name = "PowerPC,UNKNOWN";
#ifdef CONFIG_TCG
cc->tcg_ops = &ppc_tcg_ops;
#endif /* CONFIG_TCG */
}
static const TypeInfo ppc_cpu_type_info = {


@ -580,6 +580,21 @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
return NULL;
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.tlb_fill = riscv_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
@ -593,11 +608,8 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->class_by_name = riscv_cpu_class_by_name;
cc->has_work = riscv_cpu_has_work;
cc->do_interrupt = riscv_cpu_do_interrupt;
cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb;
cc->gdb_read_register = riscv_cpu_gdb_read_register;
cc->gdb_write_register = riscv_cpu_gdb_write_register;
cc->gdb_num_core_regs = 33;
@ -609,18 +621,14 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = riscv_cpu_do_transaction_failed;
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
/* For now, mark unmigratable: */
cc->vmsd = &vmstate_riscv_cpu;
#endif
cc->gdb_arch_name = riscv_gdb_arch_name;
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
#ifdef CONFIG_TCG
cc->tcg_initialize = riscv_translate_init;
cc->tlb_fill = riscv_cpu_tlb_fill;
#endif
cc->tcg_ops = &riscv_tcg_ops;
device_class_set_props(dc, riscv_cpu_properties);
}


@ -671,7 +671,7 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
env->badaddr = addr;
riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif
#endif /* !CONFIG_USER_ONLY */
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,


@ -173,6 +173,19 @@ static void rx_cpu_init(Object *obj)
qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2);
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
.tlb_fill = rx_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = rx_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void rx_cpu_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@ -186,20 +199,17 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
cc->class_by_name = rx_cpu_class_by_name;
cc->has_work = rx_cpu_has_work;
cc->do_interrupt = rx_cpu_do_interrupt;
cc->cpu_exec_interrupt = rx_cpu_exec_interrupt;
cc->dump_state = rx_cpu_dump_state;
cc->set_pc = rx_cpu_set_pc;
cc->synchronize_from_tb = rx_cpu_synchronize_from_tb;
cc->gdb_read_register = rx_cpu_gdb_read_register;
cc->gdb_write_register = rx_cpu_gdb_write_register;
cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
cc->disas_set_info = rx_cpu_disas_set_info;
cc->tcg_initialize = rx_translate_init;
cc->tlb_fill = rx_cpu_tlb_fill;
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "rx-core.xml";
cc->tcg_ops = &rx_tcg_ops;
}
static const TypeInfo rx_cpu_info = {


@ -477,6 +477,22 @@ static void s390_cpu_reset_full(DeviceState *dev)
return s390_cpu_reset(s, S390_CPU_RESET_CLEAR);
}
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
.tlb_fill = s390_cpu_tlb_fill,
#if !defined(CONFIG_USER_ONLY)
.cpu_exec_interrupt = s390_cpu_exec_interrupt,
.do_interrupt = s390_cpu_do_interrupt,
.debug_excp_handler = s390x_cpu_debug_excp_handler,
.do_unaligned_access = s390x_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
static void s390_cpu_class_init(ObjectClass *oc, void *data)
{
S390CPUClass *scc = S390_CPU_CLASS(oc);
@ -495,9 +511,6 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
scc->reset = s390_cpu_reset;
cc->class_by_name = s390_cpu_class_by_name,
cc->has_work = s390_cpu_has_work;
#ifdef CONFIG_TCG
cc->do_interrupt = s390_cpu_do_interrupt;
#endif
cc->dump_state = s390_cpu_dump_state;
cc->set_pc = s390_cpu_set_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
@ -507,23 +520,17 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_s390_cpu;
cc->get_crash_info = s390_cpu_get_crash_info;
cc->write_elf64_note = s390_cpu_write_elf64_note;
#ifdef CONFIG_TCG
cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
cc->do_unaligned_access = s390x_cpu_do_unaligned_access;
#endif
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = s390x_translate_init;
cc->tlb_fill = s390_cpu_tlb_fill;
#endif
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
cc->gdb_core_xml_file = "s390x-core64.xml";
cc->gdb_arch_name = s390_gdb_arch_name;
s390_cpu_model_class_register_props(oc);
#ifdef CONFIG_TCG
cc->tcg_ops = &s390_tcg_ops;
#endif /* CONFIG_TCG */
}
static const TypeInfo s390_cpu_type_info = {


@ -634,4 +634,4 @@ void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
}
}
#endif /* CONFIG_USER_ONLY */
#endif /* !CONFIG_USER_ONLY */


@ -206,6 +206,20 @@ static const VMStateDescription vmstate_sh_cpu = {
.unmigratable = 1,
};
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.tlb_fill = superh_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void superh_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -219,24 +233,19 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = superh_cpu_class_by_name;
cc->has_work = superh_cpu_has_work;
cc->do_interrupt = superh_cpu_do_interrupt;
cc->cpu_exec_interrupt = superh_cpu_exec_interrupt;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
cc->tlb_fill = superh_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = superh_cpu_do_unaligned_access;
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = superh_cpu_disas_set_info;
cc->tcg_initialize = sh4_translate_init;
cc->gdb_num_core_regs = 59;
dc->vmsd = &vmstate_sh_cpu;
cc->tcg_ops = &superh_tcg_ops;
}
#define DEFINE_SUPERH_CPU_TYPE(type_name, cinit, initfn) \


@ -848,6 +848,23 @@ static Property sparc_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.tlb_fill = sparc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
.do_unaligned_access = sparc_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
static void sparc_cpu_class_init(ObjectClass *oc, void *data)
{
SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
@ -863,31 +880,25 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = sparc_cpu_class_by_name;
cc->parse_features = sparc_cpu_parse_features;
cc->has_work = sparc_cpu_has_work;
cc->do_interrupt = sparc_cpu_do_interrupt;
cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
#endif
cc->set_pc = sparc_cpu_set_pc;
cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
cc->tlb_fill = sparc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_transaction_failed = sparc_cpu_do_transaction_failed;
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_sparc_cpu;
#endif
cc->disas_set_info = cpu_sparc_disas_set_info;
cc->tcg_initialize = sparc_tcg_init;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
cc->gdb_num_core_regs = 86;
#else
cc->gdb_num_core_regs = 72;
#endif
cc->tcg_ops = &sparc_tcg_ops;
}
static const TypeInfo sparc_cpu_type_info = {


@ -134,6 +134,18 @@ static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps tilegx_tcg_ops = {
.initialize = tilegx_tcg_init,
.cpu_exec_interrupt = tilegx_cpu_exec_interrupt,
.tlb_fill = tilegx_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = tilegx_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -147,13 +159,10 @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = tilegx_cpu_class_by_name;
cc->has_work = tilegx_cpu_has_work;
cc->do_interrupt = tilegx_cpu_do_interrupt;
cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
cc->tlb_fill = tilegx_cpu_tlb_fill;
cc->gdb_num_core_regs = 0;
cc->tcg_initialize = tilegx_tcg_init;
cc->tcg_ops = &tilegx_tcg_ops;
}
static const TypeInfo tilegx_cpu_type_info = {


@ -142,6 +142,14 @@ static void tc27x_initfn(Object *obj)
set_feature(&cpu->env, TRICORE_FEATURE_161);
}
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.tlb_fill = tricore_cpu_tlb_fill,
};
static void tricore_cpu_class_init(ObjectClass *c, void *data)
{
TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c);
@ -162,10 +170,8 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
cc->dump_state = tricore_cpu_dump_state;
cc->set_pc = tricore_cpu_set_pc;
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
cc->tcg_initialize = tricore_tcg_init;
cc->tlb_fill = tricore_cpu_tlb_fill;
cc->tcg_ops = &tricore_tcg_ops;
}
#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \


@ -120,6 +120,18 @@ static const VMStateDescription vmstate_uc32_cpu = {
.unmigratable = 1,
};
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps uc32_tcg_ops = {
.initialize = uc32_translate_init,
.cpu_exec_interrupt = uc32_cpu_exec_interrupt,
.tlb_fill = uc32_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
.do_interrupt = uc32_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
static void uc32_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -131,14 +143,11 @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = uc32_cpu_class_by_name;
cc->has_work = uc32_cpu_has_work;
cc->do_interrupt = uc32_cpu_do_interrupt;
cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
cc->tlb_fill = uc32_cpu_tlb_fill;
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
cc->tcg_initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
cc->tcg_ops = &uc32_tcg_ops;
}
#define DEFINE_UNICORE32_CPU_TYPE(cpu_model, initfn) \


@ -181,6 +181,21 @@ static const VMStateDescription vmstate_xtensa_cpu = {
.unmigratable = 1,
};
#include "hw/core/tcg-cpu-ops.h"
static struct TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.tlb_fill = xtensa_cpu_tlb_fill,
.debug_excp_handler = xtensa_breakpoint_handler,
#ifndef CONFIG_USER_ONLY
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
.do_unaligned_access = xtensa_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -194,23 +209,17 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = xtensa_cpu_class_by_name;
cc->has_work = xtensa_cpu_has_work;
cc->do_interrupt = xtensa_cpu_do_interrupt;
cc->cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
cc->dump_state = xtensa_cpu_dump_state;
cc->set_pc = xtensa_cpu_set_pc;
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
cc->gdb_stop_before_watchpoint = true;
cc->tlb_fill = xtensa_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
cc->do_transaction_failed = xtensa_cpu_do_transaction_failed;
#endif
cc->debug_excp_handler = xtensa_breakpoint_handler;
cc->disas_set_info = xtensa_cpu_disas_set_info;
cc->tcg_initialize = xtensa_translate_init;
dc->vmsd = &vmstate_xtensa_cpu;
cc->tcg_ops = &xtensa_tcg_ops;
}
static const TypeInfo xtensa_cpu_type_info = {

Some files were not shown because too many files have changed in this diff.