cpus: add handle_interrupt to the CpusAccel interface
kvm: uses the generic handler. qtest: uses the generic handler. whpx: changed to use the generic handler (identical implementation). hax: changed to use the generic handler (identical implementation). hvf: changed to use the generic handler (identical implementation). tcg: adapt tcg-cpus to point to the tcg-specific handler. Signed-off-by: Claudio Fontana <cfontana@suse.de> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
994aa17200
commit
bb4776be77
@ -47,31 +47,6 @@ typedef struct TCGState TCGState;
|
||||
DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
|
||||
TYPE_TCG_ACCEL)
|
||||
|
||||
/* mask must never be zero, except for A20 change call */
|
||||
static void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
int old_mask;
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
old_mask = cpu->interrupt_request;
|
||||
cpu->interrupt_request |= mask;
|
||||
|
||||
/*
|
||||
* If called from iothread context, wake the target cpu in
|
||||
* case its halted.
|
||||
*/
|
||||
if (!qemu_cpu_is_self(cpu)) {
|
||||
qemu_cpu_kick(cpu);
|
||||
} else {
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
if (icount_enabled() &&
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We default to false if we know other options have been enabled
|
||||
* which are currently incompatible with MTTCG. Otherwise when each
|
||||
@ -128,7 +103,6 @@ static int tcg_init(MachineState *ms)
|
||||
TCGState *s = TCG_STATE(current_accel());
|
||||
|
||||
tcg_exec_init(s->tb_size * 1024 * 1024);
|
||||
cpu_interrupt_handler = tcg_handle_interrupt;
|
||||
mttcg_enabled = s->mttcg_enabled;
|
||||
cpus_register_accel(&tcg_cpus);
|
||||
|
||||
|
@ -543,9 +543,37 @@ static int64_t tcg_get_elapsed_ticks(void)
|
||||
return cpu_get_ticks();
|
||||
}
|
||||
|
||||
/* mask must never be zero, except for A20 change call */
|
||||
static void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
int old_mask;
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
|
||||
old_mask = cpu->interrupt_request;
|
||||
cpu->interrupt_request |= mask;
|
||||
|
||||
/*
|
||||
* If called from iothread context, wake the target cpu in
|
||||
* case its halted.
|
||||
*/
|
||||
if (!qemu_cpu_is_self(cpu)) {
|
||||
qemu_cpu_kick(cpu);
|
||||
} else {
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
if (icount_enabled() &&
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* CpusAccel operations for the TCG accelerator. */
const CpusAccel tcg_cpus = {
    .create_vcpu_thread = tcg_start_vcpu_thread,
    .kick_vcpu_thread = tcg_kick_vcpu_thread,

    /* TCG needs its own handler (icount / icount_decr bookkeeping). */
    .handle_interrupt = tcg_handle_interrupt,

    .get_virtual_clock = tcg_get_virtual_clock,
    .get_elapsed_ticks = tcg_get_elapsed_ticks,
};
|
||||
|
@ -35,8 +35,6 @@
|
||||
#include "qemu/plugin.h"
|
||||
#include "sysemu/hw_accel.h"
|
||||
|
||||
CPUInterruptHandler cpu_interrupt_handler;
|
||||
|
||||
CPUState *cpu_by_arch_id(int64_t id)
|
||||
{
|
||||
CPUState *cpu;
|
||||
@ -394,17 +392,6 @@ static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
|
||||
return addr;
|
||||
}
|
||||
|
||||
/* Default cpu_interrupt handler: mark the request and wake the vcpu. */
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    /* Merge the new interrupt bits into the pending set. */
    cpu->interrupt_request |= mask;

    /* The target cpu may be halted; poke it if we are not that cpu. */
    if (qemu_cpu_is_self(cpu)) {
        return;
    }
    qemu_cpu_kick(cpu);
}
|
||||
|
||||
CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
|
||||
|
||||
static void cpu_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
|
@ -844,12 +844,6 @@ bool cpu_exists(int64_t id);
|
||||
*/
|
||||
CPUState *cpu_by_arch_id(int64_t id);
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
|
||||
typedef void (*CPUInterruptHandler)(CPUState *, int);
|
||||
|
||||
extern CPUInterruptHandler cpu_interrupt_handler;
|
||||
|
||||
/**
|
||||
* cpu_interrupt:
|
||||
* @cpu: The CPU to set an interrupt on.
|
||||
@ -857,17 +851,9 @@ extern CPUInterruptHandler cpu_interrupt_handler;
|
||||
*
|
||||
* Invokes the interrupt handler.
|
||||
*/
|
||||
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    /* Dispatch through the global handler pointer installed by the
     * active accelerator (see the *_init assignments elsewhere). */
    cpu_interrupt_handler(cpu, mask);
}
|
||||
|
||||
#else /* USER_ONLY */
|
||||
|
||||
void cpu_interrupt(CPUState *cpu, int mask);
|
||||
|
||||
#endif /* USER_ONLY */
|
||||
|
||||
#ifdef NEED_CPU_H
|
||||
|
||||
#ifdef CONFIG_SOFTMMU
|
||||
|
@ -16,6 +16,8 @@ typedef struct CpusAccel {
|
||||
void (*synchronize_state)(CPUState *cpu);
|
||||
void (*synchronize_pre_loadvm)(CPUState *cpu);
|
||||
|
||||
void (*handle_interrupt)(CPUState *cpu, int mask);
|
||||
|
||||
int64_t (*get_virtual_clock)(void);
|
||||
int64_t (*get_elapsed_ticks)(void);
|
||||
} CpusAccel;
|
||||
|
@ -225,6 +225,24 @@ int64_t cpus_get_elapsed_ticks(void)
|
||||
return cpu_get_ticks();
|
||||
}
|
||||
|
||||
/* Fallback interrupt handler used when the accel provides none. */
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    /* Record the requested interrupt bits. */
    cpu->interrupt_request |= mask;

    /* Kick the target vcpu unless we are already running on it. */
    if (qemu_cpu_is_self(cpu)) {
        return;
    }
    qemu_cpu_kick(cpu);
}
|
||||
|
||||
/*
 * cpu_interrupt: raise an interrupt on @cpu.
 *
 * Uses the accelerator's handle_interrupt hook when one is registered,
 * otherwise falls back to the generic implementation.
 */
void cpu_interrupt(CPUState *cpu, int mask)
{
    void (*handler)(CPUState *, int) = cpus_accel->handle_interrupt;

    if (handler == NULL) {
        handler = generic_handle_interrupt;
    }
    handler(cpu, mask);
}
|
||||
|
||||
static int do_vm_stop(RunState state, bool send_stop)
|
||||
{
|
||||
int ret = 0;
|
||||
|
@ -297,15 +297,6 @@ int hax_vm_destroy(struct hax_vm *vm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* HAX interrupt handler: identical to the generic one. */
static void hax_handle_interrupt(CPUState *cpu, int mask)
{
    /* Latch the interrupt request bits. */
    cpu->interrupt_request |= mask;

    /* Wake the possibly-halted target vcpu from another thread. */
    if (qemu_cpu_is_self(cpu)) {
        return;
    }
    qemu_cpu_kick(cpu);
}
|
||||
|
||||
static int hax_init(ram_addr_t ram_size, int max_cpus)
|
||||
{
|
||||
struct hax_state *hax = NULL;
|
||||
@ -350,7 +341,6 @@ static int hax_init(ram_addr_t ram_size, int max_cpus)
|
||||
qversion.cur_version = hax_cur_version;
|
||||
qversion.min_version = hax_min_version;
|
||||
hax_notify_qemu_version(hax->vm->fd, &qversion);
|
||||
cpu_interrupt_handler = hax_handle_interrupt;
|
||||
|
||||
return ret;
|
||||
error:
|
||||
|
@ -262,14 +262,6 @@ static void update_apic_tpr(CPUState *cpu)
|
||||
|
||||
#define VECTORING_INFO_VECTOR_MASK 0xff
|
||||
|
||||
/* HVF interrupt handler: identical to the generic one. */
static void hvf_handle_interrupt(CPUState *cpu, int mask)
{
    /* Note the pending interrupt on the target vcpu. */
    cpu->interrupt_request |= mask;

    /* Kick it awake unless we are that vcpu ourselves. */
    if (qemu_cpu_is_self(cpu)) {
        return;
    }
    qemu_cpu_kick(cpu);
}
|
||||
|
||||
void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
|
||||
int direction, int size, int count)
|
||||
{
|
||||
@ -894,7 +886,6 @@ static int hvf_accel_init(MachineState *ms)
|
||||
}
|
||||
|
||||
hvf_state = s;
|
||||
cpu_interrupt_handler = hvf_handle_interrupt;
|
||||
memory_listener_register(&hvf_memory_listener, &address_space_memory);
|
||||
cpus_register_accel(&hvf_cpus);
|
||||
return 0;
|
||||
|
@ -1488,15 +1488,6 @@ static void whpx_memory_init(void)
|
||||
memory_listener_register(&whpx_memory_listener, &address_space_memory);
|
||||
}
|
||||
|
||||
/* WHPX interrupt handler: identical to the generic one. */
static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    /* Set the requested interrupt bits on the vcpu. */
    cpu->interrupt_request |= mask;

    /* Cross-thread raise: make sure a halted vcpu notices. */
    if (qemu_cpu_is_self(cpu)) {
        return;
    }
    qemu_cpu_kick(cpu);
}
|
||||
|
||||
/*
|
||||
* Load the functions from the given library, using the given handle. If a
|
||||
* handle is provided, it is used, otherwise the library is opened. The
|
||||
@ -1651,7 +1642,6 @@ static int whpx_accel_init(MachineState *ms)
|
||||
|
||||
whpx_memory_init();
|
||||
|
||||
cpu_interrupt_handler = whpx_handle_interrupt;
|
||||
cpus_register_accel(&whpx_cpus);
|
||||
|
||||
printf("Windows Hypervisor Platform accelerator is operational\n");
|
||||
|
Loading…
Reference in New Issue
Block a user