qemu-e2k/target/i386/kvm/hyperv.c
Stefan Hajnoczi 195801d700 system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
The Big QEMU Lock (BQL) has many names and they are confusing. The
actual QemuMutex variable is called qemu_global_mutex but it's commonly
referred to as the BQL in discussions and some code comments. The
locking APIs, however, are called qemu_mutex_lock_iothread() and
qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was
split into KVM vcpu threads and the "iothread" (now called the main
loop thread). I have contributed to the confusion myself by introducing
a separate --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the
locking APIs to:
- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names. Subsequent patches
will rename them. There are also comments and documentation that will be
updated in later patches.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Fabiano Rosas <farosas@suse.de>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Acked-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2024-01-08 10:45:43 -05:00

152 lines
4.7 KiB
C

/*
* QEMU KVM Hyper-V support
*
* Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
*
* Authors:
* Andrey Smetanin <asmetanin@virtuozzo.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hyperv.h"
#include "hw/hyperv/hyperv.h"
#include "hyperv-proto.h"
/*
 * Register this vCPU with the common SynIC (Synthetic Interrupt
 * Controller) emulation layer.
 *
 * Returns 0; registration cannot fail at this level.
 */
int hyperv_x86_synic_add(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    hyperv_synic_add(cs);
    return 0;
}
/*
 * Reset the per-vCPU SynIC state.
 *
 * All devices possibly using SynIC have to be reset before calling this
 * to let them remove their SINT routes first.
 */
void hyperv_x86_synic_reset(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    hyperv_synic_reset(cs);
}
/*
 * Push the vCPU's current SynIC MSR state down to the common SynIC
 * emulation: the global enable bit plus the guest-physical addresses of
 * the message and event flag pages (0 when the respective page is
 * disabled).
 */
void hyperv_x86_synic_update(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    bool synic_enabled = env->msr_hv_synic_control & HV_SYNIC_ENABLE;
    hwaddr msg_pa = 0;
    hwaddr evt_pa = 0;

    if (env->msr_hv_synic_msg_page & HV_SIMP_ENABLE) {
        msg_pa = env->msr_hv_synic_msg_page & TARGET_PAGE_MASK;
    }
    if (env->msr_hv_synic_evt_page & HV_SIEFP_ENABLE) {
        evt_pa = env->msr_hv_synic_evt_page & TARGET_PAGE_MASK;
    }

    hyperv_synic_update(CPU(cpu), synic_enabled, msg_pa, evt_pa);
}
/*
 * run_on_cpu callback: apply the vCPU's SynIC MSR state under the BQL.
 * Scheduled via async_safe_run_on_cpu() so the memory-hierarchy change
 * happens while all vCPUs are quiescent; 'data' is unused.
 */
static void async_synic_update(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);

    bql_lock();
    hyperv_x86_synic_update(cpu);
    bql_unlock();
}
/*
 * Handle a KVM_EXIT_HYPERV userspace exit for this vCPU.
 *
 * Dispatches on exit->type:
 *  - KVM_EXIT_HYPERV_SYNIC:  mirror a guest SynIC MSR write into env and
 *    schedule a safe SynIC reconfiguration.
 *  - KVM_EXIT_HYPERV_HCALL:  dispatch a Hyper-V hypercall; the result is
 *    written back into exit->u.hcall.result for KVM to return to the guest.
 *  - KVM_EXIT_HYPERV_SYNDBG: handle a synthetic debugger MSR write; the
 *    status is written back into exit->u.syndbg.status.
 *
 * Returns 0 when the exit was handled, -1 when it was not (unknown exit
 * type, unknown MSR, or the corresponding Hyper-V feature is disabled).
 */
int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
{
CPUX86State *env = &cpu->env;
switch (exit->type) {
case KVM_EXIT_HYPERV_SYNIC:
if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
return -1;
}
/* Record the new value of the SynIC MSR the guest just wrote. */
switch (exit->u.synic.msr) {
case HV_X64_MSR_SCONTROL:
env->msr_hv_synic_control = exit->u.synic.control;
break;
case HV_X64_MSR_SIMP:
env->msr_hv_synic_msg_page = exit->u.synic.msg_page;
break;
case HV_X64_MSR_SIEFP:
env->msr_hv_synic_evt_page = exit->u.synic.evt_page;
break;
default:
return -1;
}
/*
 * this will run in this cpu thread before it returns to KVM, but in a
 * safe environment (i.e. when all cpus are quiescent) -- this is
 * necessary because memory hierarchy is being changed
 */
async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL);
return 0;
case KVM_EXIT_HYPERV_HCALL: {
/* Hypercall code is the low 16 bits of the hypercall input value. */
uint16_t code = exit->u.hcall.input & 0xffff;
bool fast = exit->u.hcall.input & HV_HYPERCALL_FAST;
uint64_t in_param = exit->u.hcall.params[0];
uint64_t out_param = exit->u.hcall.params[1];
switch (code) {
case HV_POST_MESSAGE:
exit->u.hcall.result = hyperv_hcall_post_message(in_param, fast);
break;
case HV_SIGNAL_EVENT:
exit->u.hcall.result = hyperv_hcall_signal_event(in_param, fast);
break;
case HV_POST_DEBUG_DATA:
exit->u.hcall.result =
hyperv_hcall_post_dbg_data(in_param, out_param, fast);
break;
case HV_RETRIEVE_DEBUG_DATA:
/* NB: "retreive" misspelling is the established helper name. */
exit->u.hcall.result =
hyperv_hcall_retreive_dbg_data(in_param, out_param, fast);
break;
case HV_RESET_DEBUG_SESSION:
exit->u.hcall.result =
hyperv_hcall_reset_dbg_session(out_param);
break;
default:
exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
}
return 0;
}
case KVM_EXIT_HYPERV_SYNDBG:
if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
return -1;
}
switch (exit->u.syndbg.msr) {
case HV_X64_MSR_SYNDBG_CONTROL: {
/*
 * Latch the control/send/recv page MSRs, then perform the
 * requested transfer: SEND and RECV bits in the control value
 * trigger a debug-data send/receive respectively.
 */
uint64_t control = exit->u.syndbg.control;
env->msr_hv_syndbg_control = control;
env->msr_hv_syndbg_send_page = exit->u.syndbg.send_page;
env->msr_hv_syndbg_recv_page = exit->u.syndbg.recv_page;
exit->u.syndbg.status = HV_STATUS_SUCCESS;
if (control & HV_SYNDBG_CONTROL_SEND) {
exit->u.syndbg.status =
hyperv_syndbg_send(env->msr_hv_syndbg_send_page,
HV_SYNDBG_CONTROL_SEND_SIZE(control));
} else if (control & HV_SYNDBG_CONTROL_RECV) {
exit->u.syndbg.status =
hyperv_syndbg_recv(env->msr_hv_syndbg_recv_page,
TARGET_PAGE_SIZE);
}
break;
}
case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
/* Guest published a new pending-buffer page; forward it on. */
env->msr_hv_syndbg_pending_page = exit->u.syndbg.pending_page;
hyperv_syndbg_set_pending_page(env->msr_hv_syndbg_pending_page);
break;
default:
return -1;
}
return 0;
default:
return -1;
}
}