system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()

The Big QEMU Lock (BQL) has many names and they are confusing. The
actual QemuMutex variable is called qemu_global_mutex but it's commonly
referred to as the BQL in discussions and some code comments. The
locking APIs, however, are called qemu_mutex_lock_iothread() and
qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was
split into into KVM vcpu threads and the "iothread" (now called the main
loop thread). I have contributed to the confusion myself by introducing
a separate --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the
locking APIs to:
- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names; subsequent patches
will rename them. Comments and documentation will also be updated in
later patches.
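
For illustration, a typical call site changes mechanically (the helper
name below is hypothetical, not taken from this patch):

    /* before */
    qemu_mutex_lock_iothread();
    raise_irq_under_bql();
    qemu_mutex_unlock_iothread();

    /* after */
    bql_lock();
    raise_irq_under_bql();
    bql_unlock();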

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Fabiano Rosas <farosas@suse.de>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Acked-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

@@ -41,7 +41,7 @@ void accel_blocker_init(void)
 
 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
@@ -51,7 +51,7 @@ void accel_ioctl_begin(void)
 
 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
@@ -62,7 +62,7 @@ void accel_ioctl_end(void)
 
 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
@@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
 
 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
@@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     /* Block further invocations of the ioctls outside the BQL. */
     CPU_FOREACH(cpu) {

@@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }

@@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }

@@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }

@@ -806,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
      * should always be with BQL held, serialization is guaranteed.
      * However, let's be sure of it.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     /*
      * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
@@ -1391,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
         trace_kvm_dirty_ring_reaper("wakeup");
         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
 
-        qemu_mutex_lock_iothread();
+        bql_lock();
         kvm_dirty_ring_reap(s, NULL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 
         r->reaper_iteration++;
     }
@@ -2817,7 +2817,7 @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);
 
     do {
@@ -2857,11 +2857,11 @@ int kvm_cpu_exec(CPUState *cpu)
 
 #ifdef KVM_HAVE_MCE_INJECTION
         if (unlikely(have_sigbus_pending)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                     pending_sigbus_addr);
             have_sigbus_pending = false;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif
@@ -2927,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu)
              * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
             */
            trace_kvm_dirty_ring_full(cpu->cpu_index);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             /*
              * We throttle vCPU by making it sleep once it exit from kernel
              * due to dirty ring full. In the dirtylimit scenario, reaping
@@ -2939,7 +2939,7 @@ int kvm_cpu_exec(CPUState *cpu)
             } else {
                 kvm_dirty_ring_reap(kvm_state, NULL);
             }
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             dirtylimit_vcpu_execute(cpu);
             ret = 0;
             break;
@@ -2956,9 +2956,9 @@ int kvm_cpu_exec(CPUState *cpu)
                 break;
             case KVM_SYSTEM_EVENT_CRASH:
                 kvm_cpu_synchronize_state(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 ret = 0;
                 break;
             default:
@@ -2973,7 +2973,7 @@ int kvm_cpu_exec(CPUState *cpu)
     } while (ret == 0);
 
     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (ret < 0) {
         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);

@@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
         tcg_ctx->gen_tb = NULL;
     }
 #endif
-    if (qemu_mutex_iothread_locked()) {
-        qemu_mutex_unlock_iothread();
+    if (bql_locked()) {
+        bql_unlock();
     }
     assert_no_pages_locked();
 }
@@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386)
         if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
             X86CPU *x86_cpu = X86_CPU(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif /* TARGET_I386 */
         if (!cpu_has_work(cpu)) {
@@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #else
             if (replay_exception()) {
                 CPUClass *cc = CPU_GET_CLASS(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 cc->tcg_ops->do_interrupt(cpu);
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu->exception_index = -1;
 
                 if (unlikely(cpu->singlestep_enabled)) {
@@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if !defined(CONFIG_USER_ONLY)
@@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if defined(TARGET_I386)
@@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #endif /* !TARGET_I386 */
@@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
              */
             if (unlikely(cpu->singlestep_enabled)) {
                 cpu->exception_index = EXCP_DEBUG;
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 return true;
             }
             cpu->exception_index = -1;
@@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }
 
         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     /* Finally, check if we need to exit to the main loop. */

@@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                           type, ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset);
     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return int128_make128(b, a);
 }
@@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
                           ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
                     mmu_idx, ra, mr, mr_offset);
     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }

@@ -126,9 +126,9 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
          * We're called without the iothread lock, so must take it while
          * we're calling timer handlers.
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         icount_notify_aio_contexts();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }

@@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu.notifier);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
     do {
         if (cpu_can_run(cpu)) {
             int r;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             r = tcg_cpus_exec(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             switch (r) {
             case EXCP_DEBUG:
                 cpu_handle_guest_debug(cpu);
@@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
                  */
                 break;
             case EXCP_ATOMIC:
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu_exec_step_atomic(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
             default:
                 /* Ignore everything else? */
                 break;
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     tcg_cpus_destroy(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_remove_force_rcu_notifier(&force_rcu.notifier);
     rcu_unregister_thread();
     return NULL;

@@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
         /* Only used for icount_enabled() */
         int64_t cpu_budget = 0;
 
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         replay_mutex_lock();
-        qemu_mutex_lock_iothread();
+        bql_lock();
 
         if (icount_enabled()) {
             int cpu_count = rr_cpu_count();
@@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg)
             if (cpu_can_run(cpu)) {
                 int r;
 
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 if (icount_enabled()) {
                     icount_prepare_for_run(cpu, cpu_budget);
                 }
@@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg)
                 if (icount_enabled()) {
                     icount_process_data(cpu);
                 }
-                qemu_mutex_lock_iothread();
+                bql_lock();
 
                 if (r == EXCP_DEBUG) {
                     cpu_handle_guest_debug(cpu);
                     break;
                 } else if (r == EXCP_ATOMIC) {
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     cpu_exec_step_atomic(cpu);
-                    qemu_mutex_lock_iothread();
+                    bql_lock();
                     break;
                 }
             } else if (cpu->stop) {

@@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     cpu->interrupt_request |= mask;

@@ -649,7 +649,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
 
 void cpu_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     cpu->interrupt_request |= mask;
     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }

@@ -547,7 +547,7 @@ static OSStatus handle_voice_change(
 {
     coreaudioVoiceOut *core = in_client_data;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (core->outputDeviceID) {
         fini_out_device(core);
@@ -557,7 +557,7 @@ static OSStatus handle_voice_change(
         update_device_playback_state(core);
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     return 0;
 }

@@ -351,11 +351,11 @@ void process_queued_cpu_work(CPUState *cpu)
              * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
              * neither CPU can proceed.
              */
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             start_exclusive();
             wi->func(cpu, wi->data);
             end_exclusive();
-            qemu_mutex_lock_iothread();
+            bql_lock();
         } else {
             wi->func(cpu, wi->data);
         }

@@ -108,11 +108,11 @@ static int dump_cleanup(DumpState *s)
     s->guest_note = NULL;
     if (s->resume) {
         if (s->detached) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
         }
         vm_start();
         if (s->detached) {
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
     }
     migrate_del_blocker(&dump_migration_blocker);

@@ -70,14 +70,14 @@ CPUState *cpu_create(const char *typename)
  * BQL here if we need to. cpu_interrupt assumes it is held.*/
 void cpu_reset_interrupt(CPUState *cpu, int mask)
 {
-    bool need_lock = !qemu_mutex_iothread_locked();
+    bool need_lock = !bql_locked();
 
     if (need_lock) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
     cpu->interrupt_request &= ~mask;
     if (need_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
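
The cpu_reset_interrupt() hunk above preserves a common idiom that the
rename leaves intact: take the BQL only if the current thread does not
already hold it. A minimal sketch of the idiom with the new names
(update_shared_state() is a hypothetical helper, not part of this patch):

    static void do_update(void)
    {
        /* Take the BQL only if we do not already hold it. */
        bool need_lock = !bql_locked();

        if (need_lock) {
            bql_lock();
        }
        update_shared_state();  /* hypothetical work that needs the BQL */
        if (need_lock) {
            bql_unlock();
        }
    }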

@@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
 {
     bool use_iommu, pt;
     /* Whether we need to take the BQL on our own */
-    bool take_bql = !qemu_mutex_iothread_locked();
+    bool take_bql = !bql_locked();
 
     assert(as);
@@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
      * it. We'd better make sure we have had it already, or, take it.
      */
     if (take_bql) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
 
     /* Turn off first then on the other */
@@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
     }
 
     if (take_bql) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     return use_iommu;

@@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level)
      * effect immediately. That just leaves interdomain loopback as the case
      * which uses the BH.
      */
-    if (!qemu_mutex_iothread_locked()) {
+    if (!bql_locked()) {
         qemu_bh_schedule(s->gsi_bh);
         return;
     }
@@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param)
      * We need the BQL because set_callback_pci_intx() may call into PCI code,
      * and because we may need to manipulate the old and new GSI levels.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     qemu_mutex_lock(&s->port_lock);
 
     switch (type) {
@@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
     XenEvtchnPort *p = &s->port_table[port];
 
     /* Because it *might* be a PIRQ port */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     switch (p->type) {
     case EVTCHNSTAT_closed:
@@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void)
         return -ENOTSUP;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     qemu_mutex_lock(&s->port_lock);
@@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
     XenEvtchnState *s = xen_evtchn_singleton;
     int pirq;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
         return false;
@@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
         return;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(addr, data);
@@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
         return 1; /* Not a PIRQ */
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
         return false;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {

@@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
         return -ENOENT;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (s->shinfo_gpa) {
         /* If removing shinfo page, turn the kernel magic off first */

@@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
 {
     XenXenstoreState *s = opaque;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     /*
      * If there's a response pending, we obviously can't scribble over

@@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
     ARMCPU *cpu = ARM_CPU(cs->cpu);
     CPUARMState *env = &cpu->env;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                              cs->hppi.grp, cs->hppi.prio);

@@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
     QEMUS390FlicIO *cur, *next;
     uint8_t isc;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     if (!(flic->pending & FLIC_PENDING_IO)) {
         return 0;
     }
@@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
 {
     uint32_t tmp;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     g_assert(flic->pending & FLIC_PENDING_SERVICE);
     tmp = flic->service_param;
     flic->service_param = 0;
@@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
     QEMUS390FlicIO *io;
     uint8_t isc;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
         return NULL;
     }
@@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
 
 void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
     flic->pending &= ~FLIC_PENDING_MCHK_CR;
 }
@@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
 {
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     /* multiplexing is good enough for sclp - kvm does it internally as well */
     flic->service_param |= parm;
     flic->pending |= FLIC_PENDING_SERVICE;
@@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
     QEMUS390FlicIO *io;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     io = g_new0(QEMUS390FlicIO, 1);
     io->id = subchannel_id;
     io->nr = subchannel_nr;
@@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
 {
     QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     flic->pending |= FLIC_PENDING_MCHK_CR;
 
     qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
@@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
 
 bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     return !!flic->pending;
 }
@@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev)
     QEMUS390FlicIO *cur, *next;
     int isc;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     flic->simm = 0;
     flic->nimm = 0;
     flic->pending = 0;

@@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque)
         smp_mb__after_rmw();
 
         if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             edu_raise_irq(edu, FACT_IRQ);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
     }

@@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
     struct SRCSCRResetInfo *ri = data.host_ptr;
     IMX6SRCState *s = ri->s;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
     DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",

@@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
     struct SRCSCRResetInfo *ri = data.host_ptr;
     IMX7SRCState *s = ri->s;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0);

@@ -133,7 +133,7 @@ static bool net_tx_packets(struct XenNetDev *netdev)
     void *page;
     void *tmpbuf = NULL;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     for (;;) {
         rc = netdev->tx_ring.req_cons;
@@ -260,7 +260,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
     RING_IDX rc, rp;
     void *page;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) {
         return -1;
@@ -354,7 +354,7 @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp)
     XenNetDev *netdev = XEN_NET_DEVICE(xendev);
     unsigned int port, rx_copy;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u",
                                   &netdev->tx_ring_ref) != 1) {
@@ -425,7 +425,7 @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp)
 
     trace_xen_netdev_disconnect(netdev->dev);
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     netdev->tx_ring.sring = NULL;
     netdev->rx_ring.sring = NULL;

@@ -515,7 +515,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
     CPUPPCState *env = &cpu->env;
 
     /* The TCG path should also be holding the BQL at this point */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     if (FIELD_EX64(env->msr, MSR, PR)) {
         qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n");

@@ -314,7 +314,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
 {
     PowerPCCPU *cpu = env_archcpu(env);
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     switch ((val >> 28) & 0x3) {
     case 0x0:
@@ -334,7 +334,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
         break;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 }
 
 /* PowerPC 40x internal IRQ controller */

@@ -1304,7 +1304,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
     CPUPPCState *env = &cpu->env;
 
     /* The TCG path should also be holding the BQL at this point */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     g_assert(!vhyp_cpu_in_nested(cpu));

@@ -82,9 +82,9 @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr,
     while (hrdata.received < 8) {
         rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
                                     random_recv, &hrdata);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         qemu_sem_wait(&hrdata.sem);
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
 
     qemu_sem_destroy(&hrdata.sem);

@@ -334,7 +334,7 @@ static void *hpt_prepare_thread(void *opaque)
         pending->ret = H_NO_MEM;
     }
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
         /* Ready to go */
@@ -344,7 +344,7 @@ static void *hpt_prepare_thread(void *opaque)
         free_pending_hpt(pending);
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     return NULL;
 }

@@ -33,7 +33,7 @@
  */
 bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
 {
-    bool iolock = qemu_mutex_iothread_locked();
+    bool drop_bql = bql_locked();
     bool iothread = qemu_in_iothread();
     struct iovec send[2] = {};
     int *fds = NULL;
@@ -63,8 +63,8 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
      * for IOThread case.
      * Also skip lock handling while in a co-routine in the main context.
      */
-    if (iolock && !iothread && !qemu_in_coroutine()) {
-        qemu_mutex_unlock_iothread();
+    if (drop_bql && !iothread && !qemu_in_coroutine()) {
+        bql_unlock();
     }
 
     if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send),
@@ -74,9 +74,9 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
         trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds);
     }
 
-    if (iolock && !iothread && !qemu_in_coroutine()) {
+    if (drop_bql && !iothread && !qemu_in_coroutine()) {
         /* See above comment why skip locking here. */
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
 
     return ret;
@@ -96,7 +96,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
                            size_t *nfds, Error **errp)
 {
     struct iovec iov = { .iov_base = buf, .iov_len = len };
-    bool iolock = qemu_mutex_iothread_locked();
+    bool drop_bql = bql_locked();
     bool iothread = qemu_in_iothread();
     int ret = -1;
@@ -106,14 +106,14 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
      */
     assert(qemu_in_coroutine() || !iothread);
 
-    if (iolock && !iothread && !qemu_in_coroutine()) {
-        qemu_mutex_unlock_iothread();
+    if (drop_bql && !iothread && !qemu_in_coroutine()) {
+        bql_unlock();
     }
 
     ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
 
-    if (iolock && !iothread && !qemu_in_coroutine()) {
-        qemu_mutex_lock_iothread();
+    if (drop_bql && !iothread && !qemu_in_coroutine()) {
+        bql_lock();
     }
 
     return (ret <= 0) ? ret : iov.iov_len;

@@ -400,7 +400,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
         }
 
         if (release_lock) {
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             release_lock = false;
         }

@@ -153,7 +153,7 @@ void qmp_dump_skeys(const char *filename, Error **errp)
         goto out;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     guest_phys_blocks_init(&guest_phys_blocks);
     guest_phys_blocks_append(&guest_phys_blocks);

@@ -143,7 +143,7 @@ static inline bool in_aio_context_home_thread(AioContext *ctx)
     }
 
     if (ctx == qemu_get_aio_context()) {
-        return qemu_mutex_iothread_locked();
+        return bql_locked();
     } else {
         return false;
     }

@@ -248,19 +248,19 @@ GSource *iohandler_get_g_source(void);
 AioContext *iohandler_get_aio_context(void);
 
 /**
- * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
+ * bql_locked: Return lock status of the Big QEMU Lock (BQL)
  *
- * The main loop mutex is the coarsest lock in QEMU, and as such it
+ * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
  * must always be taken outside other locks. This function helps
 * functions take different paths depending on whether the current
- * thread is running within the main loop mutex.
+ * thread is running within the BQL.
  *
  * This function should never be used in the block layer, because
  * unit tests, block layer tools and qemu-storage-daemon do not
  * have a BQL.
  * Please instead refer to qemu_in_main_thread().
  */
-bool qemu_mutex_iothread_locked(void);
+bool bql_locked(void);
 
 /**
  * qemu_in_main_thread: return whether it's possible to safely access
@@ -312,58 +312,57 @@ bool qemu_in_main_thread(void);
     } while (0)
 
 /**
- * qemu_mutex_lock_iothread: Lock the main loop mutex.
+ * bql_lock: Lock the Big QEMU Lock (BQL).
  *
- * This function locks the main loop mutex. The mutex is taken by
+ * This function locks the Big QEMU Lock (BQL). The lock is taken by
  * main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be taken
+ * external events (such as with select). The lock should be taken
  * by threads other than the main loop thread when calling
  * qemu_bh_new(), qemu_set_fd_handler() and basically all other
  * functions documented in this file.
 *
- * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
+ * NOTE: tools currently are single-threaded and bql_lock
  * is a no-op there.
 */
-#define qemu_mutex_lock_iothread() \
-    qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
-void qemu_mutex_lock_iothread_impl(const char *file, int line);
+#define bql_lock() bql_lock_impl(__FILE__, __LINE__)
+void bql_lock_impl(const char *file, int line);
 
 /**
- * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
+ * bql_unlock: Unlock the Big QEMU Lock (BQL).
  *
- * This function unlocks the main loop mutex. The mutex is taken by
+ * This function unlocks the Big QEMU Lock. The lock is taken by
  * main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be unlocked
+ * external events (such as with select). The lock should be unlocked
  * as soon as possible by threads other than the main loop thread,
  * because it prevents the main loop from processing callbacks,
  * including timers and bottom halves.
 *
- * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
+ * NOTE: tools currently are single-threaded and bql_unlock
  * is a no-op there.
 */
-void qemu_mutex_unlock_iothread(void);
+void bql_unlock(void);
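
The documented usage is unchanged by the rename: a thread other than the
main loop thread takes the BQL around calls into main-loop APIs and
releases it promptly. A minimal sketch under those rules (WorkerState,
its fields, and on_ready() are hypothetical):

    static void *worker_thread_fn(void *opaque)
    {
        WorkerState *w = opaque;

        compute_results(w);  /* hypothetical long-running work, no BQL */

        bql_lock();          /* macro form records __FILE__ and __LINE__ */
        qemu_set_fd_handler(w->fd, on_ready, NULL, w);  /* needs the BQL */
        bql_unlock();        /* release promptly so the main loop can run */

        return NULL;
    }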