commit 9468484fe9

Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging

Pull request

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmWcJMUACgkQnKSrs4Gr
# c8hh/Qf/Wt177UlhBR49OWmmegs8c8yS1mhyawo7YIJM4pqoXCYLaACpcKECXcGU
# rlgyR4ow68EXnnU8+/s2cp2UqHxrla+E2eNqBoTDmkNt3Cko5sJn5G5PM5EYK+mO
# JjFRzn7awRyxD6mGOuaMVoj6OuHbAA/U4JF7FhW0YuRl8v0/mvAxRSfQ4U6Crq/y
# 19Aa1CXHD1GH2CUJsMCY8zT47Dr4DJcvZx5IpcDFaHaYDCkktFwNzdo5IDnCx2M2
# xnP37Qp/Q93cu12lWkVOu8HCT6yhoszahyOqlBxDmo7QeGkskrxGbMyE+vHM3fFI
# aGSxiw193U7/QWu+Cq2/727C3YIq1g==
# =pKUb
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 08 Jan 2024 16:37:25 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  Rename "QEMU global mutex" to "BQL" in comments and docs
  Replace "iothread lock" with "BQL" in comments
  qemu/main-loop: rename qemu_cond_wait_iothread() to qemu_cond_wait_bql()
  qemu/main-loop: rename QEMU_IOTHREAD_LOCK_GUARD to BQL_LOCK_GUARD
  system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
  iothread: Remove unused Error** argument in aio_context_set_aio_params

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
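The series is essentially a mechanical rename of the main-loop locking API: qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread()/qemu_mutex_iothread_locked() become bql_lock()/bql_unlock()/bql_locked(), QEMU_IOTHREAD_LOCK_GUARD becomes BQL_LOCK_GUARD, and qemu_cond_wait_iothread() becomes qemu_cond_wait_bql(). A minimal sketch of what a converted caller looks like follows; the callback names and the update_irq_level()/update_state() helpers are hypothetical, used only to illustrate the calling convention, and are not part of this series.

    /*
     * Sketch only: a hypothetical device callback after the rename.
     * update_irq_level() is a made-up helper.
     */
    static void example_irq_cb(void *opaque, int level)
    {
        bql_lock();                      /* was qemu_mutex_lock_iothread() */
        update_irq_level(opaque, level); /* hypothetical helper */
        bql_unlock();                    /* was qemu_mutex_unlock_iothread() */
    }

    /* Sketch only: scoped locking keeps the same shape under the new names. */
    static void example_scoped_cb(void *opaque)
    {
        BQL_LOCK_GUARD();        /* was QEMU_IOTHREAD_LOCK_GUARD() */
        g_assert(bql_locked());  /* was qemu_mutex_iothread_locked() */
        update_state(opaque);    /* hypothetical helper */
    }

The diff below shows the rename applied across the tree.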
@@ -41,7 +41,7 @@ void accel_blocker_init(void)
 
 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -51,7 +51,7 @@ void accel_ioctl_begin(void)
 
 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -62,7 +62,7 @@ void accel_ioctl_end(void)
 
 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
 
 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     /* Block further invocations of the ioctls outside the BQL. */
     CPU_FOREACH(cpu) {
@@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
    qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@ -806,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
|
||||
* should always be with BQL held, serialization is guaranteed.
|
||||
* However, let's be sure of it.
|
||||
*/
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
/*
|
||||
* First make sure to flush the hardware buffers by kicking all
|
||||
* vcpus out in a synchronous way.
|
||||
@ -1391,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
|
||||
trace_kvm_dirty_ring_reaper("wakeup");
|
||||
r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
kvm_dirty_ring_reap(s, NULL);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
r->reaper_iteration++;
|
||||
}
|
||||
@ -2817,7 +2817,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
return EXCP_HLT;
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
cpu_exec_start(cpu);
|
||||
|
||||
do {
|
||||
@ -2857,11 +2857,11 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
|
||||
#ifdef KVM_HAVE_MCE_INJECTION
|
||||
if (unlikely(have_sigbus_pending)) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
|
||||
pending_sigbus_addr);
|
||||
have_sigbus_pending = false;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2927,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
* still full. Got kicked by KVM_RESET_DIRTY_RINGS.
|
||||
*/
|
||||
trace_kvm_dirty_ring_full(cpu->cpu_index);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
/*
|
||||
* We throttle vCPU by making it sleep once it exit from kernel
|
||||
* due to dirty ring full. In the dirtylimit scenario, reaping
|
||||
@ -2939,7 +2939,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
} else {
|
||||
kvm_dirty_ring_reap(kvm_state, NULL);
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
dirtylimit_vcpu_execute(cpu);
|
||||
ret = 0;
|
||||
break;
|
||||
@ -2956,9 +2956,9 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
break;
|
||||
case KVM_SYSTEM_EVENT_CRASH:
|
||||
kvm_cpu_synchronize_state(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
ret = 0;
|
||||
break;
|
||||
default:
|
||||
@ -2973,7 +2973,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
} while (ret == 0);
|
||||
|
||||
cpu_exec_end(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
if (ret < 0) {
|
||||
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
|
||||
|
@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
|
||||
tcg_ctx->gen_tb = NULL;
|
||||
}
|
||||
#endif
|
||||
if (qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
if (bql_locked()) {
|
||||
bql_unlock();
|
||||
}
|
||||
assert_no_pages_locked();
|
||||
}
|
||||
@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
|
||||
#if defined(TARGET_I386)
|
||||
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
apic_poll_irq(x86_cpu->apic_state);
|
||||
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
#endif /* TARGET_I386 */
|
||||
if (!cpu_has_work(cpu)) {
|
||||
@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
|
||||
#else
|
||||
if (replay_exception()) {
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
cc->tcg_ops->do_interrupt(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
cpu->exception_index = -1;
|
||||
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
|
||||
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
|
||||
int interrupt_request;
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
interrupt_request = cpu->interrupt_request;
|
||||
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
|
||||
/* Mask out external interrupts for this step. */
|
||||
@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return true;
|
||||
}
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
|
||||
cpu->halted = 1;
|
||||
cpu->exception_index = EXCP_HLT;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return true;
|
||||
}
|
||||
#if defined(TARGET_I386)
|
||||
@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
|
||||
do_cpu_init(x86_cpu);
|
||||
cpu->exception_index = EXCP_HALTED;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
else if (interrupt_request & CPU_INTERRUPT_RESET) {
|
||||
replay_interrupt();
|
||||
cpu_reset(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return true;
|
||||
}
|
||||
#endif /* !TARGET_I386 */
|
||||
@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
*/
|
||||
if (unlikely(cpu->singlestep_enabled)) {
|
||||
cpu->exception_index = EXCP_DEBUG;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return true;
|
||||
}
|
||||
cpu->exception_index = -1;
|
||||
@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
}
|
||||
|
||||
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
/* Finally, check if we need to exit to the main loop. */
|
||||
|
@ -1975,7 +1975,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
|
||||
* @size: number of bytes
|
||||
* @mmu_idx: virtual address context
|
||||
* @ra: return address into tcg generated code, or 0
|
||||
* Context: iothread lock held
|
||||
* Context: BQL held
|
||||
*
|
||||
* Load @size bytes from @addr, which is memory-mapped i/o.
|
||||
* The bytes are concatenated in big-endian order with @ret_be.
|
||||
@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
|
||||
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
|
||||
mr = section->mr;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
|
||||
type, ra, mr, mr_offset);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
|
||||
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
|
||||
mr = section->mr;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
|
||||
MMU_DATA_LOAD, ra, mr, mr_offset);
|
||||
b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
|
||||
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
return int128_make128(b, a);
|
||||
}
|
||||
@ -2521,7 +2521,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
|
||||
* @size: number of bytes
|
||||
* @mmu_idx: virtual address context
|
||||
* @ra: return address into tcg generated code, or 0
|
||||
* Context: iothread lock held
|
||||
* Context: BQL held
|
||||
*
|
||||
* Store @size bytes at @addr, which is memory-mapped i/o.
|
||||
* The bytes to store are extracted in little-endian order from @val_le;
|
||||
@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
|
||||
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
|
||||
mr = section->mr;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
|
||||
ra, mr, mr_offset);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
|
||||
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
|
||||
mr = section->mr;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
|
||||
mmu_idx, ra, mr, mr_offset);
|
||||
ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
|
||||
size - 8, mmu_idx, ra, mr, mr_offset + 8);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -123,12 +123,12 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
|
||||
|
||||
if (cpu->icount_budget == 0) {
|
||||
/*
|
||||
* We're called without the iothread lock, so must take it while
|
||||
* We're called without the BQL, so must take it while
|
||||
* we're calling timer handlers.
|
||||
*/
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
icount_notify_aio_contexts();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
rcu_add_force_rcu_notifier(&force_rcu.notifier);
|
||||
tcg_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
r = tcg_cpus_exec(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
switch (r) {
|
||||
case EXCP_DEBUG:
|
||||
cpu_handle_guest_debug(cpu);
|
||||
@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
*/
|
||||
break;
|
||||
case EXCP_ATOMIC:
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
default:
|
||||
/* Ignore everything else? */
|
||||
break;
|
||||
@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
tcg_cpus_destroy(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
|
@ -111,7 +111,7 @@ static void rr_wait_io_event(void)
|
||||
|
||||
while (all_cpu_threads_idle() && replay_can_wait()) {
|
||||
rr_stop_kick_timer();
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
qemu_cond_wait_bql(first_cpu->halt_cond);
|
||||
}
|
||||
|
||||
rr_start_kick_timer();
|
||||
@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
rcu_add_force_rcu_notifier(&force_rcu);
|
||||
tcg_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
@ -198,7 +198,7 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
|
||||
/* wait for initial kick-off after machine start */
|
||||
while (first_cpu->stopped) {
|
||||
qemu_cond_wait_iothread(first_cpu->halt_cond);
|
||||
qemu_cond_wait_bql(first_cpu->halt_cond);
|
||||
|
||||
/* process any pending work */
|
||||
CPU_FOREACH(cpu) {
|
||||
@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
/* Only used for icount_enabled() */
|
||||
int64_t cpu_budget = 0;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
replay_mutex_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
if (icount_enabled()) {
|
||||
int cpu_count = rr_cpu_count();
|
||||
@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
if (icount_enabled()) {
|
||||
icount_prepare_for_run(cpu, cpu_budget);
|
||||
}
|
||||
@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg)
|
||||
if (icount_enabled()) {
|
||||
icount_process_data(cpu);
|
||||
}
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
break;
|
||||
} else if (r == EXCP_ATOMIC) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
cpu_exec_step_atomic(cpu);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
break;
|
||||
}
|
||||
} else if (cpu->stop) {
|
||||
|
@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
|
||||
/* mask must never be zero, except for A20 change call */
|
||||
void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
|
||||
cpu->interrupt_request |= mask;
|
||||
|
||||
|
@ -649,7 +649,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
|
||||
|
||||
void cpu_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
cpu->interrupt_request |= mask;
|
||||
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
|
||||
}
|
||||
|
@ -299,7 +299,7 @@ COREAUDIO_WRAPPER_FUNC(write, size_t, (HWVoiceOut *hw, void *buf, size_t size),
|
||||
#undef COREAUDIO_WRAPPER_FUNC
|
||||
|
||||
/*
|
||||
* callback to feed audiooutput buffer. called without iothread lock.
|
||||
* callback to feed audiooutput buffer. called without BQL.
|
||||
* allowed to lock "buf_mutex", but disallowed to have any other locks.
|
||||
*/
|
||||
static OSStatus audioDeviceIOProc(
|
||||
@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
|
||||
}
|
||||
}
|
||||
|
||||
/* called without iothread lock. */
|
||||
/* called without BQL. */
|
||||
static OSStatus handle_voice_change(
|
||||
AudioObjectID in_object_id,
|
||||
UInt32 in_number_addresses,
|
||||
@ -547,7 +547,7 @@ static OSStatus handle_voice_change(
|
||||
{
|
||||
coreaudioVoiceOut *core = in_client_data;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
if (core->outputDeviceID) {
|
||||
fini_out_device(core);
|
||||
@ -557,7 +557,7 @@ static OSStatus handle_voice_change(
|
||||
update_device_playback_state(core);
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -351,11 +351,11 @@ void process_queued_cpu_work(CPUState *cpu)
|
||||
* BQL, so it goes to sleep; start_exclusive() is sleeping too, so
|
||||
* neither CPU can proceed.
|
||||
*/
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
start_exclusive();
|
||||
wi->func(cpu, wi->data);
|
||||
end_exclusive();
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
} else {
|
||||
wi->func(cpu, wi->data);
|
||||
}
|
||||
|
@@ -226,10 +226,9 @@ instruction. This could be a future optimisation.
 Emulated hardware state
 -----------------------
 
-Currently thanks to KVM work any access to IO memory is automatically
-protected by the global iothread mutex, also known as the BQL (Big
-QEMU Lock). Any IO region that doesn't use global mutex is expected to
-do its own locking.
+Currently thanks to KVM work any access to IO memory is automatically protected
+by the BQL (Big QEMU Lock). Any IO region that doesn't use the BQL is expected
+to do its own locking.
 
 However IO memory isn't the only way emulated hardware state can be
 modified. Some architectures have model specific registers that
|
||||
|
||||
|
||||
This document explains the IOThread feature and how to write code that runs
|
||||
outside the QEMU global mutex.
|
||||
outside the BQL.
|
||||
|
||||
The main loop and IOThreads
|
||||
---------------------------
|
||||
@ -29,13 +29,13 @@ scalability bottleneck on hosts with many CPUs. Work can be spread across
|
||||
several IOThreads instead of just one main loop. When set up correctly this
|
||||
can improve I/O latency and reduce jitter seen by the guest.
|
||||
|
||||
The main loop is also deeply associated with the QEMU global mutex, which is a
|
||||
scalability bottleneck in itself. vCPU threads and the main loop use the QEMU
|
||||
global mutex to serialize execution of QEMU code. This mutex is necessary
|
||||
because a lot of QEMU's code historically was not thread-safe.
|
||||
The main loop is also deeply associated with the BQL, which is a
|
||||
scalability bottleneck in itself. vCPU threads and the main loop use the BQL
|
||||
to serialize execution of QEMU code. This mutex is necessary because a lot of
|
||||
QEMU's code historically was not thread-safe.
|
||||
|
||||
The fact that all I/O processing is done in a single main loop and that the
|
||||
QEMU global mutex is contended by all vCPU threads and the main loop explain
|
||||
BQL is contended by all vCPU threads and the main loop explain
|
||||
why it is desirable to place work into IOThreads.
|
||||
|
||||
The experimental virtio-blk data-plane implementation has been benchmarked and
|
||||
@ -66,7 +66,7 @@ There are several old APIs that use the main loop AioContext:
|
||||
|
||||
Since they implicitly work on the main loop they cannot be used in code that
|
||||
runs in an IOThread. They might cause a crash or deadlock if called from an
|
||||
IOThread since the QEMU global mutex is not held.
|
||||
IOThread since the BQL is not held.
|
||||
|
||||
Instead, use the AioContext functions directly (see include/block/aio.h):
|
||||
* aio_set_fd_handler() - monitor a file descriptor
|
||||
|
@ -594,7 +594,7 @@ blocking the guest and other background operations.
|
||||
Coroutine safety can be hard to prove, similar to thread safety. Common
|
||||
pitfalls are:
|
||||
|
||||
- The global mutex isn't held across ``qemu_coroutine_yield()``, so
|
||||
- The BQL isn't held across ``qemu_coroutine_yield()``, so
|
||||
operations that used to assume that they execute atomically may have
|
||||
to be more careful to protect against changes in the global state.
|
||||
|
||||
|
@ -184,7 +184,7 @@ modes.
|
||||
Reading and writing requests are created by CPU thread of QEMU. Later these
|
||||
requests proceed to block layer which creates "bottom halves". Bottom
|
||||
halves consist of callback and its parameters. They are processed when
|
||||
main loop locks the global mutex. These locks are not synchronized with
|
||||
main loop locks the BQL. These locks are not synchronized with
|
||||
replaying process because main loop also processes the events that do not
|
||||
affect the virtual machine state (like user interaction with monitor).
|
||||
|
||||
|
@ -19,7 +19,7 @@ Triggering reset
|
||||
|
||||
This section documents the APIs which "users" of a resettable object should use
|
||||
to control it. All resettable control functions must be called while holding
|
||||
the iothread lock.
|
||||
the BQL.
|
||||
|
||||
You can apply a reset to an object using ``resettable_assert_reset()``. You need
|
||||
to call ``resettable_release_reset()`` to release the object from reset. To
|
||||
|
@ -108,11 +108,11 @@ static int dump_cleanup(DumpState *s)
|
||||
s->guest_note = NULL;
|
||||
if (s->resume) {
|
||||
if (s->detached) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
vm_start();
|
||||
if (s->detached) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
}
|
||||
migrate_del_blocker(&dump_migration_blocker);
|
||||
|
@ -84,7 +84,7 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
|
||||
}
|
||||
}
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
|
||||
VirtIOBlockDataPlane **dataplane,
|
||||
Error **errp)
|
||||
@ -148,7 +148,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
|
||||
{
|
||||
VirtIOBlock *vblk;
|
||||
@ -179,7 +179,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
|
||||
g_free(s);
|
||||
}
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
int virtio_blk_data_plane_start(VirtIODevice *vdev)
|
||||
{
|
||||
VirtIOBlock *vblk = VIRTIO_BLK(vdev);
|
||||
@ -310,7 +310,7 @@ static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
|
||||
virtio_queue_host_notifier_read(host_notifier);
|
||||
}
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
|
||||
{
|
||||
VirtIOBlock *vblk = VIRTIO_BLK(vdev);
|
||||
|
@ -1539,7 +1539,7 @@ static void virtio_blk_resize(void *opaque)
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
|
||||
|
||||
/*
|
||||
* virtio_notify_config() needs to acquire the global mutex,
|
||||
* virtio_notify_config() needs to acquire the BQL,
|
||||
* so it can't be called from an iothread. Instead, schedule
|
||||
* it to be run in the main context BH.
|
||||
*/
|
||||
|
@ -70,14 +70,14 @@ CPUState *cpu_create(const char *typename)
|
||||
* BQL here if we need to. cpu_interrupt assumes it is held.*/
|
||||
void cpu_reset_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
bool need_lock = !qemu_mutex_iothread_locked();
|
||||
bool need_lock = !bql_locked();
|
||||
|
||||
if (need_lock) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
cpu->interrupt_request &= ~mask;
|
||||
if (need_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -159,7 +159,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
|
||||
*
|
||||
* Use with care; by the time this function returns, the returned pointer is
|
||||
* not protected by RCU anymore. If the caller is not within an RCU critical
|
||||
* section and does not hold the iothread lock, it must have other means of
|
||||
* section and does not hold the BQL, it must have other means of
|
||||
* protecting the pointer, such as a reference to the region that includes
|
||||
* the incoming ram_addr_t.
|
||||
*
|
||||
|
@ -1512,7 +1512,7 @@ void virtio_gpu_reset(VirtIODevice *vdev)
|
||||
g->reset_finished = false;
|
||||
qemu_bh_schedule(g->reset_bh);
|
||||
while (!g->reset_finished) {
|
||||
qemu_cond_wait_iothread(&g->reset_cond);
|
||||
qemu_cond_wait_bql(&g->reset_cond);
|
||||
}
|
||||
} else {
|
||||
virtio_gpu_reset_bh(g);
|
||||
|
@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
|
||||
{
|
||||
bool use_iommu, pt;
|
||||
/* Whether we need to take the BQL on our own */
|
||||
bool take_bql = !qemu_mutex_iothread_locked();
|
||||
bool take_bql = !bql_locked();
|
||||
|
||||
assert(as);
|
||||
|
||||
@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
|
||||
* it. We'd better make sure we have had it already, or, take it.
|
||||
*/
|
||||
if (take_bql) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
/* Turn off first then on the other */
|
||||
@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
|
||||
}
|
||||
|
||||
if (take_bql) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
return use_iommu;
|
||||
|
@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level)
|
||||
* effect immediately. That just leaves interdomain loopback as the case
|
||||
* which uses the BH.
|
||||
*/
|
||||
if (!qemu_mutex_iothread_locked()) {
|
||||
if (!bql_locked()) {
|
||||
qemu_bh_schedule(s->gsi_bh);
|
||||
return;
|
||||
}
|
||||
@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param)
|
||||
* We need the BQL because set_callback_pci_intx() may call into PCI code,
|
||||
* and because we may need to manipulate the old and new GSI levels.
|
||||
*/
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
qemu_mutex_lock(&s->port_lock);
|
||||
|
||||
switch (type) {
|
||||
@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
|
||||
XenEvtchnPort *p = &s->port_table[port];
|
||||
|
||||
/* Because it *might* be a PIRQ port */
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
switch (p->type) {
|
||||
case EVTCHNSTAT_closed:
|
||||
@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
qemu_mutex_lock(&s->port_lock);
|
||||
|
||||
@ -1127,7 +1127,7 @@ int xen_evtchn_reset_op(struct evtchn_reset *reset)
|
||||
return -ESRCH;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
return xen_evtchn_soft_reset();
|
||||
}
|
||||
|
||||
@ -1145,7 +1145,7 @@ int xen_evtchn_close_op(struct evtchn_close *close)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
qemu_mutex_lock(&s->port_lock);
|
||||
|
||||
ret = close_port(s, close->port, &flush_kvm_routes);
|
||||
@ -1272,7 +1272,7 @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
|
||||
if (s->pirq[pirq->pirq].port) {
|
||||
return -EBUSY;
|
||||
@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
|
||||
XenEvtchnState *s = xen_evtchn_singleton;
|
||||
int pirq;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
|
||||
return false;
|
||||
@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
|
||||
return;
|
||||
}
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
pirq = msi_pirq_target(addr, data);
|
||||
|
||||
@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
|
||||
return 1; /* Not a PIRQ */
|
||||
}
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
pirq = msi_pirq_target(address, data);
|
||||
if (!pirq || pirq >= s->nr_pirqs) {
|
||||
@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
|
||||
return false;
|
||||
}
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
pirq = msi_pirq_target(address, data);
|
||||
if (!pirq || pirq >= s->nr_pirqs) {
|
||||
@ -1824,7 +1824,7 @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
QEMU_LOCK_GUARD(&s->port_lock);
|
||||
|
||||
if (map->domid != DOMID_SELF && map->domid != xen_domid) {
|
||||
@ -1884,7 +1884,7 @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
qemu_mutex_lock(&s->port_lock);
|
||||
|
||||
if (!pirq_inuse(s, pirq)) {
|
||||
@ -1924,7 +1924,7 @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
QEMU_LOCK_GUARD(&s->port_lock);
|
||||
|
||||
if (!pirq_inuse(s, pirq)) {
|
||||
@ -1956,7 +1956,7 @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
QEMU_LOCK_GUARD(&s->port_lock);
|
||||
|
||||
if (!pirq_inuse(s, pirq)) {
|
||||
|
@ -176,7 +176,7 @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
QEMU_LOCK_GUARD(&s->gnt_lock);
|
||||
|
||||
xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa);
|
||||
|
@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
if (s->shinfo_gpa) {
|
||||
/* If removing shinfo page, turn the kernel magic off first */
|
||||
|
@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
|
||||
{
|
||||
XenXenstoreState *s = opaque;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
/*
|
||||
* If there's a response pending, we obviously can't scribble over
|
||||
|
@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
|
||||
ARMCPU *cpu = ARM_CPU(cs->cpu);
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
|
||||
trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
|
||||
cs->hppi.grp, cs->hppi.prio);
|
||||
|
@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
|
||||
QEMUS390FlicIO *cur, *next;
|
||||
uint8_t isc;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
if (!(flic->pending & FLIC_PENDING_IO)) {
|
||||
return 0;
|
||||
}
|
||||
@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
g_assert(flic->pending & FLIC_PENDING_SERVICE);
|
||||
tmp = flic->service_param;
|
||||
flic->service_param = 0;
|
||||
@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
|
||||
QEMUS390FlicIO *io;
|
||||
uint8_t isc;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
|
||||
return NULL;
|
||||
}
|
||||
@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
|
||||
|
||||
void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
|
||||
flic->pending &= ~FLIC_PENDING_MCHK_CR;
|
||||
}
|
||||
@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
|
||||
{
|
||||
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
/* multiplexing is good enough for sclp - kvm does it internally as well */
|
||||
flic->service_param |= parm;
|
||||
flic->pending |= FLIC_PENDING_SERVICE;
|
||||
@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
|
||||
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
|
||||
QEMUS390FlicIO *io;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
io = g_new0(QEMUS390FlicIO, 1);
|
||||
io->id = subchannel_id;
|
||||
io->nr = subchannel_nr;
|
||||
@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
|
||||
{
|
||||
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
flic->pending |= FLIC_PENDING_MCHK_CR;
|
||||
|
||||
qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
|
||||
@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
|
||||
|
||||
bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
return !!flic->pending;
|
||||
}
|
||||
|
||||
@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev)
|
||||
QEMUS390FlicIO *cur, *next;
|
||||
int isc;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
flic->simm = 0;
|
||||
flic->nimm = 0;
|
||||
flic->pending = 0;
|
||||
|
@ -36,7 +36,7 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
|
||||
return;
|
||||
}
|
||||
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
|
||||
if (level) {
|
||||
env->CP0_Cause |= 1 << (irq + CP0Ca_IP);
|
||||
|
@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque)
|
||||
smp_mb__after_rmw();
|
||||
|
||||
if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
edu_raise_irq(edu, FACT_IRQ);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
|
||||
struct SRCSCRResetInfo *ri = data.host_ptr;
|
||||
IMX6SRCState *s = ri->s;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
|
||||
DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
|
||||
|
@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
|
||||
struct SRCSCRResetInfo *ri = data.host_ptr;
|
||||
IMX7SRCState *s = ri->s;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0);
|
||||
|
||||
|
@ -133,7 +133,7 @@ static bool net_tx_packets(struct XenNetDev *netdev)
|
||||
void *page;
|
||||
void *tmpbuf = NULL;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
for (;;) {
|
||||
rc = netdev->tx_ring.req_cons;
|
||||
@ -260,7 +260,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
|
||||
RING_IDX rc, rp;
|
||||
void *page;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) {
|
||||
return -1;
|
||||
@ -354,7 +354,7 @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp)
|
||||
XenNetDev *netdev = XEN_NET_DEVICE(xendev);
|
||||
unsigned int port, rx_copy;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u",
|
||||
&netdev->tx_ring_ref) != 1) {
|
||||
@ -425,7 +425,7 @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp)
|
||||
|
||||
trace_xen_netdev_disconnect(netdev->dev);
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
netdev->tx_ring.sring = NULL;
|
||||
netdev->rx_ring.sring = NULL;
|
||||
|
@ -515,7 +515,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
/* The TCG path should also be holding the BQL at this point */
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
|
||||
if (FIELD_EX64(env->msr, MSR, PR)) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n");
|
||||
|
@ -47,7 +47,7 @@ void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
|
||||
unsigned int old_pending;
|
||||
|
||||
/* We may already have the BQL if coming from the reset path */
|
||||
QEMU_IOTHREAD_LOCK_GUARD();
|
||||
BQL_LOCK_GUARD();
|
||||
|
||||
old_pending = env->pending_interrupts;
|
||||
|
||||
@ -314,7 +314,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
|
||||
{
|
||||
PowerPCCPU *cpu = env_archcpu(env);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
switch ((val >> 28) & 0x3) {
|
||||
case 0x0:
|
||||
@ -334,7 +334,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
|
||||
break;
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
/* PowerPC 40x internal IRQ controller */
|
||||
|
@ -1304,7 +1304,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
|
||||
CPUPPCState *env = &cpu->env;
|
||||
|
||||
/* The TCG path should also be holding the BQL at this point */
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
|
||||
g_assert(!vhyp_cpu_in_nested(cpu));
|
||||
|
||||
|
@ -899,7 +899,7 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
|
||||
}
|
||||
return;
|
||||
}
|
||||
qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond);
|
||||
qemu_cond_wait_bql(&spapr->fwnmi_machine_check_interlock_cond);
|
||||
if (spapr->fwnmi_machine_check_addr == -1) {
|
||||
/*
|
||||
* If the machine was reset while waiting for the interlock,
|
||||
|
@ -82,9 +82,9 @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
while (hrdata.received < 8) {
|
||||
rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
|
||||
random_recv, &hrdata);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
qemu_sem_wait(&hrdata.sem);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
qemu_sem_destroy(&hrdata.sem);
|
||||
|
@ -334,7 +334,7 @@ static void *hpt_prepare_thread(void *opaque)
|
||||
pending->ret = H_NO_MEM;
|
||||
}
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
|
||||
/* Ready to go */
|
||||
@ -344,7 +344,7 @@ static void *hpt_prepare_thread(void *opaque)
|
||||
free_pending_hpt(pending);
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -33,7 +33,7 @@
|
||||
*/
|
||||
bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
|
||||
{
|
||||
bool iolock = qemu_mutex_iothread_locked();
|
||||
bool drop_bql = bql_locked();
|
||||
bool iothread = qemu_in_iothread();
|
||||
struct iovec send[2] = {};
|
||||
int *fds = NULL;
|
||||
@ -58,13 +58,13 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
|
||||
assert(qemu_in_coroutine() || !iothread);
|
||||
|
||||
/*
|
||||
* Skip unlocking/locking iothread lock when the IOThread is running
|
||||
* Skip unlocking/locking BQL when the IOThread is running
|
||||
* in co-routine context. Co-routine context is asserted above
|
||||
* for IOThread case.
|
||||
* Also skip lock handling while in a co-routine in the main context.
|
||||
*/
|
||||
if (iolock && !iothread && !qemu_in_coroutine()) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
if (drop_bql && !iothread && !qemu_in_coroutine()) {
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send),
|
||||
@ -74,9 +74,9 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
|
||||
trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds);
|
||||
}
|
||||
|
||||
if (iolock && !iothread && !qemu_in_coroutine()) {
|
||||
if (drop_bql && !iothread && !qemu_in_coroutine()) {
|
||||
/* See above comment why skip locking here. */
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -96,7 +96,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
|
||||
size_t *nfds, Error **errp)
|
||||
{
|
||||
struct iovec iov = { .iov_base = buf, .iov_len = len };
|
||||
bool iolock = qemu_mutex_iothread_locked();
|
||||
bool drop_bql = bql_locked();
|
||||
bool iothread = qemu_in_iothread();
|
||||
int ret = -1;
|
||||
|
||||
@ -106,14 +106,14 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
|
||||
*/
|
||||
assert(qemu_in_coroutine() || !iothread);
|
||||
|
||||
if (iolock && !iothread && !qemu_in_coroutine()) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
if (drop_bql && !iothread && !qemu_in_coroutine()) {
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
|
||||
|
||||
if (iolock && !iothread && !qemu_in_coroutine()) {
|
||||
qemu_mutex_lock_iothread();
|
||||
if (drop_bql && !iothread && !qemu_in_coroutine()) {
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
return (ret <= 0) ? ret : iov.iov_len;
|
||||
|
@ -400,7 +400,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
|
||||
}
|
||||
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
release_lock = false;
|
||||
}
|
||||
|
||||
|
@ -153,7 +153,7 @@ void qmp_dump_skeys(const char *filename, Error **errp)
|
||||
goto out;
|
||||
}
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
guest_phys_blocks_init(&guest_phys_blocks);
|
||||
guest_phys_blocks_append(&guest_phys_blocks);
|
||||
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include "scsi/constants.h"
|
||||
#include "hw/virtio/virtio-bus.h"
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
|
||||
{
|
||||
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
|
||||
@ -93,7 +93,7 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque)
|
||||
}
|
||||
}
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
int virtio_scsi_dataplane_start(VirtIODevice *vdev)
|
||||
{
|
||||
int i;
|
||||
@ -185,7 +185,7 @@ fail_guest_notifiers:
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/* Context: QEMU global mutex held */
|
||||
/* Context: BQL held */
|
||||
void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
|
||||
{
|
||||
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
|
||||
|
@ -143,7 +143,7 @@ static inline bool in_aio_context_home_thread(AioContext *ctx)
|
||||
}
|
||||
|
||||
if (ctx == qemu_get_aio_context()) {
|
||||
return qemu_mutex_iothread_locked();
|
||||
return bql_locked();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
@ -699,8 +699,7 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
|
||||
* @max_batch: maximum number of requests in a batch, 0 means that the
|
||||
* engine will use its default
|
||||
*/
|
||||
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
|
||||
Error **errp);
|
||||
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);
|
||||
|
||||
/**
|
||||
* aio_context_set_thread_pool_params:
|
||||
|
@ -54,7 +54,7 @@ typedef struct BlockJob {
|
||||
|
||||
/**
|
||||
* Speed that was set with @block_job_set_speed.
|
||||
* Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
|
||||
* Always modified and read under the BQL (GLOBAL_STATE_CODE).
|
||||
*/
|
||||
int64_t speed;
|
||||
|
||||
@ -66,7 +66,7 @@ typedef struct BlockJob {
|
||||
|
||||
/**
|
||||
* Block other operations when block job is running.
|
||||
* Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
|
||||
* Always modified and read under the BQL (GLOBAL_STATE_CODE).
|
||||
*/
|
||||
Error *blocker;
|
||||
|
||||
@ -89,7 +89,7 @@ typedef struct BlockJob {
|
||||
|
||||
/**
|
||||
* BlockDriverStates that are involved in this block job.
|
||||
* Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
|
||||
* Always modified and read under the BQL (GLOBAL_STATE_CODE).
|
||||
*/
|
||||
GSList *nodes;
|
||||
} BlockJob;
|
||||
|
@ -92,7 +92,7 @@ RAMBlock *qemu_ram_block_by_name(const char *name);
|
||||
*
|
||||
* By the time this function returns, the returned pointer is not protected
|
||||
* by RCU anymore. If the caller is not within an RCU critical section and
|
||||
* does not hold the iothread lock, it must have other means of protecting the
|
||||
* does not hold the BQL, it must have other means of protecting the
|
||||
* pointer, such as a reference to the memory region that owns the RAMBlock.
|
||||
*/
|
||||
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
|
||||
|
@ -1982,7 +1982,7 @@ int memory_region_get_fd(MemoryRegion *mr);
|
||||
*
|
||||
* Use with care; by the time this function returns, the returned pointer is
|
||||
* not protected by RCU anymore. If the caller is not within an RCU critical
|
||||
* section and does not hold the iothread lock, it must have other means of
|
||||
* section and does not hold the BQL, it must have other means of
|
||||
* protecting the pointer, such as a reference to the region that includes
|
||||
* the incoming ram_addr_t.
|
||||
*
|
||||
@ -1999,7 +1999,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
|
||||
*
|
||||
* Use with care; by the time this function returns, the returned pointer is
|
||||
* not protected by RCU anymore. If the caller is not within an RCU critical
|
||||
* section and does not hold the iothread lock, it must have other means of
|
||||
* section and does not hold the BQL, it must have other means of
|
||||
* protecting the pointer, such as a reference to the region that includes
|
||||
* the incoming ram_addr_t.
|
||||
*
|
||||
|
@ -34,7 +34,7 @@ struct RAMBlock {
|
||||
ram_addr_t max_length;
|
||||
void (*resized)(const char*, uint64_t length, void *host);
|
||||
uint32_t flags;
|
||||
/* Protected by iothread lock. */
|
||||
/* Protected by the BQL. */
|
||||
char idstr[256];
|
||||
/* RCU-enabled, writes protected by the ramlist lock */
|
||||
QLIST_ENTRY(RAMBlock) next;
|
||||
|
@ -149,7 +149,7 @@ typedef void (*QIOTaskWorker)(QIOTask *task,
|
||||
* lookups) to be easily run non-blocking. Reporting the
|
||||
* results in the main thread context means that the caller
|
||||
* typically does not need to be concerned about thread
|
||||
* safety wrt the QEMU global mutex.
|
||||
* safety wrt the BQL.
|
||||
*
|
||||
* For example, the socket_listen() method will block the caller
|
||||
* while DNS lookups take place if given a name, instead of IP
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include "hw/vmstate-if.h"
|
||||
|
||||
typedef struct SaveVMHandlers {
|
||||
/* This runs inside the iothread lock. */
|
||||
/* This runs inside the BQL. */
|
||||
SaveStateHandler *save_state;
|
||||
|
||||
/*
|
||||
@ -30,7 +30,7 @@ typedef struct SaveVMHandlers {
|
||||
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
|
||||
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
|
||||
|
||||
/* This runs both outside and inside the iothread lock. */
|
||||
/* This runs both outside and inside the BQL. */
|
||||
bool (*is_active)(void *opaque);
|
||||
bool (*has_postcopy)(void *opaque);
|
||||
|
||||
@ -43,14 +43,14 @@ typedef struct SaveVMHandlers {
|
||||
*/
|
||||
bool (*is_active_iterate)(void *opaque);
|
||||
|
||||
/* This runs outside the iothread lock in the migration case, and
|
||||
/* This runs outside the BQL in the migration case, and
|
||||
* within the lock in the savevm case. The callback had better only
|
||||
* use data that is local to the migration thread or protected
|
||||
* by other locks.
|
||||
*/
|
||||
int (*save_live_iterate)(QEMUFile *f, void *opaque);
|
||||
|
||||
/* This runs outside the iothread lock! */
|
||||
/* This runs outside the BQL! */
|
||||
/* Note for save_live_pending:
|
||||
* must_precopy:
|
||||
* - must be migrated in precopy or in stopped state
|
||||
|
@ -22,7 +22,7 @@
|
||||
* rather than callbacks, for operations that need to give up control while
|
||||
* waiting for events to complete.
|
||||
*
|
||||
* These functions are re-entrant and may be used outside the global mutex.
|
||||
* These functions are re-entrant and may be used outside the BQL.
|
||||
*
|
||||
* Functions that execute in coroutine context cannot be called
|
||||
* directly from normal functions. Use @coroutine_fn to mark such
|
||||
|
@ -26,7 +26,7 @@
|
||||
* rather than callbacks, for operations that need to give up control while
|
||||
* waiting for events to complete.
|
||||
*
|
||||
* These functions are re-entrant and may be used outside the global mutex.
|
||||
* These functions are re-entrant and may be used outside the BQL.
|
||||
*
|
||||
* Functions that execute in coroutine context cannot be called
|
||||
* directly from normal functions. Use @coroutine_fn to mark such
|
||||
|
@ -248,19 +248,19 @@ GSource *iohandler_get_g_source(void);
|
||||
AioContext *iohandler_get_aio_context(void);
|
||||
|
||||
/**
|
||||
* qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
|
||||
* bql_locked: Return lock status of the Big QEMU Lock (BQL)
|
||||
*
|
||||
* The main loop mutex is the coarsest lock in QEMU, and as such it
|
||||
* The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
|
||||
* must always be taken outside other locks. This function helps
|
||||
* functions take different paths depending on whether the current
|
||||
* thread is running within the main loop mutex.
|
||||
* thread is running within the BQL.
|
||||
*
|
||||
* This function should never be used in the block layer, because
|
||||
* unit tests, block layer tools and qemu-storage-daemon do not
|
||||
* have a BQL.
|
||||
* Please instead refer to qemu_in_main_thread().
|
||||
*/
|
||||
bool qemu_mutex_iothread_locked(void);
|
||||
bool bql_locked(void);
|
||||
|
||||
/**
|
||||
* qemu_in_main_thread: return whether it's possible to safely access
|
||||
@ -312,78 +312,76 @@ bool qemu_in_main_thread(void);
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* qemu_mutex_lock_iothread: Lock the main loop mutex.
|
||||
* bql_lock: Lock the Big QEMU Lock (BQL).
|
||||
*
|
||||
* This function locks the main loop mutex. The mutex is taken by
|
||||
* This function locks the Big QEMU Lock (BQL). The lock is taken by
|
||||
* main() in vl.c and always taken except while waiting on
|
||||
* external events (such as with select). The mutex should be taken
|
||||
* external events (such as with select). The lock should be taken
|
||||
* by threads other than the main loop thread when calling
|
||||
* qemu_bh_new(), qemu_set_fd_handler() and basically all other
|
||||
* functions documented in this file.
|
||||
*
|
||||
* NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
|
||||
* NOTE: tools currently are single-threaded and bql_lock
|
||||
* is a no-op there.
|
||||
*/
|
||||
#define qemu_mutex_lock_iothread() \
|
||||
qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
|
||||
void qemu_mutex_lock_iothread_impl(const char *file, int line);
|
||||
#define bql_lock() bql_lock_impl(__FILE__, __LINE__)
|
||||
void bql_lock_impl(const char *file, int line);
|
||||
|
||||
/**
|
||||
* qemu_mutex_unlock_iothread: Unlock the main loop mutex.
|
||||
* bql_unlock: Unlock the Big QEMU Lock (BQL).
|
||||
*
|
||||
* This function unlocks the main loop mutex. The mutex is taken by
|
||||
* This function unlocks the Big QEMU Lock. The lock is taken by
|
||||
* main() in vl.c and always taken except while waiting on
|
||||
* external events (such as with select). The mutex should be unlocked
|
||||
* external events (such as with select). The lock should be unlocked
|
||||
* as soon as possible by threads other than the main loop thread,
|
||||
* because it prevents the main loop from processing callbacks,
|
||||
* including timers and bottom halves.
|
||||
*
|
||||
* NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
|
||||
* NOTE: tools currently are single-threaded and bql_unlock
|
||||
* is a no-op there.
|
||||
*/
|
||||
void qemu_mutex_unlock_iothread(void);
|
||||
void bql_unlock(void);
|
||||
|
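A hedged sketch of what the documentation above asks of threads other than the main loop thread: take the BQL around main-loop APIs such as qemu_set_fd_handler() and drop it again as soon as possible (the fd, handler and opaque pointer are placeholders):

#include "qemu/osdep.h"
#include "qemu/main-loop.h"

static void install_fd_handler(int fd, IOHandler *read_cb, void *opaque)
{
    bql_lock();                         /* required when not on the main loop thread */
    qemu_set_fd_handler(fd, read_cb, NULL, opaque);
    bql_unlock();                       /* release promptly so the main loop can run */
}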
||||
/**
|
||||
* QEMU_IOTHREAD_LOCK_GUARD
|
||||
* BQL_LOCK_GUARD
|
||||
*
|
||||
* Wrap a block of code in a conditional qemu_mutex_{lock,unlock}_iothread.
|
||||
* Wrap a block of code in a conditional bql_{lock,unlock}.
|
||||
*/
|
||||
typedef struct IOThreadLockAuto IOThreadLockAuto;
|
||||
typedef struct BQLLockAuto BQLLockAuto;
|
||||
|
||||
static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file,
|
||||
int line)
|
||||
static inline BQLLockAuto *bql_auto_lock(const char *file, int line)
|
||||
{
|
||||
if (qemu_mutex_iothread_locked()) {
|
||||
if (bql_locked()) {
|
||||
return NULL;
|
||||
}
|
||||
qemu_mutex_lock_iothread_impl(file, line);
|
||||
bql_lock_impl(file, line);
|
||||
/* Anything non-NULL causes the cleanup function to be called */
|
||||
return (IOThreadLockAuto *)(uintptr_t)1;
|
||||
return (BQLLockAuto *)(uintptr_t)1;
|
||||
}
|
||||
|
||||
static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l)
|
||||
static inline void bql_auto_unlock(BQLLockAuto *l)
|
||||
{
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock)
|
||||
G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)
|
||||
|
||||
#define QEMU_IOTHREAD_LOCK_GUARD() \
|
||||
g_autoptr(IOThreadLockAuto) _iothread_lock_auto __attribute__((unused)) \
|
||||
= qemu_iothread_auto_lock(__FILE__, __LINE__)
|
||||
#define BQL_LOCK_GUARD() \
|
||||
g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \
|
||||
= bql_auto_lock(__FILE__, __LINE__)
|
||||
|
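For completeness, a minimal usage sketch of the renamed guard (the caller is assumed, not taken from the patch): the guard locks the BQL only if this thread does not already hold it, and unlocks automatically when the scope ends, exactly as the helpers above implement:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"

static void poke_device_state(void)
{
    BQL_LOCK_GUARD();   /* no-op if the BQL is already held by this thread */

    /* ... code that must run under the BQL ... */
}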
||||
/*
|
||||
* qemu_cond_wait_iothread: Wait on condition for the main loop mutex
|
||||
* qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL)
|
||||
*
|
||||
* This function atomically releases the main loop mutex and causes
|
||||
* This function atomically releases the Big QEMU Lock (BQL) and causes
|
||||
* the calling thread to block on the condition.
|
||||
*/
|
||||
void qemu_cond_wait_iothread(QemuCond *cond);
|
||||
void qemu_cond_wait_bql(QemuCond *cond);
|
||||
|
||||
/*
|
||||
* qemu_cond_timedwait_iothread: like the previous, but with timeout
|
||||
* qemu_cond_timedwait_bql: like the previous, but with timeout
|
||||
*/
|
||||
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
|
||||
void qemu_cond_timedwait_bql(QemuCond *cond, int ms);
|
||||
|
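A brief sketch of the renamed wait helpers in use (cond and ready are hypothetical and would be signalled elsewhere under the BQL; cpu_throttle_thread() later in this series uses the timed variant the same way):

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"

static QemuCond cond;       /* hypothetical condition, broadcast elsewhere under the BQL */
static bool ready;

static void wait_until_ready(void)
{
    g_assert(bql_locked());
    while (!ready) {
        /* atomically drops the BQL, sleeps, and re-takes it before returning */
        qemu_cond_wait_bql(&cond);
    }
}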
||||
/* internal interfaces */
|
||||
|
||||
|
@ -47,7 +47,7 @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
|
||||
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
|
||||
const char *f, int l);
|
||||
|
||||
extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
|
||||
extern QemuMutexLockFunc bql_mutex_lock_func;
|
||||
extern QemuMutexLockFunc qemu_mutex_lock_func;
|
||||
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
|
||||
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
|
||||
|
@ -170,8 +170,7 @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
|
||||
}
|
||||
|
||||
aio_context_set_aio_params(iothread->ctx,
|
||||
iothread->parent_obj.aio_max_batch,
|
||||
errp);
|
||||
iothread->parent_obj.aio_max_batch);
|
||||
|
||||
aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
|
||||
base->thread_pool_max, errp);
|
||||
|
@ -61,7 +61,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
return val;
|
||||
@ -130,7 +130,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
return val;
|
||||
@ -186,7 +186,7 @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
return val;
|
||||
@ -234,7 +234,7 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
return val;
|
||||
@ -295,7 +295,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
}
|
||||
@ -339,7 +339,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
}
|
||||
@ -391,7 +391,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
}
|
||||
@ -435,7 +435,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
}
|
||||
@ -499,7 +499,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
|
||||
*result = r;
|
||||
}
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
RCU_READ_UNLOCK();
|
||||
}
|
||||
|
@ -464,7 +464,7 @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
|
||||
g_free(buf);
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
|
||||
{
|
||||
SaveBitmapState *dbms;
|
||||
@ -479,7 +479,7 @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
|
||||
const char *bs_name, GHashTable *alias_map)
|
||||
{
|
||||
@ -598,7 +598,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
static int init_dirty_bitmap_migration(DBMSaveState *s)
|
||||
{
|
||||
BlockDriverState *bs;
|
||||
@ -607,7 +607,7 @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
|
||||
BlockBackend *blk;
|
||||
GHashTable *alias_map = NULL;
|
||||
|
||||
/* Runs in the migration thread, but holds the iothread lock */
|
||||
/* Runs in the migration thread, but holds the BQL */
|
||||
GLOBAL_STATE_CODE();
|
||||
GRAPH_RDLOCK_GUARD_MAINLOOP();
|
||||
|
||||
@ -742,7 +742,7 @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
|
||||
return s->bulk_completed;
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
|
||||
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
|
||||
{
|
||||
@ -774,7 +774,7 @@ static void dirty_bitmap_state_pending(void *opaque,
|
||||
SaveBitmapState *dbms;
|
||||
uint64_t pending = 0;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) {
|
||||
uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap);
|
||||
@ -784,7 +784,7 @@ static void dirty_bitmap_state_pending(void *opaque,
|
||||
pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran);
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
trace_dirty_bitmap_state_pending(pending);
|
||||
|
||||
|
@ -101,7 +101,7 @@ typedef struct BlkMigState {
|
||||
int prev_progress;
|
||||
int bulk_completed;
|
||||
|
||||
/* Lock must be taken _inside_ the iothread lock. */
|
||||
/* Lock must be taken _inside_ the BQL. */
|
||||
QemuMutex lock;
|
||||
} BlkMigState;
|
||||
|
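The ordering constraint in that comment can be read as the following sketch (an illustration drawn only from the comment, within migration/block.c, not code from the patch): block_mig_state.lock nests inside the BQL, so it is acquired after bql_lock() and released before bql_unlock():

static void touch_block_mig_state(void)
{
    bql_lock();
    blk_mig_lock();                 /* inner lock, taken while the BQL is held */
    /* ... access fields guarded by block_mig_state.lock ... */
    blk_mig_unlock();
    bql_unlock();
}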
||||
@ -117,7 +117,7 @@ static void blk_mig_unlock(void)
|
||||
qemu_mutex_unlock(&block_mig_state.lock);
|
||||
}
|
||||
|
||||
/* Must run outside of the iothread lock during the bulk phase,
|
||||
/* Must run outside of the BQL during the bulk phase,
|
||||
* or the VM will stall.
|
||||
*/
|
||||
|
||||
@ -269,7 +269,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
||||
int64_t count;
|
||||
|
||||
if (bmds->shared_base) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
/* Skip unallocated sectors; intentionally treats failure or
|
||||
* partial sector as an allocated sector */
|
||||
while (cur_sector < total_sectors &&
|
||||
@ -280,7 +280,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
||||
}
|
||||
cur_sector += count >> BDRV_SECTOR_BITS;
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
if (cur_sector >= total_sectors) {
|
||||
@ -316,18 +316,18 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
|
||||
* I/O runs in the main loop AioContext (see
|
||||
* qemu_get_current_aio_context()).
|
||||
*/
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
|
||||
nr_sectors * BDRV_SECTOR_SIZE);
|
||||
blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
|
||||
0, blk_mig_read_cb, blk);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
bmds->cur_sector = cur_sector + nr_sectors;
|
||||
return (bmds->cur_sector >= total_sectors);
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
|
||||
static int set_dirty_tracking(void)
|
||||
{
|
||||
@ -354,7 +354,7 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
|
||||
static void unset_dirty_tracking(void)
|
||||
{
|
||||
@ -505,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void)
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
|
||||
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
|
||||
int is_async)
|
||||
@ -587,7 +587,7 @@ error:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken.
|
||||
/* Called with the BQL taken.
|
||||
*
|
||||
* return value:
|
||||
* 0: too much data for max_downtime
|
||||
@ -649,7 +649,7 @@ static int flush_blks(QEMUFile *f)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
|
||||
static int64_t get_remaining_dirty(void)
|
||||
{
|
||||
@ -667,7 +667,7 @@ static int64_t get_remaining_dirty(void)
|
||||
|
||||
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
static void block_migration_cleanup_bmds(void)
|
||||
{
|
||||
BlkMigDevState *bmds;
|
||||
@ -690,7 +690,7 @@ static void block_migration_cleanup_bmds(void)
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
static void block_migration_cleanup(void *opaque)
|
||||
{
|
||||
BlkMigBlock *blk;
|
||||
@ -767,12 +767,12 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
|
||||
}
|
||||
ret = 0;
|
||||
} else {
|
||||
/* Always called with iothread lock taken for
|
||||
/* Always called with the BQL taken for
|
||||
* simplicity, block_save_complete also calls it.
|
||||
*/
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
ret = blk_mig_save_dirty_block(f, 1);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@ -795,7 +795,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
|
||||
return (delta_bytes > 0);
|
||||
}
|
||||
|
||||
/* Called with iothread lock taken. */
|
||||
/* Called with the BQL taken. */
|
||||
|
||||
static int block_save_complete(QEMUFile *f, void *opaque)
|
||||
{
|
||||
@ -844,9 +844,9 @@ static void block_state_pending(void *opaque, uint64_t *must_precopy,
|
||||
/* Estimate pending number of bytes to send */
|
||||
uint64_t pending;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
pending = get_remaining_dirty();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
blk_mig_lock();
|
||||
pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
|
||||
|
@ -420,13 +420,13 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
|
||||
qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL);
|
||||
bioc->usage = 0;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
if (failover_get_state() != FAILOVER_STATUS_NONE) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
goto out;
|
||||
}
|
||||
vm_stop_force_state(RUN_STATE_COLO);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
trace_colo_vm_state_change("run", "stop");
|
||||
/*
|
||||
* Failover request bh could be called after vm_stop_force_state(),
|
||||
@ -435,23 +435,23 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
|
||||
if (failover_get_state() != FAILOVER_STATUS_NONE) {
|
||||
goto out;
|
||||
}
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
replication_do_checkpoint_all(&local_err);
|
||||
if (local_err) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
goto out;
|
||||
}
|
||||
|
||||
colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err);
|
||||
if (local_err) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
goto out;
|
||||
}
|
||||
/* Note: device state is saved into buffer */
|
||||
ret = qemu_save_device_state(fb);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
@ -504,9 +504,9 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
|
||||
|
||||
ret = 0;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
vm_start();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
trace_colo_vm_state_change("stop", "run");
|
||||
|
||||
out:
|
||||
@ -557,15 +557,15 @@ static void colo_process_checkpoint(MigrationState *s)
|
||||
fb = qemu_file_new_output(QIO_CHANNEL(bioc));
|
||||
object_unref(OBJECT(bioc));
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
replication_start_all(REPLICATION_MODE_PRIMARY, &local_err);
|
||||
if (local_err) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
goto out;
|
||||
}
|
||||
|
||||
vm_start();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
trace_colo_vm_state_change("stop", "run");
|
||||
|
||||
timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) +
|
||||
@ -639,14 +639,14 @@ out:
|
||||
|
||||
void migrate_start_colo_process(MigrationState *s)
|
||||
{
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
qemu_event_init(&s->colo_checkpoint_event, false);
|
||||
s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
|
||||
colo_checkpoint_notify, s);
|
||||
|
||||
qemu_sem_init(&s->colo_exit_sem, 0);
|
||||
colo_process_checkpoint(s);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
@ -657,9 +657,9 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
vm_stop_force_state(RUN_STATE_COLO);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
trace_colo_vm_state_change("run", "stop");
|
||||
|
||||
/* FIXME: This is unnecessary for periodic checkpoint mode */
|
||||
@ -677,10 +677,10 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
cpu_synchronize_all_states();
|
||||
ret = qemu_loadvm_state_main(mis->from_src_file, mis);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "Load VM's live state (ram) error");
|
||||
@ -719,14 +719,14 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
vmstate_loading = true;
|
||||
colo_flush_ram_cache();
|
||||
ret = qemu_load_device_state(fb);
|
||||
if (ret < 0) {
|
||||
error_setg(errp, "COLO: load device state failed");
|
||||
vmstate_loading = false;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
@ -734,7 +734,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
vmstate_loading = false;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
@ -743,7 +743,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
vmstate_loading = false;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return;
|
||||
}
|
||||
/* Notify all filters of all NIC to do checkpoint */
|
||||
@ -752,13 +752,13 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
vmstate_loading = false;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
vmstate_loading = false;
|
||||
vm_start();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
trace_colo_vm_state_change("stop", "run");
|
||||
|
||||
if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
|
||||
@ -851,14 +851,14 @@ static void *colo_process_incoming_thread(void *opaque)
|
||||
fb = qemu_file_new_input(QIO_CHANNEL(bioc));
|
||||
object_unref(OBJECT(bioc));
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
replication_start_all(REPLICATION_MODE_SECONDARY, &local_err);
|
||||
if (local_err) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
goto out;
|
||||
}
|
||||
vm_start();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
trace_colo_vm_state_change("stop", "run");
|
||||
|
||||
colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
|
||||
@ -920,7 +920,7 @@ int coroutine_fn colo_incoming_co(void)
|
||||
Error *local_err = NULL;
|
||||
QemuThread th;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
if (!migration_incoming_colo_enabled()) {
|
||||
return 0;
|
||||
@ -940,12 +940,12 @@ int coroutine_fn colo_incoming_co(void)
|
||||
qemu_coroutine_yield();
|
||||
mis->colo_incoming_co = NULL;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
/* Wait checkpoint incoming thread exit before free resource */
|
||||
qemu_thread_join(&th);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
/* We hold the global iothread lock, so it is safe here */
|
||||
/* We hold the global BQL, so it is safe here */
|
||||
colo_release_ram_cache();
|
||||
|
||||
return 0;
|
||||
|
@ -90,13 +90,13 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
|
||||
|
||||
void global_dirty_log_change(unsigned int flag, bool start)
|
||||
{
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
if (start) {
|
||||
memory_global_dirty_log_start(flag);
|
||||
} else {
|
||||
memory_global_dirty_log_stop(flag);
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -106,12 +106,12 @@ void global_dirty_log_change(unsigned int flag, bool start)
|
||||
*/
|
||||
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
|
||||
{
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
memory_global_dirty_log_sync(false);
|
||||
if (one_shot) {
|
||||
memory_global_dirty_log_stop(flag);
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
|
||||
@ -609,7 +609,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
|
||||
int64_t start_time;
|
||||
DirtyPageRecord dirty_pages;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
|
||||
|
||||
/*
|
||||
@ -626,7 +626,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
|
||||
* KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
|
||||
*/
|
||||
dirtyrate_manual_reset_protect();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
record_dirtypages_bitmap(&dirty_pages, true);
|
||||
|
||||
|
@ -1283,12 +1283,12 @@ static void migrate_fd_cleanup(MigrationState *s)
|
||||
QEMUFile *tmp;
|
||||
|
||||
trace_migrate_fd_cleanup();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
if (s->migration_thread_running) {
|
||||
qemu_thread_join(&s->thread);
|
||||
s->migration_thread_running = false;
|
||||
}
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
multifd_save_cleanup();
|
||||
qemu_mutex_lock(&s->qemu_file_lock);
|
||||
@ -2396,7 +2396,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
|
||||
}
|
||||
|
||||
trace_postcopy_start();
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
trace_postcopy_start_set_run();
|
||||
|
||||
migration_downtime_start(ms);
|
||||
@ -2504,7 +2504,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
|
||||
|
||||
migration_downtime_end(ms);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
if (migrate_postcopy_ram()) {
|
||||
/*
|
||||
@ -2545,13 +2545,13 @@ fail:
|
||||
error_report_err(local_err);
|
||||
}
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* migration_maybe_pause: Pause if required to by
|
||||
* migrate_pause_before_switchover called with the iothread locked
|
||||
* migrate_pause_before_switchover called with the BQL locked
|
||||
* Returns: 0 on success
|
||||
*/
|
||||
static int migration_maybe_pause(MigrationState *s,
|
||||
@ -2579,14 +2579,14 @@ static int migration_maybe_pause(MigrationState *s,
|
||||
* wait for the 'pause_sem' semaphore.
|
||||
*/
|
||||
if (s->state != MIGRATION_STATUS_CANCELLING) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
migrate_set_state(&s->state, *current_active_state,
|
||||
MIGRATION_STATUS_PRE_SWITCHOVER);
|
||||
qemu_sem_wait(&s->pause_sem);
|
||||
migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
|
||||
new_state);
|
||||
*current_active_state = new_state;
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
return s->state == new_state ? 0 : -EINVAL;
|
||||
@ -2597,7 +2597,7 @@ static int migration_completion_precopy(MigrationState *s,
|
||||
{
|
||||
int ret;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
migration_downtime_start(s);
|
||||
|
||||
s->vm_old_state = runstate_get();
|
||||
@ -2624,7 +2624,7 @@ static int migration_completion_precopy(MigrationState *s,
|
||||
ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
|
||||
s->block_inactive);
|
||||
out_unlock:
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2632,9 +2632,9 @@ static void migration_completion_postcopy(MigrationState *s)
|
||||
{
|
||||
trace_migration_completion_postcopy_end();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_savevm_state_complete_postcopy(s->to_dst_file);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
/*
|
||||
* Shutdown the postcopy fast path thread. This is only needed when dest
|
||||
@ -2658,14 +2658,14 @@ static void migration_completion_failed(MigrationState *s,
|
||||
*/
|
||||
Error *local_err = NULL;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
bdrv_activate_all(&local_err);
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
} else {
|
||||
s->block_inactive = false;
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
migrate_set_state(&s->state, current_active_state,
|
||||
@ -3105,7 +3105,7 @@ static void migration_iteration_finish(MigrationState *s)
|
||||
/* If we enabled cpu throttling for auto-converge, turn it off. */
|
||||
cpu_throttle_stop();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
switch (s->state) {
|
||||
case MIGRATION_STATUS_COMPLETED:
|
||||
migration_calculate_complete(s);
|
||||
@ -3136,7 +3136,7 @@ static void migration_iteration_finish(MigrationState *s)
|
||||
break;
|
||||
}
|
||||
migrate_fd_cleanup_schedule(s);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
static void bg_migration_iteration_finish(MigrationState *s)
|
||||
@ -3148,7 +3148,7 @@ static void bg_migration_iteration_finish(MigrationState *s)
|
||||
*/
|
||||
ram_write_tracking_stop();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
switch (s->state) {
|
||||
case MIGRATION_STATUS_COMPLETED:
|
||||
migration_calculate_complete(s);
|
||||
@ -3167,7 +3167,7 @@ static void bg_migration_iteration_finish(MigrationState *s)
|
||||
}
|
||||
|
||||
migrate_fd_cleanup_schedule(s);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3289,9 +3289,9 @@ static void *migration_thread(void *opaque)
|
||||
object_ref(OBJECT(s));
|
||||
update_iteration_initial_status(s);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_savevm_state_header(s->to_dst_file);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
/*
|
||||
* If we opened the return path, we need to make sure dst has it
|
||||
@ -3319,9 +3319,9 @@ static void *migration_thread(void *opaque)
|
||||
qemu_savevm_send_colo_enable(s->to_dst_file);
|
||||
}
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_savevm_state_setup(s->to_dst_file);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
|
||||
MIGRATION_STATUS_ACTIVE);
|
||||
@ -3432,10 +3432,10 @@ static void *bg_migration_thread(void *opaque)
|
||||
ram_write_tracking_prepare();
|
||||
#endif
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_savevm_state_header(s->to_dst_file);
|
||||
qemu_savevm_state_setup(s->to_dst_file);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
|
||||
MIGRATION_STATUS_ACTIVE);
|
||||
@ -3445,7 +3445,7 @@ static void *bg_migration_thread(void *opaque)
|
||||
trace_migration_thread_setup_complete();
|
||||
migration_downtime_start(s);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
s->vm_old_state = runstate_get();
|
||||
|
||||
@ -3483,7 +3483,7 @@ static void *bg_migration_thread(void *opaque)
|
||||
s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
|
||||
qemu_bh_schedule(s->vm_start_bh);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
while (migration_is_active(s)) {
|
||||
MigIterateState iter_state = bg_migration_iteration_run(s);
|
||||
@ -3512,7 +3512,7 @@ fail:
|
||||
if (early_fail) {
|
||||
migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
|
||||
MIGRATION_STATUS_FAILED);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
bg_migration_iteration_finish(s);
|
||||
|
@ -2395,7 +2395,7 @@ static void ram_save_cleanup(void *opaque)
|
||||
|
||||
/* We don't use dirty log with background snapshots */
|
||||
if (!migrate_background_snapshot()) {
|
||||
/* caller have hold iothread lock or is in a bh, so there is
|
||||
/* caller must hold the BQL or be in a BH, so there is
|
||||
* no writing race against the migration bitmap
|
||||
*/
|
||||
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
|
||||
@ -2984,9 +2984,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
|
||||
migration_ops = g_malloc0(sizeof(MigrationOps));
|
||||
migration_ops->ram_save_target_page = ram_save_target_page_legacy;
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
ret = multifd_send_sync_main(f);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
@ -3131,7 +3131,7 @@ out:
|
||||
*
|
||||
* Returns zero to indicate success or negative on error
|
||||
*
|
||||
* Called with iothread lock
|
||||
* Called with the BQL held
|
||||
*
|
||||
* @f: QEMUFile where to send the data
|
||||
* @opaque: RAMState pointer
|
||||
@ -3221,11 +3221,11 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
|
||||
uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
|
||||
|
||||
if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
WITH_RCU_READ_LOCK_GUARD() {
|
||||
migration_bitmap_sync_precopy(rs, false);
|
||||
}
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
|
||||
}
|
||||
|
||||
@ -3453,7 +3453,7 @@ void colo_incoming_start_dirty_log(void)
|
||||
{
|
||||
RAMBlock *block = NULL;
|
||||
/* For memory_global_dirty_log_start below. */
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
qemu_mutex_lock_ramlist();
|
||||
|
||||
memory_global_dirty_log_sync(false);
|
||||
@ -3467,7 +3467,7 @@ void colo_incoming_start_dirty_log(void)
|
||||
}
|
||||
ram_state->migration_dirty_pages = 0;
|
||||
qemu_mutex_unlock_ramlist();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
/* It is necessary to hold the global lock to call this helper */
|
||||
|
@ -219,7 +219,7 @@ static void tap_send(void *opaque)
|
||||
|
||||
/*
|
||||
* When the host keeps receiving more packets while tap_send() is
|
||||
* running we can hog the QEMU global mutex. Limit the number of
|
||||
* running we can hog the BQL. Limit the number of
|
||||
* packets that are processed per tap_send() callback to prevent
|
||||
* stalling the guest.
|
||||
*/
|
||||
|
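The idea in that comment, expressed as a standalone sketch (the names and the packet limit are assumptions, not the actual tap_send() code): cap the work done per callback so control returns to the main loop regularly and the guest is not stalled:

#define MAX_PACKETS_PER_CALLBACK 50        /* assumed bound, not taken from the patch */

static bool receive_one_packet(void);      /* hypothetical helper: false when nothing is pending */

static void bounded_send(void)
{
    for (int i = 0; i < MAX_PACKETS_PER_CALLBACK; i++) {
        if (!receive_one_packet()) {
            break;                          /* nothing left to read right now */
        }
    }
    /* returning promptly lets the main loop service other BQL work */
}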
@ -217,7 +217,7 @@ void replay_mutex_lock(void)
|
||||
{
|
||||
if (replay_mode != REPLAY_MODE_NONE) {
|
||||
unsigned long id;
|
||||
g_assert(!qemu_mutex_iothread_locked());
|
||||
g_assert(!bql_locked());
|
||||
g_assert(!replay_mutex_locked());
|
||||
qemu_mutex_lock(&lock);
|
||||
id = mutex_tail++;
|
||||
|
@ -43,7 +43,7 @@ static SemihostingConsole console;
|
||||
static int console_can_read(void *opaque)
|
||||
{
|
||||
SemihostingConsole *c = opaque;
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
return (int)fifo8_num_free(&c->fifo);
|
||||
}
|
||||
|
||||
@ -58,7 +58,7 @@ static void console_wake_up(gpointer data, gpointer user_data)
|
||||
static void console_read(void *opaque, const uint8_t *buf, int size)
|
||||
{
|
||||
SemihostingConsole *c = opaque;
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
while (size-- && !fifo8_is_full(&c->fifo)) {
|
||||
fifo8_push(&c->fifo, *buf++);
|
||||
}
|
||||
@ -70,7 +70,7 @@ bool qemu_semihosting_console_ready(void)
|
||||
{
|
||||
SemihostingConsole *c = &console;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
return !fifo8_is_empty(&c->fifo);
|
||||
}
|
||||
|
||||
@ -78,7 +78,7 @@ void qemu_semihosting_console_block_until_ready(CPUState *cs)
|
||||
{
|
||||
SemihostingConsole *c = &console;
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
|
||||
/* Block if the fifo is completely empty. */
|
||||
if (fifo8_is_empty(&c->fifo)) {
|
||||
|
@ -1,15 +1,15 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/main-loop.h"
|
||||
|
||||
bool qemu_mutex_iothread_locked(void)
|
||||
bool bql_locked(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void qemu_mutex_lock_iothread_impl(const char *file, int line)
|
||||
void bql_lock_impl(const char *file, int line)
|
||||
{
|
||||
}
|
||||
|
||||
void qemu_mutex_unlock_iothread(void)
|
||||
void bql_unlock(void)
|
||||
{
|
||||
}
|
||||
|
@ -54,12 +54,12 @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
|
||||
endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
|
||||
while (sleeptime_ns > 0 && !cpu->stop) {
|
||||
if (sleeptime_ns > SCALE_MS) {
|
||||
qemu_cond_timedwait_iothread(cpu->halt_cond,
|
||||
qemu_cond_timedwait_bql(cpu->halt_cond,
|
||||
sleeptime_ns / SCALE_MS);
|
||||
} else {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
g_usleep(sleeptime_ns / SCALE_US);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||
}
|
||||
|
@ -65,7 +65,8 @@
|
||||
|
||||
#endif /* CONFIG_LINUX */
|
||||
|
||||
static QemuMutex qemu_global_mutex;
|
||||
/* The Big QEMU Lock (BQL) */
|
||||
static QemuMutex bql;
|
||||
|
||||
/*
|
||||
* The chosen accelerator is supposed to register this.
|
||||
@ -408,14 +409,14 @@ void qemu_init_cpu_loop(void)
|
||||
qemu_init_sigbus();
|
||||
qemu_cond_init(&qemu_cpu_cond);
|
||||
qemu_cond_init(&qemu_pause_cond);
|
||||
qemu_mutex_init(&qemu_global_mutex);
|
||||
qemu_mutex_init(&bql);
|
||||
|
||||
qemu_thread_get_self(&io_thread);
|
||||
}
|
||||
|
||||
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
|
||||
{
|
||||
do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
|
||||
do_run_on_cpu(cpu, func, data, &bql);
|
||||
}
|
||||
|
||||
static void qemu_cpu_stop(CPUState *cpu, bool exit)
|
||||
@ -447,7 +448,7 @@ void qemu_wait_io_event(CPUState *cpu)
|
||||
slept = true;
|
||||
qemu_plugin_vcpu_idle_cb(cpu);
|
||||
}
|
||||
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
|
||||
qemu_cond_wait(cpu->halt_cond, &bql);
|
||||
}
|
||||
if (slept) {
|
||||
qemu_plugin_vcpu_resume_cb(cpu);
|
||||
@ -500,46 +501,46 @@ bool qemu_in_vcpu_thread(void)
|
||||
return current_cpu && qemu_cpu_is_self(current_cpu);
|
||||
}
|
||||
|
||||
QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked)
|
||||
QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked)
|
||||
|
||||
bool qemu_mutex_iothread_locked(void)
|
||||
bool bql_locked(void)
|
||||
{
|
||||
return get_iothread_locked();
|
||||
return get_bql_locked();
|
||||
}
|
||||
|
||||
bool qemu_in_main_thread(void)
|
||||
{
|
||||
return qemu_mutex_iothread_locked();
|
||||
return bql_locked();
|
||||
}
|
||||
|
||||
/*
|
||||
* The BQL is taken from so many places that it is worth profiling the
|
||||
* callers directly, instead of funneling them all through a single function.
|
||||
*/
|
||||
void qemu_mutex_lock_iothread_impl(const char *file, int line)
|
||||
void bql_lock_impl(const char *file, int line)
|
||||
{
|
||||
QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);
|
||||
QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func);
|
||||
|
||||
g_assert(!qemu_mutex_iothread_locked());
|
||||
bql_lock(&qemu_global_mutex, file, line);
|
||||
set_iothread_locked(true);
|
||||
g_assert(!bql_locked());
|
||||
bql_lock_fn(&bql, file, line);
|
||||
set_bql_locked(true);
|
||||
}
|
||||
|
||||
void qemu_mutex_unlock_iothread(void)
|
||||
void bql_unlock(void)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
set_iothread_locked(false);
|
||||
qemu_mutex_unlock(&qemu_global_mutex);
|
||||
g_assert(bql_locked());
|
||||
set_bql_locked(false);
|
||||
qemu_mutex_unlock(&bql);
|
||||
}
|
||||
|
||||
void qemu_cond_wait_iothread(QemuCond *cond)
|
||||
void qemu_cond_wait_bql(QemuCond *cond)
|
||||
{
|
||||
qemu_cond_wait(cond, &qemu_global_mutex);
|
||||
qemu_cond_wait(cond, &bql);
|
||||
}
|
||||
|
||||
void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
|
||||
void qemu_cond_timedwait_bql(QemuCond *cond, int ms)
|
||||
{
|
||||
qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
|
||||
qemu_cond_timedwait(cond, &bql, ms);
|
||||
}
|
||||
|
||||
/* signal CPU creation */
|
||||
@ -590,15 +591,15 @@ void pause_all_vcpus(void)
|
||||
replay_mutex_unlock();
|
||||
|
||||
while (!all_vcpus_paused()) {
|
||||
qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
|
||||
qemu_cond_wait(&qemu_pause_cond, &bql);
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_cpu_kick(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
replay_mutex_lock();
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
void cpu_resume(CPUState *cpu)
|
||||
@ -627,9 +628,9 @@ void cpu_remove_sync(CPUState *cpu)
|
||||
cpu->stop = true;
|
||||
cpu->unplug = true;
|
||||
qemu_cpu_kick(cpu);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
qemu_thread_join(cpu->thread);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
void cpus_register_accel(const AccelOpsClass *ops)
|
||||
@ -668,7 +669,7 @@ void qemu_init_vcpu(CPUState *cpu)
|
||||
cpus_accel->create_vcpu_thread(cpu);
|
||||
|
||||
while (!cpu->created) {
|
||||
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
|
||||
qemu_cond_wait(&qemu_cpu_cond, &bql);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -148,9 +148,9 @@ void vcpu_dirty_rate_stat_stop(void)
|
||||
{
|
||||
qatomic_set(&vcpu_dirty_rate_stat->running, 0);
|
||||
dirtylimit_state_unlock();
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
qemu_thread_join(&vcpu_dirty_rate_stat->thread);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
dirtylimit_state_lock();
|
||||
}
|
||||
|
||||
|
@ -1119,7 +1119,7 @@ void memory_region_transaction_commit(void)
|
||||
AddressSpace *as;
|
||||
|
||||
assert(memory_region_transaction_depth);
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
--memory_region_transaction_depth;
|
||||
if (!memory_region_transaction_depth) {
|
||||
|
@ -799,7 +799,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
|
||||
abort();
|
||||
|
||||
found:
|
||||
/* It is safe to write mru_block outside the iothread lock. This
|
||||
/* It is safe to write mru_block outside the BQL. This
|
||||
* is what happens:
|
||||
*
|
||||
* mru_block = xxx
|
||||
@ -1597,7 +1597,7 @@ int qemu_ram_get_fd(RAMBlock *rb)
|
||||
return rb->fd;
|
||||
}
|
||||
|
||||
/* Called with iothread lock held. */
|
||||
/* Called with the BQL held. */
|
||||
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
|
||||
{
|
||||
RAMBlock *block;
|
||||
@ -1625,7 +1625,7 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with iothread lock held. */
|
||||
/* Called with the BQL held. */
|
||||
void qemu_ram_unset_idstr(RAMBlock *block)
|
||||
{
|
||||
/* FIXME: arch_init.c assumes that this is not called throughout
|
||||
@ -2639,8 +2639,8 @@ bool prepare_mmio_access(MemoryRegion *mr)
|
||||
{
|
||||
bool release_lock = false;
|
||||
|
||||
if (!qemu_mutex_iothread_locked()) {
|
||||
qemu_mutex_lock_iothread();
|
||||
if (!bql_locked()) {
|
||||
bql_lock();
|
||||
release_lock = true;
|
||||
}
|
||||
if (mr->flush_coalesced_mmio) {
|
||||
@ -2721,7 +2721,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
|
||||
}
|
||||
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
release_lock = false;
|
||||
}
|
||||
|
||||
@ -2799,7 +2799,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
|
||||
}
|
||||
|
||||
if (release_lock) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
release_lock = false;
|
||||
}
|
||||
|
||||
|
@ -819,7 +819,7 @@ void qemu_init_subsystems(void)
|
||||
|
||||
qemu_init_cpu_list();
|
||||
qemu_init_cpu_loop();
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
atexit(qemu_run_exit_notifiers);
|
||||
|
||||
|
@ -155,9 +155,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
|
||||
* Now raise the debug interrupt so that it will
|
||||
* trigger after the current instruction.
|
||||
*/
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
|
||||
g_free(info);
|
||||
|
||||
/* Finally set the power status */
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
target_cpu->power_state = PSCI_ON;
|
||||
}
|
||||
|
||||
@ -99,7 +99,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
|
||||
ARMCPU *target_cpu;
|
||||
struct CpuOnInfo *info;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
|
||||
"\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
|
||||
@ -196,7 +196,7 @@ static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
|
||||
target_cpu_state->halted = 0;
|
||||
|
||||
/* Finally set the power status */
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
target_cpu->power_state = PSCI_ON;
|
||||
}
|
||||
|
||||
@ -205,7 +205,7 @@ int arm_set_cpu_on_and_reset(uint64_t cpuid)
|
||||
CPUState *target_cpu_state;
|
||||
ARMCPU *target_cpu;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
/* Retrieve the cpu we are powering up */
|
||||
target_cpu_state = arm_get_cpu_by_id(cpuid);
|
||||
@ -247,7 +247,7 @@ static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
|
||||
{
|
||||
ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
target_cpu->power_state = PSCI_OFF;
|
||||
target_cpu_state->halted = 1;
|
||||
target_cpu_state->exception_index = EXCP_HLT;
|
||||
@ -258,7 +258,7 @@ int arm_set_cpu_off(uint64_t cpuid)
|
||||
CPUState *target_cpu_state;
|
||||
ARMCPU *target_cpu;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
DPRINTF("cpu %" PRId64 "\n", cpuid);
|
||||
|
||||
@ -294,7 +294,7 @@ int arm_reset_cpu(uint64_t cpuid)
|
||||
CPUState *target_cpu_state;
|
||||
ARMCPU *target_cpu;
|
||||
|
||||
assert(qemu_mutex_iothread_locked());
|
||||
assert(bql_locked());
|
||||
|
||||
DPRINTF("cpu %" PRId64 "\n", cpuid);
|
||||
|
||||
|
@ -5844,14 +5844,14 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
|
||||
* Updates to VI and VF require us to update the status of
|
||||
* virtual interrupts, which are the logical OR of these bits
|
||||
* and the state of the input lines from the GIC. (This requires
|
||||
* that we have the iothread lock, which is done by marking the
|
||||
* that we have the BQL, which is done by marking the
|
||||
* reginfo structs as ARM_CP_IO.)
|
||||
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
|
||||
* possible for it to be taken immediately, because VIRQ and
|
||||
* VFIQ are masked unless running at EL0 or EL1, and HCR
|
||||
* can only be written at EL2.
|
||||
*/
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
arm_cpu_update_virq(cpu);
|
||||
arm_cpu_update_vfiq(cpu);
|
||||
arm_cpu_update_vserr(cpu);
|
||||
@ -11273,7 +11273,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
|
||||
* BQL needs to be held for any modification of
|
||||
* cs->interrupt_request.
|
||||
*/
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
|
||||
arm_call_pre_el_change_hook(cpu);
|
||||
|
||||
|
@ -1721,9 +1721,9 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
|
||||
* sleeping.
|
||||
*/
|
||||
qatomic_set_mb(&cpu->thread_kicked, false);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
static void hvf_wfi(CPUState *cpu)
|
||||
@ -1824,7 +1824,7 @@ int hvf_vcpu_exec(CPUState *cpu)
|
||||
|
||||
flush_cpu_state(cpu);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
|
||||
|
||||
/* handle VMEXIT */
|
||||
@ -1833,7 +1833,7 @@ int hvf_vcpu_exec(CPUState *cpu)
|
||||
uint32_t ec = syn_get_ec(syndrome);
|
||||
|
||||
ret = 0;
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
switch (exit_reason) {
|
||||
case HV_EXIT_REASON_EXCEPTION:
|
||||
/* This is the main one, handle below. */
|
||||
|
@ -940,7 +940,7 @@ static inline const char *aarch32_mode_name(uint32_t psr)
|
||||
*
|
||||
* Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
|
||||
* a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
|
||||
* Must be called with the iothread lock held.
|
||||
* Must be called with the BQL held.
|
||||
*/
|
||||
void arm_cpu_update_virq(ARMCPU *cpu);
|
||||
|
||||
@ -949,7 +949,7 @@ void arm_cpu_update_virq(ARMCPU *cpu);
|
||||
*
|
||||
* Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
|
||||
* a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
|
||||
* Must be called with the iothread lock held.
|
||||
* Must be called with the BQL held.
|
||||
*/
|
||||
void arm_cpu_update_vfiq(ARMCPU *cpu);
|
||||
|
||||
|
@ -1250,7 +1250,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
|
||||
if (run->s.regs.device_irq_level != cpu->device_irq_level) {
|
||||
switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
|
||||
qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
|
||||
@ -1279,7 +1279,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
|
||||
|
||||
/* We also mark unknown levels as processed to not waste cycles */
|
||||
cpu->device_irq_level = run->s.regs.device_irq_level;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
return MEMTXATTRS_UNSPECIFIED;
|
||||
@ -1410,9 +1410,9 @@ static bool kvm_arm_handle_debug(ARMCPU *cpu,
|
||||
env->exception.syndrome = debug_exit->hsr;
|
||||
env->exception.vaddress = debug_exit->far;
|
||||
env->exception.target_el = 1;
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
arm_cpu_do_interrupt(cs);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -772,9 +772,9 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
|
||||
#if !TCG_OVERSIZED_GUEST
|
||||
# error "Unexpected configuration"
|
||||
#endif
|
||||
bool locked = qemu_mutex_iothread_locked();
|
||||
bool locked = bql_locked();
|
||||
if (!locked) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
if (ptw->out_be) {
|
||||
cur_val = ldq_be_p(host);
|
||||
@ -788,7 +788,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
|
||||
}
|
||||
}
|
||||
if (!locked) {
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -809,9 +809,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
|
||||
goto illegal_return;
|
||||
}
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
arm_call_pre_el_change_hook(env_archcpu(env));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
if (!return_to_aa64) {
|
||||
env->aarch64 = false;
|
||||
@ -876,9 +876,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
|
||||
*/
|
||||
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
arm_call_el_change_hook(env_archcpu(env));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
return;
|
||||
|
||||
|
@ -373,8 +373,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
|
||||
bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
|
||||
bool take_exception;
|
||||
|
||||
/* Take the iothread lock as we are going to touch the NVIC */
|
||||
qemu_mutex_lock_iothread();
|
||||
/* Take the BQL as we are going to touch the NVIC */
|
||||
bql_lock();
|
||||
|
||||
/* Check the background context had access to the FPU */
|
||||
if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
|
||||
@ -428,7 +428,7 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
|
||||
take_exception = !stacked_ok &&
|
||||
armv7m_nvic_can_take_pending_exception(env->nvic);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
if (take_exception) {
|
||||
raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
|
||||
|
@ -482,9 +482,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
|
||||
{
|
||||
uint32_t mask;
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
arm_call_pre_el_change_hook(env_archcpu(env));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
|
||||
mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
|
||||
cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
|
||||
@ -497,9 +497,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
|
||||
env->regs[15] &= (env->thumb ? ~1 : ~3);
|
||||
arm_rebuild_hflags(env);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
arm_call_el_change_hook(env_archcpu(env));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
/* Access to user mode registers from privileged modes. */
|
||||
@ -858,9 +858,9 @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
|
||||
const ARMCPRegInfo *ri = rip;
|
||||
|
||||
if (ri->type & ARM_CP_IO) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
ri->writefn(env, ri, value);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
} else {
|
||||
ri->writefn(env, ri, value);
|
||||
}
|
||||
@ -872,9 +872,9 @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
|
||||
uint32_t res;
|
||||
|
||||
if (ri->type & ARM_CP_IO) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
res = ri->readfn(env, ri);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
} else {
|
||||
res = ri->readfn(env, ri);
|
||||
}
|
||||
@ -887,9 +887,9 @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
|
||||
const ARMCPRegInfo *ri = rip;
|
||||
|
||||
if (ri->type & ARM_CP_IO) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
ri->writefn(env, ri, value);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
} else {
|
||||
ri->writefn(env, ri, value);
|
||||
}
|
||||
@ -901,9 +901,9 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
|
||||
uint64_t res;
|
||||
|
||||
if (ri->type & ARM_CP_IO) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
res = ri->readfn(env, ri);
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
} else {
|
||||
res = ri->readfn(env, ri);
|
||||
}
|
||||
|
@ -107,7 +107,7 @@ void arm_handle_psci_call(ARMCPU *cpu)
|
||||
}
|
||||
target_cpu = ARM_CPU(target_cpu_state);
|
||||
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
g_assert(bql_locked());
|
||||
ret = target_cpu->power_state;
|
||||
break;
|
||||
default:
|
||||
|
@ -84,17 +84,17 @@ void hppa_cpu_alarm_timer(void *opaque)
|
||||
void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val)
|
||||
{
|
||||
env->cr[CR_EIRR] &= ~val;
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
eval_interrupt(env_archcpu(env));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val)
|
||||
{
|
||||
env->cr[CR_EIEM] = val;
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
eval_interrupt(env_archcpu(env));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
void hppa_cpu_do_interrupt(CPUState *cs)
|
||||
|
@ -4,4 +4,4 @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desk
|
||||
|
||||
1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets.
|
||||
2. Removal of `apic_page` and hyperv-related functionality.
|
||||
3. More relaxed use of `qemu_mutex_lock_iothread`.
|
||||
3. More relaxed use of `bql_lock`.
|
||||
|
@ -429,9 +429,9 @@ int hvf_vcpu_exec(CPUState *cpu)
|
||||
}
|
||||
vmx_update_tpr(cpu);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
return EXCP_HLT;
|
||||
}
|
||||
|
||||
@ -450,7 +450,7 @@ int hvf_vcpu_exec(CPUState *cpu)
|
||||
rip = rreg(cpu->accel->fd, HV_X86_RIP);
|
||||
env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS);
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
|
||||
update_apic_tpr(cpu);
|
||||
current_cpu = cpu;
|
||||
|
@ -45,9 +45,9 @@ void hyperv_x86_synic_update(X86CPU *cpu)
|
||||
|
||||
static void async_synic_update(CPUState *cs, run_on_cpu_data data)
|
||||
{
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
hyperv_x86_synic_update(X86_CPU(cs));
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
|
||||
int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
|
||||
|
@ -4713,9 +4713,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
|
||||
/* Inject NMI */
|
||||
if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
|
||||
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
DPRINTF("injected NMI\n");
|
||||
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
|
||||
if (ret < 0) {
|
||||
@ -4724,9 +4724,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
|
||||
}
|
||||
}
|
||||
if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
DPRINTF("injected SMI\n");
|
||||
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
|
||||
if (ret < 0) {
|
||||
@ -4737,7 +4737,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
|
||||
}
|
||||
|
||||
if (!kvm_pic_in_kernel()) {
|
||||
qemu_mutex_lock_iothread();
|
||||
bql_lock();
|
||||
}
|
||||
|
||||
/* Force the VCPU out of its inner loop to process any INIT requests
|
||||
@ -4790,7 +4790,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
|
||||
DPRINTF("setting tpr\n");
|
||||
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
|
||||
|
||||
qemu_mutex_unlock_iothread();
|
||||
bql_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4838,12 +4838,12 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
     /* We need to protect the apic state against concurrent accesses from
      * different threads in case the userspace irqchip is used. */
     if (!kvm_irqchip_in_kernel()) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
     cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
     cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
     if (!kvm_irqchip_in_kernel()) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
     return cpu_get_mem_attrs(env);
 }
@@ -5277,17 +5277,17 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
     switch (run->exit_reason) {
     case KVM_EXIT_HLT:
         DPRINTF("handle_hlt\n");
-        qemu_mutex_lock_iothread();
+        bql_lock();
         ret = kvm_handle_halt(cpu);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         break;
     case KVM_EXIT_SET_TPR:
         ret = 0;
         break;
     case KVM_EXIT_TPR_ACCESS:
-        qemu_mutex_lock_iothread();
+        bql_lock();
         ret = kvm_handle_tpr_access(cpu);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         break;
     case KVM_EXIT_FAIL_ENTRY:
         code = run->fail_entry.hardware_entry_failure_reason;
@@ -5313,9 +5313,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         break;
     case KVM_EXIT_DEBUG:
         DPRINTF("kvm_exit_debug\n");
-        qemu_mutex_lock_iothread();
+        bql_lock();
         ret = kvm_handle_debug(cpu, &run->debug.arch);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         break;
     case KVM_EXIT_HYPERV:
         ret = kvm_hv_handle_exit(cpu, &run->hyperv);
@@ -403,7 +403,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs)
 
     /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */
     if (!vi->evtchn_upcall_pending) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
         /*
          * Check again now we have the lock, because it may have been
          * asserted in the interim. And we don't want to take the lock
@@ -413,7 +413,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs)
             X86_CPU(cs)->env.xen_callback_asserted = false;
             xen_evtchn_set_callback_level(0);
         }
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
 
@@ -581,7 +581,7 @@ static int xen_set_shared_info(uint64_t gfn)
     uint64_t gpa = gfn << TARGET_PAGE_BITS;
     int i, err;
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
 
     /*
     * The xen_overlay device tells KVM about it too, since it had to
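Note: BQL_LOCK_GUARD() (formerly QEMU_IOTHREAD_LOCK_GUARD()) takes the BQL for the remainder of the enclosing scope and releases it on every return path, which is why xen_set_shared_info() needs no explicit unlock calls. A sketch of the idiom, assuming the macro from include/qemu/main-loop.h; set_shared_page() and map_shared_page() are hypothetical names:

    static int set_shared_page(uint64_t gpa)
    {
        BQL_LOCK_GUARD();               /* held until this function returns, on any path */

        if (map_shared_page(gpa) < 0) { /* placeholder for the real mapping work */
            return -EINVAL;             /* BQL released automatically here */
        }
        return 0;                       /* ...and here */
    }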
@@ -773,9 +773,9 @@ static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu,
 
     switch (hp.index) {
     case HVM_PARAM_CALLBACK_IRQ:
-        qemu_mutex_lock_iothread();
+        bql_lock();
         err = xen_evtchn_set_callback_param(hp.value);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         xen_set_long_mode(exit->u.hcall.longmode);
         break;
     default:
@@ -1408,7 +1408,7 @@ int kvm_xen_soft_reset(void)
     CPUState *cpu;
     int err;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     trace_kvm_xen_soft_reset();
 
@@ -1481,9 +1481,9 @@ static int schedop_shutdown(CPUState *cs, uint64_t arg)
         break;
 
     case SHUTDOWN_soft_reset:
-        qemu_mutex_lock_iothread();
+        bql_lock();
         ret = kvm_xen_soft_reset();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         break;
 
     default:
@@ -25,7 +25,7 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     current_cpu = cpu;
@@ -48,14 +48,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
             }
         }
         while (cpu_thread_is_idle(cpu)) {
-            qemu_cond_wait_iothread(cpu->halt_cond);
+            qemu_cond_wait_bql(cpu->halt_cond);
         }
         qemu_wait_io_event_common(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     nvmm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -399,7 +399,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
     uint8_t tpr;
     int ret;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
     if (tpr != qcpu->tpr) {
@@ -462,7 +462,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
         }
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 }
 
 /*
@@ -485,9 +485,9 @@ nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
     tpr = exit->exitstate.cr8;
     if (qcpu->tpr != tpr) {
         qcpu->tpr = tpr;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
 
@@ -648,7 +648,7 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
     CPUX86State *env = cpu_env(cpu);
     int ret = 0;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
@@ -658,7 +658,7 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
         ret = 1;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -721,7 +721,7 @@ nvmm_vcpu_loop(CPUState *cpu)
         return 0;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);
 
     /*
@@ -806,16 +806,16 @@ nvmm_vcpu_loop(CPUState *cpu)
             error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
                 exit->reason, exit->u.inv.hwcode);
             nvmm_get_registers(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             ret = -1;
             break;
         }
     } while (ret == 0);
 
     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     qatomic_set(&cpu->exit_request, false);
 
@@ -32,9 +32,9 @@ void x86_register_ferr_irq(qemu_irq irq)
 void fpu_check_raise_ferr_irq(CPUX86State *env)
 {
     if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_irq_raise(ferr_irq);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         return;
     }
 }
@@ -49,7 +49,7 @@ void cpu_set_ignne(void)
 {
     CPUX86State *env = &X86_CPU(first_cpu)->env;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     env->hflags2 |= HF2_IGNNE_MASK;
     /*
@@ -118,9 +118,9 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
         break;
     case 8:
         if (!(env->hflags2 & HF2_VINTR_MASK)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
         env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
 
@@ -25,7 +25,7 @@ static void *whpx_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     current_cpu = cpu;
@@ -48,14 +48,14 @@ static void *whpx_cpu_thread_fn(void *arg)
             }
         }
         while (cpu_thread_is_idle(cpu)) {
-            qemu_cond_wait_iothread(cpu->halt_cond);
+            qemu_cond_wait_bql(cpu->halt_cond);
         }
         qemu_wait_io_event_common(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     whpx_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -1324,7 +1324,7 @@ static int whpx_first_vcpu_starting(CPUState *cpu)
     struct whpx_state *whpx = &whpx_global;
     HRESULT hr;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     if (!QTAILQ_EMPTY(&cpu->breakpoints) ||
         (whpx->breakpoints.breakpoints &&
@@ -1442,7 +1442,7 @@ static int whpx_handle_halt(CPUState *cpu)
     CPUX86State *env = cpu_env(cpu);
     int ret = 0;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
         !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
@@ -1450,7 +1450,7 @@ static int whpx_handle_halt(CPUState *cpu)
         cpu->halted = true;
         ret = 1;
     }
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
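Note: both the NVMM and WHPX halt handlers inspect cpu->interrupt_request and the guest IF flag under the BQL before marking the vCPU halted; checking them unlocked would race with the main loop asserting an interrupt. A sketch of the shared shape; check_and_halt() is a hypothetical name, and the EXCP_HLT assignment is an assumption based on the usual halt handling rather than a line shown in the hunks above:

    static int check_and_halt(CPUState *cpu, CPUX86State *env)
    {
        int ret = 0;

        bql_lock();
        if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
              (env->eflags & IF_MASK)) &&
            !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
            cpu->exception_index = EXCP_HLT;   /* assumed, see note above */
            cpu->halted = true;
            ret = 1;
        }
        bql_unlock();

        return ret;
    }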
@@ -1472,7 +1472,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     memset(&new_int, 0, sizeof(new_int));
     memset(reg_values, 0, sizeof(reg_values));
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     /* Inject NMI */
     if (!vcpu->interruption_pending &&
@@ -1563,7 +1563,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
         reg_count += 1;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     vcpu->ready_for_pic_interrupt = false;
 
     if (reg_count) {
@@ -1590,9 +1590,9 @@ static void whpx_vcpu_post_run(CPUState *cpu)
     uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
     if (vcpu->tpr != tpr) {
         vcpu->tpr = tpr;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr));
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     vcpu->interruption_pending =
@@ -1652,7 +1652,7 @@ static int whpx_vcpu_run(CPUState *cpu)
     WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE;
     int ret;
 
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     if (whpx->running_cpus++ == 0) {
         /* Insert breakpoints into memory, update exception exit bitmap. */
@@ -1690,7 +1690,7 @@ static int whpx_vcpu_run(CPUState *cpu)
         }
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     if (exclusive_step_mode != WHPX_STEP_NONE) {
         start_exclusive();
@@ -2028,9 +2028,9 @@ static int whpx_vcpu_run(CPUState *cpu)
             error_report("WHPX: Unexpected VP exit code %d",
                          vcpu->exit_ctx.ExitReason);
             whpx_get_registers(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             break;
         }
 
@@ -2055,7 +2055,7 @@ static int whpx_vcpu_run(CPUState *cpu)
         cpu_exec_end(cpu);
     }
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     current_cpu = cpu;
 
     if (--whpx->running_cpus == 0) {