lockable: replaced locks with lock guard macros where appropriate

- ran the regexp "qemu_mutex_lock\(.*\).*\n.*if" to find targets
- replaced matches with QEMU_LOCK_GUARD where every unlock was at the end of the function
- replaced matches with WITH_QEMU_LOCK_GUARD where an unlock was not at the end

Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Message-id: 20200404042108.389635-3-dnbrdsky@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Daniel Brodsky authored 2020-04-03 21:21:08 -07:00; committed by Stefan Hajnoczi
parent 56f21718b8
commit 6e8a355de6
15 changed files with 83 additions and 106 deletions
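Before the per-file diffs, a minimal sketch of the two patterns being applied. This is not code from the commit: the Counter struct and the three functions are made up for illustration, while QEMU_LOCK_GUARD, WITH_QEMU_LOCK_GUARD, and the "qemu/lockable.h" header are the real ones this series relies on. QEMU_LOCK_GUARD(&m) acquires the mutex and releases it automatically when the guard goes out of scope, i.e. on every return path of the function; WITH_QEMU_LOCK_GUARD(&m) { ... } confines the critical section to the attached block, for functions that keep running after the lock should be dropped.

/*
 * Sketch only: Counter and its functions are hypothetical; the guard
 * macros are the real ones from "qemu/lockable.h".
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/lockable.h"

typedef struct Counter {
    QemuMutex mutex;
    int value;
} Counter;

/* Before: every exit path has to unlock by hand (or jump to a label). */
static int counter_add_explicit(Counter *c, int n)
{
    qemu_mutex_lock(&c->mutex);
    if (n < 0) {
        qemu_mutex_unlock(&c->mutex);   /* easy to forget */
        return -EINVAL;
    }
    c->value += n;
    qemu_mutex_unlock(&c->mutex);
    return 0;
}

/* After: all unlocks were at function end, so QEMU_LOCK_GUARD fits;
 * the mutex is released automatically on every return path. */
static int counter_add_guarded(Counter *c, int n)
{
    QEMU_LOCK_GUARD(&c->mutex);
    if (n < 0) {
        return -EINVAL;                 /* unlocks here too */
    }
    c->value += n;
    return 0;
}

/* When the function continues past the critical section,
 * WITH_QEMU_LOCK_GUARD scopes the lock to one block instead. */
static int counter_snapshot(Counter *c)
{
    int v;

    WITH_QEMU_LOCK_GUARD(&c->mutex) {
        v = c->value;                   /* lock held only in this block */
    }
    return v;                           /* lock already dropped */
}

Both guards rely on the compiler's cleanup attribute, so an early return inside the guarded region unlocks correctly on its own, which is why the goto out/unlock sequence in the first diff below can collapse into a plain return.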

--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -1394,20 +1394,17 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    qemu_mutex_lock(&iscsilun->mutex);
+    QEMU_LOCK_GUARD(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
     } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
         error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
-        goto out;
+        return;
     }
 
     timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
     iscsi_set_events(iscsilun);
-
-out:
-    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)

--- a/block/nfs.c
+++ b/block/nfs.c
@@ -273,15 +273,14 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pread_async(client->context, client->fh,
-                        offset, bytes, nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pread_async(client->context, client->fh,
+                            offset, bytes, nfs_co_generic_cb, &task) != 0) {
+            return -ENOMEM;
+        }
+        nfs_set_events(client);
+    }
 
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -320,19 +319,18 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pwrite_async(client->context, client->fh,
-                         offset, bytes, buf,
-                         nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        if (my_buffer) {
-            g_free(buf);
-        }
-        return -ENOMEM;
-    }
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pwrite_async(client->context, client->fh,
+                             offset, bytes, buf,
+                             nfs_co_generic_cb, &task) != 0) {
+            if (my_buffer) {
+                g_free(buf);
+            }
+            return -ENOMEM;
+        }
+        nfs_set_events(client);
+    }
 
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -355,15 +353,14 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
-                        &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
+                            &task) != 0) {
+            return -ENOMEM;
+        }
+        nfs_set_events(client);
+    }
 
     while (!task.complete) {
         qemu_coroutine_yield();
     }

--- a/cpus-common.c
+++ b/cpus-common.c
@@ -22,6 +22,7 @@
 #include "exec/cpu-common.h"
 #include "hw/core/cpu.h"
 #include "sysemu/cpus.h"
+#include "qemu/lockable.h"
 
 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;
@@ -71,7 +72,7 @@ static int cpu_get_free_index(void)
 
 void cpu_list_add(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
         cpu->cpu_index = cpu_get_free_index();
         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
@@ -79,15 +80,13 @@ void cpu_list_add(CPUState *cpu)
         assert(!cpu_index_auto_assigned);
     }
     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 void cpu_list_remove(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (!QTAILQ_IN_USE(cpu, node)) {
         /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
         return;
     }
@@ -95,7 +94,6 @@ void cpu_list_remove(CPUState *cpu)
 
     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 struct qemu_work_item {
@@ -237,7 +235,7 @@ void cpu_exec_start(CPUState *cpu)
      * see cpu->running == true, and it will kick the CPU.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (!cpu->has_waiter) {
             /* Not counted in pending_cpus, let the exclusive item
              * run. Since we have the lock, just set cpu->running to true
@@ -252,7 +250,6 @@ void cpu_exec_start(CPUState *cpu)
              * waiter at cpu_exec_end.
              */
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
@@ -280,7 +277,7 @@ void cpu_exec_end(CPUState *cpu)
      * next cpu_exec_start.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (cpu->has_waiter) {
             cpu->has_waiter = false;
             atomic_set(&pending_cpus, pending_cpus - 1);
@@ -288,7 +285,6 @@ void cpu_exec_end(CPUState *cpu)
                 qemu_cond_signal(&exclusive_cond);
             }
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }

--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -478,18 +478,19 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
                               cmd->u.surface_create.stride);
             return 1;
         }
-        qemu_mutex_lock(&qxl->track_lock);
-        if (cmd->type == QXL_SURFACE_CMD_CREATE) {
-            qxl->guest_surfaces.cmds[id] = ext->cmd.data;
-            qxl->guest_surfaces.count++;
-            if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
-                qxl->guest_surfaces.max = qxl->guest_surfaces.count;
-        }
-        if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
-            qxl->guest_surfaces.cmds[id] = 0;
-            qxl->guest_surfaces.count--;
-        }
-        qemu_mutex_unlock(&qxl->track_lock);
+        WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+            if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+                qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+                qxl->guest_surfaces.count++;
+                if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+                    qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+                }
+            }
+            if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+                qxl->guest_surfaces.cmds[id] = 0;
+                qxl->guest_surfaces.count--;
+            }
+        }
         break;
     }
     case QXL_CMD_CURSOR:
@@ -958,10 +959,9 @@ static void interface_update_area_complete(QXLInstance *sin,
     int i;
     int qxl_i;
 
-    qemu_mutex_lock(&qxl->ssd.lock);
+    QEMU_LOCK_GUARD(&qxl->ssd.lock);
     if (surface_id != 0 || !num_updated_rects ||
         !qxl->render_update_cookie_num) {
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
@@ -980,7 +980,6 @@ static void interface_update_area_complete(QXLInstance *sin,
          * Don't bother copying or scheduling the bh since we will flip
          * the whole area anyway on completion of the update_area async call
          */
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     qxl_i = qxl->num_dirty_rects;
@@ -991,7 +990,6 @@ static void interface_update_area_complete(QXLInstance *sin,
     trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
                                                          qxl->num_dirty_rects);
     qemu_bh_schedule(qxl->update_area_bh);
-    qemu_mutex_unlock(&qxl->ssd.lock);
 }
 
 /* called from spice server thread context only */
@@ -1694,15 +1692,14 @@ static void ioport_write(void *opaque, hwaddr addr,
     case QXL_IO_MONITORS_CONFIG_ASYNC:
 async_common:
         async = QXL_ASYNC;
-        qemu_mutex_lock(&d->async_lock);
-        if (d->current_async != QXL_UNDEFINED_IO) {
-            qxl_set_guest_bug(d, "%d async started before last (%d) complete",
-                              io_port, d->current_async);
-            qemu_mutex_unlock(&d->async_lock);
-            return;
-        }
-        d->current_async = orig_io_port;
-        qemu_mutex_unlock(&d->async_lock);
+        WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+            if (d->current_async != QXL_UNDEFINED_IO) {
+                qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+                                  io_port, d->current_async);
+                return;
+            }
+            d->current_async = orig_io_port;
+        }
         break;
     default:
         break;

--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -22,6 +22,7 @@
 #include "hw/vfio/vfio-platform.h"
 #include "migration/vmstate.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/module.h"
 #include "qemu/range.h"
@@ -216,7 +217,7 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
     VFIOPlatformDevice *vdev = intp->vdev;
     bool delay_handling = false;
 
-    qemu_mutex_lock(&vdev->intp_mutex);
+    QEMU_LOCK_GUARD(&vdev->intp_mutex);
     if (intp->state == VFIO_IRQ_INACTIVE) {
         QLIST_FOREACH(tmp, &vdev->intp_list, next) {
             if (tmp->state == VFIO_IRQ_ACTIVE ||
@@ -236,7 +237,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
         QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                              intp, pqnext);
         ret = event_notifier_test_and_clear(intp->interrupt);
-        qemu_mutex_unlock(&vdev->intp_mutex);
         return;
     }
@@ -266,7 +266,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                           vdev->mmap_timeout);
     }
-    qemu_mutex_unlock(&vdev->intp_mutex);
 }
 
 /**

--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1653,11 +1653,10 @@ static void migrate_fd_cleanup_bh(void *opaque)
 
 void migrate_set_error(MigrationState *s, const Error *error)
 {
-    qemu_mutex_lock(&s->error_mutex);
+    QEMU_LOCK_GUARD(&s->error_mutex);
     if (!s->error) {
         s->error = error_copy(error);
     }
-    qemu_mutex_unlock(&s->error_mutex);
 }
 
 void migrate_fd_error(MigrationState *s, const Error *error)

--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -894,11 +894,11 @@ void multifd_recv_sync_main(void)
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDRecvParams *p = &multifd_recv_state->params[i];
 
-        qemu_mutex_lock(&p->mutex);
-        if (multifd_recv_state->packet_num < p->packet_num) {
-            multifd_recv_state->packet_num = p->packet_num;
-        }
-        qemu_mutex_unlock(&p->mutex);
+        WITH_QEMU_LOCK_GUARD(&p->mutex) {
+            if (multifd_recv_state->packet_num < p->packet_num) {
+                multifd_recv_state->packet_num = p->packet_num;
+            }
+        }
         trace_multifd_recv_sync_main_signal(p->id);
         qemu_sem_post(&p->sem_sync);
     }

--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1369,7 +1369,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
         return NULL;
     }
 
-    qemu_mutex_lock(&rs->src_page_req_mutex);
+    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
         struct RAMSrcPageRequest *entry =
                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
@@ -1386,7 +1386,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
             migration_consume_urgent_request();
         }
     }
-    qemu_mutex_unlock(&rs->src_page_req_mutex);
 
     return block;
 }

--- a/monitor/misc.c
+++ b/monitor/misc.c
@@ -1473,7 +1473,7 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     MonFdsetFd *mon_fdset_fd;
     AddfdInfo *fdinfo;
 
-    qemu_mutex_lock(&mon_fdsets_lock);
+    QEMU_LOCK_GUARD(&mon_fdsets_lock);
     if (has_fdset_id) {
         QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
             /* Break if match found or match impossible due to ordering by ID */
@@ -1494,7 +1494,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
         if (fdset_id < 0) {
             error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
                        "a non-negative value");
-            qemu_mutex_unlock(&mon_fdsets_lock);
             return NULL;
         }
         /* Use specified fdset ID */
@@ -1545,7 +1544,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     fdinfo->fdset_id = mon_fdset->id;
     fdinfo->fd = mon_fdset_fd->fd;
 
-    qemu_mutex_unlock(&mon_fdsets_lock);
     return fdinfo;
 }

--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -18,6 +18,7 @@
 #include "qemu/osdep.h"
 #include "ui/qemu-spice.h"
 #include "qemu/timer.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"
@@ -483,12 +484,12 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
 {
     graphic_hw_update(ssd->dcl.con);
 
-    qemu_mutex_lock(&ssd->lock);
-    if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
-        qemu_spice_create_update(ssd);
-        ssd->notify++;
+    WITH_QEMU_LOCK_GUARD(&ssd->lock) {
+        if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
+            qemu_spice_create_update(ssd);
+            ssd->notify++;
+        }
     }
-    qemu_mutex_unlock(&ssd->lock);
 
     trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
     if (ssd->notify) {
@@ -580,7 +581,7 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
     int ret;
 
-    qemu_mutex_lock(&ssd->lock);
+    QEMU_LOCK_GUARD(&ssd->lock);
     if (ssd->ptr_define) {
         *ext = ssd->ptr_define->ext;
         ssd->ptr_define = NULL;
@@ -592,7 +593,6 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     } else {
         ret = false;
     }
-    qemu_mutex_unlock(&ssd->lock);
     return ret;
 }

--- a/util/log.c
+++ b/util/log.c
@@ -25,6 +25,7 @@
 #include "qemu/cutils.h"
 #include "trace/control.h"
 #include "qemu/thread.h"
+#include "qemu/lockable.h"
 
 static char *logfilename;
 static QemuMutex qemu_logfile_mutex;
@@ -94,7 +95,7 @@ void qemu_set_log(int log_flags)
     if (qemu_loglevel && (!is_daemonized() || logfilename)) {
         need_to_open_file = true;
     }
-    qemu_mutex_lock(&qemu_logfile_mutex);
+    QEMU_LOCK_GUARD(&qemu_logfile_mutex);
     if (qemu_logfile && !need_to_open_file) {
         logfile = qemu_logfile;
         atomic_rcu_set(&qemu_logfile, NULL);
@@ -136,7 +137,6 @@ void qemu_set_log(int log_flags)
         }
         atomic_rcu_set(&qemu_logfile, logfile);
     }
-    qemu_mutex_unlock(&qemu_logfile_mutex);
 }
 
 void qemu_log_needs_buffers(void)

--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -459,17 +459,16 @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
     QEMUTimerList *timer_list = ts->timer_list;
     bool rearm;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
-    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
-        if (ts->expire_time != -1) {
-            timer_del_locked(timer_list, ts);
-        }
-        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
-    } else {
-        rearm = false;
-    }
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
-
+    WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
+        if (ts->expire_time == -1 || ts->expire_time > expire_time) {
+            if (ts->expire_time != -1) {
+                timer_del_locked(timer_list, ts);
+            }
+            rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+        } else {
+            rearm = false;
+        }
+    }
     if (rearm) {
         timerlist_rearm(timer_list);
     }

--- a/util/rcu.c
+++ b/util/rcu.c
@@ -31,6 +31,7 @@
 #include "qemu/atomic.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/lockable.h"
 #if defined(CONFIG_MALLOC_TRIM)
 #include <malloc.h>
 #endif
@@ -141,14 +142,14 @@ static void wait_for_readers(void)
 
 void synchronize_rcu(void)
 {
-    qemu_mutex_lock(&rcu_sync_lock);
+    QEMU_LOCK_GUARD(&rcu_sync_lock);
 
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
     */
     smp_mb_global();
 
-    qemu_mutex_lock(&rcu_registry_lock);
+    QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
@@ -169,9 +170,6 @@ void synchronize_rcu(void)
 
         wait_for_readers();
     }
-
-    qemu_mutex_unlock(&rcu_registry_lock);
-    qemu_mutex_unlock(&rcu_sync_lock);
 }

--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -210,7 +210,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
 
     trace_thread_pool_cancel(elem, elem->common.opaque);
 
-    qemu_mutex_lock(&pool->lock);
+    QEMU_LOCK_GUARD(&pool->lock);
     if (elem->state == THREAD_QUEUED &&
         /* No thread has yet started working on elem. we can try to "steal"
          * the item from the worker if we can get a signal from the
@@ -225,7 +225,6 @@ static void thread_pool_cancel(BlockAIOCB *acb)
         elem->ret = -ECANCELED;
     }
 
-    qemu_mutex_unlock(&pool->lock);
 }
 
 static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)

--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -21,6 +21,7 @@
 #include "standard-headers/linux/pci_regs.h"
 #include "qemu/event_notifier.h"
 #include "qemu/vfio-helpers.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 #define QEMU_VFIO_DEBUG 0
@@ -667,14 +668,12 @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
         .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
     };
 
     trace_qemu_vfio_dma_reset_temporary(s);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
-        qemu_mutex_unlock(&s->lock);
         return -errno;
     }
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_mutex_unlock(&s->lock);
     return 0;
 }