lockable: replaced locks with lock guard macros where appropriate
- ran regexp "qemu_mutex_lock\(.*\).*\n.*if" to find targets
- replaced result with QEMU_LOCK_GUARD if all unlocks at function end
- replaced result with WITH_QEMU_LOCK_GUARD if unlock not at end

Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Message-id: 20200404042108.389635-3-dnbrdsky@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent 56f21718b8
commit 6e8a355de6
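
The conversion follows one of two shapes, depending on where the unlock sat. A rough sketch of both patterns (the frob_* functions below are invented for illustration; the macros are the ones from include/qemu/lockable.h):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "qemu/lockable.h"

    static QemuMutex lock;

    /* All unlocks were at function end: QEMU_LOCK_GUARD takes the mutex here
     * and releases it automatically on every return path. */
    static int frob_whole_function(int x)
    {
        QEMU_LOCK_GUARD(&lock);            /* was: qemu_mutex_lock(&lock); */
        if (x < 0) {
            return -EINVAL;                /* was: unlock, then return */
        }
        return x;                          /* was: unlock at function end */
    }

    /* The unlock was not at function end: WITH_QEMU_LOCK_GUARD scopes the
     * critical section to the braced block and also unlocks on an early
     * return out of that block. */
    static int frob_block(int x)
    {
        WITH_QEMU_LOCK_GUARD(&lock) {      /* was: qemu_mutex_lock(&lock); */
            if (x < 0) {
                return -EINVAL;            /* was: unlock, then return */
            }
            x++;
        }                                  /* was: qemu_mutex_unlock(&lock); */
        /* the mutex is no longer held here */
        return x;
    }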
block/iscsi.c

@@ -1394,20 +1394,17 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    qemu_mutex_lock(&iscsilun->mutex);
+    QEMU_LOCK_GUARD(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
     } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
         error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
-        goto out;
+        return;
     }
 
     timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
     iscsi_set_events(iscsilun);
-
-out:
-    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
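In block/iscsi.c the out: label and the explicit unlock disappear entirely: QEMU_LOCK_GUARD is built on the compiler's cleanup attribute, so the mutex is dropped whenever the guard variable goes out of scope, including on the early return. A stripped-down stand-in showing the mechanism (illustration only, not the actual definition in include/qemu/lockable.h, which works through QemuLockable and generates a unique guard variable per use):

    /* Hypothetical demo macro -- not the real QEMU_LOCK_GUARD. */
    static inline void demo_mutex_auto_unlock(QemuMutex **m)
    {
        qemu_mutex_unlock(*m);
    }

    #define DEMO_LOCK_GUARD(m)                                       \
        __attribute__((cleanup(demo_mutex_auto_unlock), unused))     \
        QemuMutex *demo_lock_guard_ = (qemu_mutex_lock(m), (m))

Because the unlock now runs at scope exit, the "goto out; ... out: qemu_mutex_unlock(...)" pattern collapses into a plain return.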
block/nfs.c
@@ -273,15 +273,14 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pread_async(client->context, client->fh,
-                        offset, bytes, nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pread_async(client->context, client->fh,
+                            offset, bytes, nfs_co_generic_cb, &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -320,19 +319,18 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pwrite_async(client->context, client->fh,
-                         offset, bytes, buf,
-                         nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        if (my_buffer) {
-            g_free(buf);
-        }
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pwrite_async(client->context, client->fh,
+                             offset, bytes, buf,
+                             nfs_co_generic_cb, &task) != 0) {
+            if (my_buffer) {
+                g_free(buf);
+            }
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -355,15 +353,14 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
-                        &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
+                            &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }

cpus-common.c

@@ -22,6 +22,7 @@
 #include "exec/cpu-common.h"
 #include "hw/core/cpu.h"
 #include "sysemu/cpus.h"
+#include "qemu/lockable.h"
 
 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;
@@ -71,7 +72,7 @@ static int cpu_get_free_index(void)
 
 void cpu_list_add(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
         cpu->cpu_index = cpu_get_free_index();
         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
@@ -79,15 +80,13 @@ void cpu_list_add(CPUState *cpu)
         assert(!cpu_index_auto_assigned);
     }
     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 void cpu_list_remove(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (!QTAILQ_IN_USE(cpu, node)) {
         /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
         return;
     }
 
@@ -95,7 +94,6 @@ void cpu_list_remove(CPUState *cpu)
 
     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 struct qemu_work_item {
@@ -237,7 +235,7 @@ void cpu_exec_start(CPUState *cpu)
      * see cpu->running == true, and it will kick the CPU.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (!cpu->has_waiter) {
             /* Not counted in pending_cpus, let the exclusive item
              * run. Since we have the lock, just set cpu->running to true
@@ -252,7 +250,6 @@ void cpu_exec_start(CPUState *cpu)
              * waiter at cpu_exec_end.
              */
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 
@@ -280,7 +277,7 @@ void cpu_exec_end(CPUState *cpu)
      * next cpu_exec_start.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
@@ -288,7 +285,6 @@ void cpu_exec_end(CPUState *cpu)
                 qemu_cond_signal(&exclusive_cond);
             }
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 
hw/display/qxl.c

@@ -478,18 +478,19 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
                                   cmd->u.surface_create.stride);
             return 1;
         }
-        qemu_mutex_lock(&qxl->track_lock);
-        if (cmd->type == QXL_SURFACE_CMD_CREATE) {
-            qxl->guest_surfaces.cmds[id] = ext->cmd.data;
-            qxl->guest_surfaces.count++;
-            if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
-                qxl->guest_surfaces.max = qxl->guest_surfaces.count;
-        }
-        if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
-            qxl->guest_surfaces.cmds[id] = 0;
-            qxl->guest_surfaces.count--;
-        }
-        qemu_mutex_unlock(&qxl->track_lock);
+        WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+            if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+                qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+                qxl->guest_surfaces.count++;
+                if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+                    qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+                }
+            }
+            if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+                qxl->guest_surfaces.cmds[id] = 0;
+                qxl->guest_surfaces.count--;
+            }
+        }
         break;
     }
     case QXL_CMD_CURSOR:
@@ -958,10 +959,9 @@ static void interface_update_area_complete(QXLInstance *sin,
     int i;
     int qxl_i;
 
-    qemu_mutex_lock(&qxl->ssd.lock);
+    QEMU_LOCK_GUARD(&qxl->ssd.lock);
     if (surface_id != 0 || !num_updated_rects ||
         !qxl->render_update_cookie_num) {
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
@@ -980,7 +980,6 @@ static void interface_update_area_complete(QXLInstance *sin,
          * Don't bother copying or scheduling the bh since we will flip
          * the whole area anyway on completion of the update_area async call
          */
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     qxl_i = qxl->num_dirty_rects;
@@ -991,7 +990,6 @@ static void interface_update_area_complete(QXLInstance *sin,
     trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
                                                          qxl->num_dirty_rects);
     qemu_bh_schedule(qxl->update_area_bh);
-    qemu_mutex_unlock(&qxl->ssd.lock);
 }
 
 /* called from spice server thread context only */
@@ -1694,15 +1692,14 @@ static void ioport_write(void *opaque, hwaddr addr,
     case QXL_IO_MONITORS_CONFIG_ASYNC:
 async_common:
         async = QXL_ASYNC;
-        qemu_mutex_lock(&d->async_lock);
-        if (d->current_async != QXL_UNDEFINED_IO) {
-            qxl_set_guest_bug(d, "%d async started before last (%d) complete",
-                              io_port, d->current_async);
-            qemu_mutex_unlock(&d->async_lock);
-            return;
-        }
-        d->current_async = orig_io_port;
-        qemu_mutex_unlock(&d->async_lock);
+        WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+            if (d->current_async != QXL_UNDEFINED_IO) {
+                qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+                                  io_port, d->current_async);
+                return;
+            }
+            d->current_async = orig_io_port;
+        }
         break;
     default:
         break;

hw/vfio/platform.c

@@ -22,6 +22,7 @@
 #include "hw/vfio/vfio-platform.h"
 #include "migration/vmstate.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/module.h"
 #include "qemu/range.h"
@@ -216,7 +217,7 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
     VFIOPlatformDevice *vdev = intp->vdev;
     bool delay_handling = false;
 
-    qemu_mutex_lock(&vdev->intp_mutex);
+    QEMU_LOCK_GUARD(&vdev->intp_mutex);
     if (intp->state == VFIO_IRQ_INACTIVE) {
         QLIST_FOREACH(tmp, &vdev->intp_list, next) {
             if (tmp->state == VFIO_IRQ_ACTIVE ||
@@ -236,7 +237,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
         QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                              intp, pqnext);
         ret = event_notifier_test_and_clear(intp->interrupt);
-        qemu_mutex_unlock(&vdev->intp_mutex);
         return;
     }
 
@@ -266,7 +266,6 @@ static void vfio_intp_interrupt(VFIOINTp *intp)
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                       vdev->mmap_timeout);
     }
-    qemu_mutex_unlock(&vdev->intp_mutex);
 }
 
 /**
migration/migration.c

@@ -1653,11 +1653,10 @@ static void migrate_fd_cleanup_bh(void *opaque)
 
 void migrate_set_error(MigrationState *s, const Error *error)
 {
-    qemu_mutex_lock(&s->error_mutex);
+    QEMU_LOCK_GUARD(&s->error_mutex);
     if (!s->error) {
         s->error = error_copy(error);
     }
-    qemu_mutex_unlock(&s->error_mutex);
 }
 
 void migrate_fd_error(MigrationState *s, const Error *error)
migration/multifd.c

@@ -894,11 +894,11 @@ void multifd_recv_sync_main(void)
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDRecvParams *p = &multifd_recv_state->params[i];
 
-        qemu_mutex_lock(&p->mutex);
-        if (multifd_recv_state->packet_num < p->packet_num) {
-            multifd_recv_state->packet_num = p->packet_num;
+        WITH_QEMU_LOCK_GUARD(&p->mutex) {
+            if (multifd_recv_state->packet_num < p->packet_num) {
+                multifd_recv_state->packet_num = p->packet_num;
+            }
         }
-        qemu_mutex_unlock(&p->mutex);
         trace_multifd_recv_sync_main_signal(p->id);
         qemu_sem_post(&p->sem_sync);
     }
migration/ram.c

@@ -1369,7 +1369,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
         return NULL;
     }
 
-    qemu_mutex_lock(&rs->src_page_req_mutex);
+    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
         struct RAMSrcPageRequest *entry =
                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
@@ -1386,7 +1386,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
             migration_consume_urgent_request();
         }
     }
-    qemu_mutex_unlock(&rs->src_page_req_mutex);
 
     return block;
 }
monitor/misc.c

@@ -1473,7 +1473,7 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     MonFdsetFd *mon_fdset_fd;
     AddfdInfo *fdinfo;
 
-    qemu_mutex_lock(&mon_fdsets_lock);
+    QEMU_LOCK_GUARD(&mon_fdsets_lock);
     if (has_fdset_id) {
         QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
             /* Break if match found or match impossible due to ordering by ID */
@@ -1494,7 +1494,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
         if (fdset_id < 0) {
             error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
                        "a non-negative value");
-            qemu_mutex_unlock(&mon_fdsets_lock);
             return NULL;
         }
         /* Use specified fdset ID */
@@ -1545,7 +1544,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     fdinfo->fdset_id = mon_fdset->id;
     fdinfo->fd = mon_fdset_fd->fd;
 
-    qemu_mutex_unlock(&mon_fdsets_lock);
     return fdinfo;
 }
 
ui/spice-display.c

@@ -18,6 +18,7 @@
 #include "qemu/osdep.h"
 #include "ui/qemu-spice.h"
 #include "qemu/timer.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"
@@ -483,12 +484,12 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
 {
     graphic_hw_update(ssd->dcl.con);
 
-    qemu_mutex_lock(&ssd->lock);
-    if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
-        qemu_spice_create_update(ssd);
-        ssd->notify++;
+    WITH_QEMU_LOCK_GUARD(&ssd->lock) {
+        if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
+            qemu_spice_create_update(ssd);
+            ssd->notify++;
+        }
     }
-    qemu_mutex_unlock(&ssd->lock);
 
     trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
     if (ssd->notify) {
@@ -580,7 +581,7 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
     int ret;
 
-    qemu_mutex_lock(&ssd->lock);
+    QEMU_LOCK_GUARD(&ssd->lock);
     if (ssd->ptr_define) {
         *ext = ssd->ptr_define->ext;
         ssd->ptr_define = NULL;
@@ -592,7 +593,6 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     } else {
         ret = false;
     }
-    qemu_mutex_unlock(&ssd->lock);
     return ret;
 }
 
util/log.c

@@ -25,6 +25,7 @@
 #include "qemu/cutils.h"
 #include "trace/control.h"
 #include "qemu/thread.h"
+#include "qemu/lockable.h"
 
 static char *logfilename;
 static QemuMutex qemu_logfile_mutex;
@@ -94,7 +95,7 @@ void qemu_set_log(int log_flags)
     if (qemu_loglevel && (!is_daemonized() || logfilename)) {
         need_to_open_file = true;
     }
-    qemu_mutex_lock(&qemu_logfile_mutex);
+    QEMU_LOCK_GUARD(&qemu_logfile_mutex);
     if (qemu_logfile && !need_to_open_file) {
         logfile = qemu_logfile;
         atomic_rcu_set(&qemu_logfile, NULL);
@@ -136,7 +137,6 @@ void qemu_set_log(int log_flags)
         }
         atomic_rcu_set(&qemu_logfile, logfile);
     }
-    qemu_mutex_unlock(&qemu_logfile_mutex);
 }
 
 void qemu_log_needs_buffers(void)
util/qemu-timer.c

@@ -459,17 +459,16 @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
     QEMUTimerList *timer_list = ts->timer_list;
     bool rearm;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
-    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
-        if (ts->expire_time != -1) {
-            timer_del_locked(timer_list, ts);
+    WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
+        if (ts->expire_time == -1 || ts->expire_time > expire_time) {
+            if (ts->expire_time != -1) {
+                timer_del_locked(timer_list, ts);
+            }
+            rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+        } else {
+            rearm = false;
         }
-        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
-    } else {
-        rearm = false;
     }
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
 
     if (rearm) {
         timerlist_rearm(timer_list);
     }
util/rcu.c

@@ -31,6 +31,7 @@
 #include "qemu/atomic.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/lockable.h"
 #if defined(CONFIG_MALLOC_TRIM)
 #include <malloc.h>
 #endif
@@ -141,14 +142,14 @@ static void wait_for_readers(void)
 
 void synchronize_rcu(void)
 {
-    qemu_mutex_lock(&rcu_sync_lock);
+    QEMU_LOCK_GUARD(&rcu_sync_lock);
 
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
      */
     smp_mb_global();
 
-    qemu_mutex_lock(&rcu_registry_lock);
+    QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
@@ -169,9 +170,6 @@ void synchronize_rcu(void)
 
         wait_for_readers();
     }
-
-    qemu_mutex_unlock(&rcu_registry_lock);
-    qemu_mutex_unlock(&rcu_sync_lock);
 }
 
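In synchronize_rcu() both mutexes are now taken with QEMU_LOCK_GUARD, so the two explicit unlock calls at the end of the function go away. Cleanup handlers run in reverse order of declaration when the scope exits, so the release order is preserved: the registry lock is dropped before the sync lock, just as before. A minimal sketch of the resulting shape (body elided, illustrative stand-in only):

    void synchronize_rcu_shape(void)
    {
        QEMU_LOCK_GUARD(&rcu_sync_lock);      /* taken first, released last */
        QEMU_LOCK_GUARD(&rcu_registry_lock);  /* taken second, released first */
        /* ... wait for readers ... */
    }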
util/thread-pool.c

@@ -210,7 +210,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
 
     trace_thread_pool_cancel(elem, elem->common.opaque);
 
-    qemu_mutex_lock(&pool->lock);
+    QEMU_LOCK_GUARD(&pool->lock);
     if (elem->state == THREAD_QUEUED &&
         /* No thread has yet started working on elem. we can try to "steal"
          * the item from the worker if we can get a signal from the
@@ -225,7 +225,6 @@ static void thread_pool_cancel(BlockAIOCB *acb)
         elem->ret = -ECANCELED;
     }
 
-    qemu_mutex_unlock(&pool->lock);
 }
 
 static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
util/vfio-helpers.c

@@ -21,6 +21,7 @@
 #include "standard-headers/linux/pci_regs.h"
 #include "qemu/event_notifier.h"
 #include "qemu/vfio-helpers.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 #define QEMU_VFIO_DEBUG 0
@@ -667,14 +668,12 @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
         .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
     };
     trace_qemu_vfio_dma_reset_temporary(s);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
-        qemu_mutex_unlock(&s->lock);
         return -errno;
     }
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_mutex_unlock(&s->lock);
     return 0;
 }
 