cpus: protect work list with work_mutex
Protect the list of queued work items with something other than the BQL,
as a preparation for running the work items outside it.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0c71d41e2a
commit 376692b9dc

cpus.c: 22 changed lines
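Before the diff itself, a minimal self-contained sketch of the pattern the commit moves to may help: a per-CPU work list guarded by its own mutex rather than the BQL, with the lock dropped while each callback runs. This is not QEMU code; every name in it (work_item, cpu_work_queue, queue_work, flush_work) is illustrative only.

    /* Minimal sketch (not QEMU code): a per-CPU work list guarded by its own
     * mutex instead of a single global lock.  All names are illustrative. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct work_item {
        void (*func)(void *data);
        void *data;
        struct work_item *next;
        bool free_after_run;          /* free() the item once it has run */
    };

    struct cpu_work_queue {
        pthread_mutex_t work_mutex;   /* protects first/last, nothing else */
        struct work_item *first, *last;
    };

    /* Producer side: append an item while holding only the queue's own lock. */
    static void queue_work(struct cpu_work_queue *q, struct work_item *wi)
    {
        pthread_mutex_lock(&q->work_mutex);
        wi->next = NULL;
        if (q->first == NULL) {
            q->first = wi;
        } else {
            q->last->next = wi;
        }
        q->last = wi;
        pthread_mutex_unlock(&q->work_mutex);
        /* ...kick the target CPU thread here... */
    }

    /* Consumer side: pop under the lock, but run each callback with the lock
     * released, then retake it to update the item's state. */
    static void flush_work(struct cpu_work_queue *q)
    {
        pthread_mutex_lock(&q->work_mutex);
        while (q->first != NULL) {
            struct work_item *wi = q->first;
            q->first = wi->next;
            if (q->first == NULL) {
                q->last = NULL;
            }
            pthread_mutex_unlock(&q->work_mutex);
            wi->func(wi->data);
            pthread_mutex_lock(&q->work_mutex);
            if (wi->free_after_run) {
                free(wi);
            }
        }
        pthread_mutex_unlock(&q->work_mutex);
    }

The key design point, mirrored in the real patch below, is that the mutex protects only the list pointers; the callback itself runs with the lock released.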
cpus.c

@@ -819,6 +819,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi.func = func;
     wi.data = data;
     wi.free = false;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -827,9 +829,10 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
-    while (!wi.done) {
+    while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
         qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
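Assembled from the two hunks above, the producer side of run_on_cpu() now looks roughly like the following; lines that fall outside the hunks are only indicated by comments and are assumptions from context:

    void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
    {
        struct qemu_work_item wi;              /* declared above the hunks; assumed */

        /* ...early-return fast path above the hunks omitted... */

        wi.func = func;
        wi.data = data;
        wi.free = false;

        qemu_mutex_lock(&cpu->work_mutex);     /* new: per-CPU lock, not the BQL */
        if (cpu->queued_work_first == NULL) {
            cpu->queued_work_first = &wi;
        } else {
            /* ...append to the tail (line between the two hunks, not shown)... */
        }
        cpu->queued_work_last = &wi;
        wi.next = NULL;
        wi.done = false;
        qemu_mutex_unlock(&cpu->work_mutex);   /* new: drop the lock before kicking */

        qemu_cpu_kick(cpu);
        /* wi.done is now set by the worker without a shared lock, so poll it with
         * atomic_mb_read(); the condvar only lets the caller sleep between checks. */
        while (!atomic_mb_read(&wi.done)) {
            CPUState *self_cpu = current_cpu;

            qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
            /* ...current_cpu is restored from self_cpu after the wait (below the hunk)... */
        }
    }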
@@ -850,6 +853,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi->func = func;
     wi->data = data;
     wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = wi;
     } else {
@@ -858,6 +863,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = wi;
     wi->next = NULL;
     wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
 }
@@ -870,15 +876,23 @@ static void flush_queued_work(CPUState *cpu)
         return;
     }
 
-    while ((wi = cpu->queued_work_first)) {
+    qemu_mutex_lock(&cpu->work_mutex);
+    while (cpu->queued_work_first != NULL) {
+        wi = cpu->queued_work_first;
         cpu->queued_work_first = wi->next;
+        if (!cpu->queued_work_first) {
+            cpu->queued_work_last = NULL;
+        }
+        qemu_mutex_unlock(&cpu->work_mutex);
         wi->func(wi->data);
-        wi->done = true;
+        qemu_mutex_lock(&cpu->work_mutex);
         if (wi->free) {
             g_free(wi);
+        } else {
+            atomic_mb_set(&wi->done, true);
         }
     }
-    cpu->queued_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
     qemu_cond_broadcast(&qemu_work_cond);
 }
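Read straight through, the post-patch flush_queued_work() from the last hunk comes out roughly as below; the declaration of wi sits above the hunk and is assumed from context:

    static void flush_queued_work(CPUState *cpu)
    {
        struct qemu_work_item *wi;         /* declared above the hunk; assumed */

        if (cpu->queued_work_first == NULL) {
            return;
        }

        qemu_mutex_lock(&cpu->work_mutex);
        while (cpu->queued_work_first != NULL) {
            wi = cpu->queued_work_first;
            cpu->queued_work_first = wi->next;
            if (!cpu->queued_work_first) {
                cpu->queued_work_last = NULL;
            }
            /* The callback runs without work_mutex held. */
            qemu_mutex_unlock(&cpu->work_mutex);
            wi->func(wi->data);
            qemu_mutex_lock(&cpu->work_mutex);
            if (wi->free) {
                g_free(wi);
            } else {
                /* Synchronous callers in run_on_cpu() poll wi.done with
                 * atomic_mb_read(); pair it with atomic_mb_set() here. */
                atomic_mb_set(&wi->done, true);
            }
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        qemu_cond_broadcast(&qemu_work_cond);
    }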
include/qom/cpu.h

@@ -243,6 +243,8 @@ struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
+ * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @queued_work_first: First asynchronous work pending.
  *
  * State of one CPU core or thread.
  */
@@ -263,7 +265,6 @@ struct CPUState {
     uint32_t host_tid;
     bool running;
     struct QemuCond *halt_cond;
-    struct qemu_work_item *queued_work_first, *queued_work_last;
     bool thread_kicked;
     bool created;
     bool stop;
@@ -274,6 +275,9 @@ struct CPUState {
     int64_t icount_extra;
     sigjmp_buf jmp_env;
 
+    QemuMutex work_mutex;
+    struct qemu_work_item *queued_work_first, *queued_work_last;
+
     AddressSpace *as;
     struct AddressSpaceDispatch *memory_dispatch;
     MemoryListener *tcg_as_listener;
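Because CPUState now carries a bare QemuMutex, the mutex has to be initialized before the work list is first touched; the commit handles that in the CPU object's initialization code, which is not part of this excerpt. Continuing the illustrative pthread sketch from above (not QEMU code), initialization and a minimal use of the queue would look like:

    #include <stdio.h>

    static void say_hello(void *data)
    {
        printf("work ran: %s\n", (const char *)data);
    }

    int main(void)
    {
        struct cpu_work_queue q = { .first = NULL, .last = NULL };
        pthread_mutex_init(&q.work_mutex, NULL);   /* must happen before any queue_work() */

        struct work_item *wi = calloc(1, sizeof(*wi));
        wi->func = say_hello;
        wi->data = "hello";
        wi->free_after_run = true;

        queue_work(&q, wi);
        flush_work(&q);                            /* normally run by the target CPU thread */

        pthread_mutex_destroy(&q.work_mutex);
        return 0;
    }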