cpu: introduce cpu_in_exclusive_context()

Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: moved inside start/end_exclusive fns + cleanup]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Emilio G. Cota 2018-11-26 17:14:43 -05:00 committed by Alex Bennée
parent 504f73f7b3
commit cfbc3c6083
3 changed files with 18 additions and 4 deletions

View File

@@ -238,8 +238,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
uint32_t flags;
uint32_t cflags = 1;
uint32_t cf_mask = cflags & CF_HASH_MASK;
/* volatile because we modify it between setjmp and longjmp */
volatile bool in_exclusive_region = false;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
@@ -253,7 +251,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
/* Since we got here, we know that parallel_cpus must be true. */
parallel_cpus = false;
in_exclusive_region = true;
cc->cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
@@ -273,7 +270,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
assert_no_pages_locked();
}
if (in_exclusive_region) {
if (cpu_in_exclusive_context(cpu)) {
/* We might longjump out of either the codegen or the
* execution, so must make sure we only end the exclusive
* region if we started it.

View File

@@ -200,11 +200,15 @@ void start_exclusive(void)
* section until end_exclusive resets pending_cpus to 0.
*/
qemu_mutex_unlock(&qemu_cpu_list_lock);
current_cpu->in_exclusive_context = true;
}
/* Finish an exclusive operation. */
void end_exclusive(void)
{
current_cpu->in_exclusive_context = false;
qemu_mutex_lock(&qemu_cpu_list_lock);
atomic_set(&pending_cpus, 0);
qemu_cond_broadcast(&exclusive_resume);

View File

@@ -372,6 +372,7 @@ struct CPUState {
bool unplug;
bool crash_occurred;
bool exit_request;
bool in_exclusive_context;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
@@ -783,6 +784,18 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
*/
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
/**
 * cpu_in_exclusive_context()
 * @cpu: The vCPU to check
 *
 * Returns true if @cpu is in an exclusive context, for example running
 * something which has previously been queued via async_safe_run_on_cpu().
 * The flag is set by start_exclusive() and cleared by end_exclusive().
 */
static inline bool cpu_in_exclusive_context(const CPUState *cpu)
{
return cpu->in_exclusive_context;
}
/**
* qemu_get_cpu:
* @index: The CPUState@cpu_index value of the CPU to obtain.