replace spinlock by QemuMutex.

spinlock is only used in two cases:
  * cpu-exec.c: to protect TranslationBlock
  * mem_helper.c: for lock helper in target-i386 (which seems broken).

In user mode the spinlock is already a pthread_mutex_t, so we can use
QemuMutex directly, behind an #ifdef.  The #ifdef will be removed once
multithreaded TCG needs the mutex as well.
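
For context, a minimal sketch of the QemuMutex calls this commit adopts
(the qemu_mutex_*() names are QEMU's real API from "qemu/thread.h"; the
example_* names are hypothetical, for illustration only):

    #include "qemu/thread.h"            /* QemuMutex, qemu_mutex_*() */

    static QemuMutex example_lock;      /* hypothetical lock */

    static void example_init(void)
    {
        /* Unlike the statically initialized spinlock_t it replaces, a
           QemuMutex must be initialized before first use; hence the new
           helper_lock_init() and the qemu_mutex_init() call added to
           code_gen_alloc() below. */
        qemu_mutex_init(&example_lock);
    }

    static void example_critical_section(void)
    {
        qemu_mutex_lock(&example_lock);
        /* ... touch state shared between threads ... */
        qemu_mutex_unlock(&example_lock);
    }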

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
Message-Id: <1439220437-23957-5-git-send-email-fred.konrad@greensocs.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[Merge Emilio G. Cota's patch to remove volatile. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
KONRAD Frederic 2015-08-10 17:27:02 +02:00 committed by Paolo Bonzini
parent d5f8d61390
commit 677ef6230b
8 changed files with 73 additions and 19 deletions

cpu-exec.c

@@ -357,9 +357,6 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;
 
-    /* This must be volatile so it is not trashed by longjmp() */
-    volatile bool have_tb_lock = false;
-
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
@@ -468,8 +465,7 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
-                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = true;
+                tb_lock();
                 tb = tb_find_fast(cpu);
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    doing it in tb_find_slow */
@@ -491,8 +487,7 @@ int cpu_exec(CPUState *cpu)
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
-                have_tb_lock = false;
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
+                tb_unlock();
                 if (likely(!cpu->exit_request)) {
                     trace_exec_tb(tb, tb->pc);
                     tc_ptr = tb->tc_ptr;
@@ -558,10 +553,7 @@ int cpu_exec(CPUState *cpu)
             x86_cpu = X86_CPU(cpu);
             env = &x86_cpu->env;
 #endif
-            if (have_tb_lock) {
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-                have_tb_lock = false;
-            }
+            tb_lock_reset();
         }
     } /* for(;;) */
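
The volatile flag removed above existed because cpu_exec() can be re-entered
via siglongjmp(): C guarantees nothing about non-volatile automatic variables
modified between setjmp() and longjmp().  The replacement keeps the lock state
in a __thread counter (see the translate-all.c hunk below), which longjmp()
does not disturb, and tb_lock_reset() consults it on the exception path.  A
standalone sketch of the pitfall the old code guarded against (plain C, not
QEMU code):

    #include <setjmp.h>
    #include <stdbool.h>
    #include <stdio.h>

    static jmp_buf env;

    int main(void)
    {
        /* Without 'volatile', 'locked' would be indeterminate after
           longjmp() returns here, because it is modified between the
           setjmp() call and the longjmp() (C11 7.13.2.1p3). */
        volatile bool locked = false;

        if (setjmp(env) == 0) {
            locked = true;        /* "take the lock" */
            longjmp(env, 1);      /* simulated guest fault */
        }
        if (locked) {
            puts("releasing lock on the exception path");
            locked = false;
        }
        return 0;
    }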

include/exec/exec-all.h

@@ -225,7 +225,7 @@ struct TranslationBlock {
     struct TranslationBlock *jmp_first;
 };
 
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
 
 typedef struct TBContext TBContext;
 
@@ -235,7 +235,7 @@ struct TBContext {
     TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
     int nb_tbs;
     /* any access to the tbs or the page table must use this lock */
-    spinlock_t tb_lock;
+    QemuMutex tb_lock;
 
     /* statistics */
     int tb_flush_count;

linux-user/main.c

@@ -105,7 +105,7 @@ static int pending_cpus;
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
-    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     pthread_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
@@ -127,11 +127,11 @@ void fork_end(int child)
         pthread_mutex_init(&cpu_list_mutex, NULL);
         pthread_cond_init(&exclusive_cond, NULL);
         pthread_cond_init(&exclusive_resume, NULL);
-        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
+        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
         pthread_mutex_unlock(&exclusive_lock);
-        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
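
The asymmetry above (lock before fork(), then unlock in the parent but
re-initialize in the child) is the standard pattern for mixing mutexes with
fork(): only the forking thread survives in the child, so a mutex held by any
other thread would stay locked forever, and re-initializing is the only safe
recovery.  A generic sketch of the pattern, using raw pthreads rather than
QEMU's wrappers:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static pid_t fork_with_lock(void)
    {
        pid_t pid;

        pthread_mutex_lock(&lock);    /* nobody holds it across fork() */
        pid = fork();
        if (pid == 0) {
            /* child: re-init rather than unlock; the mutex may carry
               state owned by threads that no longer exist */
            pthread_mutex_init(&lock, NULL);
        } else {
            pthread_mutex_unlock(&lock);
        }
        return pid;
    }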

target-i386/cpu.h

@@ -1318,6 +1318,9 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
 void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
 void cpu_set_fpuc(CPUX86State *env, uint16_t val);
 
+/* mem_helper.c */
+void helper_lock_init(void);
+
 /* svm_helper.c */
 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                    uint64_t param);

target-i386/mem_helper.c

@@ -23,18 +23,37 @@
 
 /* broken thread support */
 
-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+#if defined(CONFIG_USER_ONLY)
+QemuMutex global_cpu_lock;
 
 void helper_lock(void)
 {
-    spin_lock(&global_cpu_lock);
+    qemu_mutex_lock(&global_cpu_lock);
 }
 
 void helper_unlock(void)
 {
-    spin_unlock(&global_cpu_lock);
+    qemu_mutex_unlock(&global_cpu_lock);
 }
 
+void helper_lock_init(void)
+{
+    qemu_mutex_init(&global_cpu_lock);
+}
+#else
+void helper_lock(void)
+{
+}
+
+void helper_unlock(void)
+{
+}
+
+void helper_lock_init(void)
+{
+}
+#endif
+
 void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
 {
     uint64_t d;
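
The #if split reflects that the lock is only needed in user mode: system-mode
TCG was still single-threaded at this point, so the LOCK-prefix helpers
compile to no-ops there.  In user mode, one guest LOCK-prefixed instruction
expands to several host operations, so concurrent guest threads must be
serialized.  A conceptual sketch of why (the emulate_* function is
hypothetical, not QEMU's implementation):

    #include "qemu/thread.h"
    #include <stdint.h>

    static QemuMutex lock_prefix_mutex;  /* plays the role of global_cpu_lock */

    /* A guest 'lock cmpxchg8b' is one atomic instruction, but emulating
       it takes a read, a compare and a write; without the mutex another
       guest thread could slip in between them. */
    static uint64_t emulate_locked_cmpxchg8b(uint64_t *mem,
                                             uint64_t expected,
                                             uint64_t newval)
    {
        uint64_t old;

        qemu_mutex_lock(&lock_prefix_mutex);
        old = *mem;
        if (old == expected) {
            *mem = newval;
        }
        qemu_mutex_unlock(&lock_prefix_mutex);
        return old;
    }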

target-i386/translate.c

@@ -7899,6 +7899,8 @@ void optimize_flags_init(void)
                                          offsetof(CPUX86State, regs[i]),
                                          reg_names[i]);
     }
+
+    helper_lock_init();
 }
 
 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for

tcg/tcg.h

@@ -595,6 +595,10 @@ void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 void tcg_pool_delete(TCGContext *s);
 
+void tb_lock(void);
+void tb_unlock(void);
+void tb_lock_reset(void);
+
 static inline void *tcg_malloc(int size)
 {
     TCGContext *s = &tcg_ctx;

translate-all.c

@@ -128,6 +128,39 @@ static void *l1_map[V_L1_SIZE];
 /* code generation context */
 TCGContext tcg_ctx;
 
+/* translation block context */
+#ifdef CONFIG_USER_ONLY
+__thread int have_tb_lock;
+#endif
+
+void tb_lock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(!have_tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    have_tb_lock++;
+#endif
+}
+
+void tb_unlock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(have_tb_lock);
+    have_tb_lock--;
+    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+#endif
+}
+
+void tb_lock_reset(void)
+{
+#ifdef CONFIG_USER_ONLY
+    if (have_tb_lock) {
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        have_tb_lock = 0;
+    }
+#endif
+}
+
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2);
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
@@ -675,6 +708,7 @@ static inline void code_gen_alloc(size_t tb_size)
                        CODE_GEN_AVG_BLOCK_SIZE;
     tcg_ctx.tb_ctx.tbs =
             g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
+    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
 }
 
 /* Must be called before using the QEMU cpus.  'tb_size' is the size
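
Taken together, the new interface is small: writers of shared translation
state bracket their work with tb_lock()/tb_unlock(), and code reached via
siglongjmp() calls tb_lock_reset() because it cannot know whether the
jumped-over frame held the lock.  An illustrative caller (hypothetical, not
part of the commit):

    void example_retranslate(void)    /* hypothetical user-mode caller */
    {
        tb_lock();
        /* ... flush or relink TranslationBlocks under the mutex ... */
        tb_unlock();
    }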