hw/mips_int: hold BQL for all interrupt requests

Make sure the BQL is held for all interrupt requests.

For MTTCG-enabled configurations, raising soft and hard interrupts
across vCPUs must be properly locked. Acquiring the BQL ensures that
all paths triggering an IRQ are synchronized.

Signed-off-by: Miodrag Dinic <miodrag.dinic@imgtec.com>
Signed-off-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Acked-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Author: Aleksandar Markovic
Date:   2019-02-11 16:28:16 +01:00
Commit: 215581bdf1
Parent: 33a07fa2db

2 changed files with 15 additions and 18 deletions
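
Note: the core of this change is the conditional BQL acquire/release added to
cpu_mips_irq_request(). As an illustrative sketch only (the function name below
is made up, and the CPUState declarations are assumed to come from the same
headers the real file already pulls in via "cpu.h"), the pattern is:

    /* Sketch of the locking pattern this patch applies; not patch code. */
    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "cpu.h"

    /* hypothetical name, for illustration only */
    static void raise_irq_with_bql(CPUState *cs, int level)
    {
        bool locked = false;

        /* Take the BQL only if the caller does not already hold it;
         * qemu_mutex_lock_iothread() is not recursive. */
        if (!qemu_mutex_iothread_locked()) {
            locked = true;
            qemu_mutex_lock_iothread();
        }

        /* Update the interrupt state under the BQL, as
         * cpu_mips_irq_request() does in the diff below. */
        if (level) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }

        /* Release the BQL only if it was acquired above. */
        if (locked) {
            qemu_mutex_unlock_iothread();
        }
    }

The locked flag records whether the lock was taken in this function, so it is
released exactly once regardless of whether the caller already held the BQL.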

--- a/hw/mips/mips_int.c
+++ b/hw/mips/mips_int.c

@@ -21,6 +21,7 @@
  */
 #include "qemu/osdep.h"
+#include "qemu/main-loop.h"
 #include "hw/hw.h"
 #include "hw/mips/cpudevs.h"
 #include "cpu.h"
@@ -32,10 +33,17 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
     MIPSCPU *cpu = opaque;
     CPUMIPSState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
+    bool locked = false;
 
     if (irq < 0 || irq > 7)
         return;
 
+    /* Make sure locking works even if BQL is already held by the caller */
+    if (!qemu_mutex_iothread_locked()) {
+        locked = true;
+        qemu_mutex_lock_iothread();
+    }
+
     if (level) {
         env->CP0_Cause |= 1 << (irq + CP0Ca_IP);
@@ -56,6 +64,10 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level)
     } else {
         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
     }
+
+    if (locked) {
+        qemu_mutex_unlock_iothread();
+    }
 }
 
 void cpu_mips_irq_init_cpu(MIPSCPU *cpu)
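
If more IRQ paths ever needed the same treatment, the idiom above could be
factored into a pair of small helpers. The names below are hypothetical and
not part of this patch:

    /* Hypothetical helpers (not in this patch): capture the
     * "lock the BQL only if not already held" idiom. */
    static bool mips_irq_bql_acquire(void)
    {
        if (qemu_mutex_iothread_locked()) {
            return false;               /* caller already holds the BQL */
        }
        qemu_mutex_lock_iothread();
        return true;                    /* we took it; pass to release() */
    }

    static void mips_irq_bql_release(bool locked)
    {
        if (locked) {
            qemu_mutex_unlock_iothread();
        }
    }

cpu_mips_irq_request() would then open with bool locked = mips_irq_bql_acquire();
and end with mips_irq_bql_release(locked);.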

--- a/target/mips/op_helper.c
+++ b/target/mips/op_helper.c

@@ -17,7 +17,6 @@
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
 #include "qemu/osdep.h"
-#include "qemu/main-loop.h"
 #include "cpu.h"
 #include "internal.h"
 #include "qemu/host-utils.h"
@@ -905,11 +904,7 @@ target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
 target_ulong helper_mfc0_count(CPUMIPSState *env)
 {
-    int32_t count;
-    qemu_mutex_lock_iothread();
-    count = (int32_t) cpu_mips_get_count(env);
-    qemu_mutex_unlock_iothread();
-    return count;
+    return (int32_t)cpu_mips_get_count(env);
 }
 
 target_ulong helper_mfc0_saar(CPUMIPSState *env)
@@ -1594,9 +1589,7 @@ void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
 void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
 {
-    qemu_mutex_lock_iothread();
     cpu_mips_store_count(env, arg1);
-    qemu_mutex_unlock_iothread();
 }
 
 void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
@@ -1685,9 +1678,7 @@ void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
 void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
 {
-    qemu_mutex_lock_iothread();
     cpu_mips_store_compare(env, arg1);
-    qemu_mutex_unlock_iothread();
 }
 
 void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
@@ -1741,9 +1732,7 @@ void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
 void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
 {
-    qemu_mutex_lock_iothread();
     cpu_mips_store_cause(env, arg1);
-    qemu_mutex_unlock_iothread();
 }
 
 void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
@@ -2587,16 +2576,12 @@ target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
 target_ulong helper_rdhwr_cc(CPUMIPSState *env)
 {
-    int32_t count;
     check_hwrena(env, 2, GETPC());
 #ifdef CONFIG_USER_ONLY
-    count = env->CP0_Count;
+    return env->CP0_Count;
 #else
-    qemu_mutex_lock_iothread();
-    count = (int32_t)cpu_mips_get_count(env);
-    qemu_mutex_unlock_iothread();
+    return (int32_t)cpu_mips_get_count(env);
 #endif
-    return count;
 }
 
 target_ulong helper_rdhwr_ccres(CPUMIPSState *env)