target/arm: Restrict cpu_exec_interrupt() handler to sysemu
Restrict cpu_exec_interrupt() and its callees to sysemu.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Warner Losh <imp@bsdimp.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210911165434.531552-8-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 083afd18a9
parent 9354e6947a
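The patch follows the pattern used throughout this series: cpu_exec_interrupt() delivers hardware interrupts, which only exist in system emulation, so both the handler and its registration in the TCGCPUOps table move behind !CONFIG_USER_ONLY. Below is a minimal, self-contained sketch of that guard pattern; the DemoCPUState/DemoTCGOps types and the use of CONFIG_SOFTMMU as a plain -D define are stand-ins for illustration only, not QEMU's real definitions.

/*
 * Sketch of the "sysemu-only hook" pattern with hypothetical stand-in
 * types.  Build with -DCONFIG_SOFTMMU for a sysemu-like build, without
 * it for a user-mode-like build.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct DemoCPUState {
    int interrupt_request;
} DemoCPUState;

typedef struct DemoTCGOps {
    /* Hook called when the CPU has a pending hardware interrupt. */
    bool (*cpu_exec_interrupt)(DemoCPUState *cs, int interrupt_request);
} DemoTCGOps;

#ifdef CONFIG_SOFTMMU
/* Compiled only for the system emulator, like arm_cpu_exec_interrupt(). */
static bool demo_cpu_exec_interrupt(DemoCPUState *cs, int interrupt_request)
{
    printf("taking interrupt 0x%x\n", interrupt_request);
    cs->interrupt_request = 0;
    return true;
}
#endif

static const DemoTCGOps demo_tcg_ops = {
#ifdef CONFIG_SOFTMMU
    .cpu_exec_interrupt = demo_cpu_exec_interrupt,
#else
    .cpu_exec_interrupt = NULL,  /* user-mode build: hook compiled out */
#endif
};

int main(void)
{
    DemoCPUState cpu = { .interrupt_request = 0x2 };

    if (demo_tcg_ops.cpu_exec_interrupt) {
        demo_tcg_ops.cpu_exec_interrupt(&cpu, cpu.interrupt_request);
    } else {
        puts("user-mode build: no interrupt handler registered");
    }
    return 0;
}

Compiled with -DCONFIG_SOFTMMU the handler is registered and invoked; without it the slot stays NULL and the caller skips it, mirroring the commit's rationale that a user-mode build never delivers hardware interrupts.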
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -440,6 +440,8 @@ static void arm_cpu_reset(DeviceState *dev)
     arm_rebuild_hflags(env);
 }
 
+#ifndef CONFIG_USER_ONLY
+
 static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                      unsigned int target_el,
                                      unsigned int cur_el, bool secure,
@@ -556,7 +558,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
     return unmasked || pstate_unmasked;
 }
 
-bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
     CPUARMState *env = cs->env_ptr;
@@ -608,6 +610,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     cc->tcg_ops->do_interrupt(cs);
     return true;
 }
+#endif /* !CONFIG_USER_ONLY */
 
 void arm_cpu_update_virq(ARMCPU *cpu)
 {
@@ -2010,11 +2013,11 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
 static const struct TCGCPUOps arm_tcg_ops = {
     .initialize = arm_translate_init,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
-    .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .tlb_fill = arm_cpu_tlb_fill,
     .debug_excp_handler = arm_debug_excp_handler,
 
 #if !defined(CONFIG_USER_ONLY)
+    .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .do_interrupt = arm_cpu_do_interrupt,
     .do_transaction_failed = arm_cpu_do_transaction_failed,
     .do_unaligned_access = arm_cpu_do_unaligned_access,
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1040,11 +1040,10 @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);
 
 #ifndef CONFIG_USER_ONLY
 extern const VMStateDescription vmstate_arm_cpu;
-#endif
 
 void arm_cpu_do_interrupt(CPUState *cpu);
 void arm_v7m_cpu_do_interrupt(CPUState *cpu);
-bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
 
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                          MemTxAttrs *attrs);
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -22,7 +22,7 @@
 /* CPU models. These are not needed for the AArch64 linux-user build. */
 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
 
-#ifdef CONFIG_TCG
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
@@ -46,7 +46,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     }
     return ret;
 }
-#endif /* CONFIG_TCG */
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
 
 static void arm926_initfn(Object *obj)
 {
@@ -898,11 +898,11 @@ static void pxa270c5_initfn(Object *obj)
 static const struct TCGCPUOps arm_v7m_tcg_ops = {
     .initialize = arm_translate_init,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
-    .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
     .tlb_fill = arm_cpu_tlb_fill,
     .debug_excp_handler = arm_debug_excp_handler,
 
 #if !defined(CONFIG_USER_ONLY)
+    .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
     .do_interrupt = arm_v7m_cpu_do_interrupt,
     .do_transaction_failed = arm_cpu_do_transaction_failed,
     .do_unaligned_access = arm_cpu_do_unaligned_access,