accel/tcg: Restrict TCGCPUOps::cpu_exec_interrupt() to sysemu

All targets call TCGCPUOps::cpu_exec_interrupt() from sysemu code.
Move its declaration to restrict it to system emulation.
Extend the guarded code region accordingly.
Restrict the static inline need_replay_interrupt() helper to system
emulation as well, to avoid a "defined but not used" warning.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210911165434.531552-24-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Philippe Mathieu-Daudé authored 2021-09-11 18:54:33 +02:00, committed by Richard Henderson
parent f364a7f968
commit 77c0fc4e55
2 changed files with 9 additions and 5 deletions
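
For context, the sketch below shows how a target's TCGCPUOps initializer typically guards the hook once it is restricted to system emulation; the "foo" target and its helper names are hypothetical and not part of this commit:

/* Illustrative sketch only: "foo" is a hypothetical target. With
 * cpu_exec_interrupt now declared only for system emulation, the
 * assignment must live under the same guard as the other
 * sysemu-only hooks.
 */
static const struct TCGCPUOps foo_tcg_ops = {
    .initialize = foo_translate_init,            /* hypothetical helpers */
    .cpu_exec_enter = foo_cpu_exec_enter,
    .tlb_fill = foo_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
    .cpu_exec_interrupt = foo_cpu_exec_interrupt,
    .do_interrupt = foo_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};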

accel/tcg/cpu-exec.c

@@ -685,6 +685,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     return false;
 }
 
+#ifndef CONFIG_USER_ONLY
 /*
  * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
  * "real" interrupt event later. It does not need to be recorded for
@@ -698,12 +699,11 @@ static inline bool need_replay_interrupt(int interrupt_request)
     return true;
 #endif
 }
+#endif /* !CONFIG_USER_ONLY */
 
 static inline bool cpu_handle_interrupt(CPUState *cpu,
                                         TranslationBlock **last_tb)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
     /* Clear the interrupt flag now since we're processing
      * cpu->interrupt_request and cpu->exit_request.
      * Ensure zeroing happens before reading cpu->exit_request or
@@ -725,6 +725,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             qemu_mutex_unlock_iothread();
             return true;
         }
+#if !defined(CONFIG_USER_ONLY)
         if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
             /* Do nothing */
         } else if (interrupt_request & CPU_INTERRUPT_HALT) {
@@ -753,12 +754,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             qemu_mutex_unlock_iothread();
             return true;
         }
-#endif
+#endif /* !TARGET_I386 */
         /* The target hook has 3 exit conditions:
            False when the interrupt isn't processed,
            True when it is, and we should restart on a new TB,
            and via longjmp via cpu_loop_exit. */
         else {
+            CPUClass *cc = CPU_GET_CLASS(cpu);
+
             if (cc->tcg_ops->cpu_exec_interrupt &&
                 cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                 if (need_replay_interrupt(interrupt_request)) {
@@ -777,6 +780,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
              * reload the 'interrupt_request' value */
             interrupt_request = cpu->interrupt_request;
         }
+#endif /* !CONFIG_USER_ONLY */
         if (interrupt_request & CPU_INTERRUPT_EXITTB) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
             /* ensure that no TB jump will be modified as
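
The comment about the hook's three exit conditions describes the contract a target implementation must follow. A minimal sysemu-only sketch for a hypothetical "foo" target (names invented for illustration, not taken from this commit) could look like:

#ifndef CONFIG_USER_ONLY
/* Illustrative sketch: FOO_EXCP_IRQ and foo_cpu_do_interrupt() are
 * hypothetical. Return false when the interrupt is not taken, true when
 * it was handled and execution should restart on a new TB; the
 * per-target interrupt code may also leave via cpu_loop_exit().
 */
bool foo_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = FOO_EXCP_IRQ;
        foo_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
#endif /* !CONFIG_USER_ONLY */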

include/hw/core/tcg-cpu-ops.h

@@ -35,8 +35,6 @@ struct TCGCPUOps {
     void (*cpu_exec_enter)(CPUState *cpu);
     /** @cpu_exec_exit: Callback for cpu_exec cleanup */
     void (*cpu_exec_exit)(CPUState *cpu);
-    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
-    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
     /**
      * @tlb_fill: Handle a softmmu tlb miss or user-only address fault
      *
@@ -68,6 +66,8 @@ struct TCGCPUOps {
     void (*do_interrupt)(CPUState *cpu);
 #endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
 #ifdef CONFIG_SOFTMMU
+    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
     /**
      * @do_transaction_failed: Callback for handling failed memory transactions
      * (ie bus faults or external aborts; not MMU faults)