2007-10-25 23:35:50 +02:00
|
|
|
/*
|
|
|
|
* PowerPC emulation special registers manipulation helpers for qemu.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2003-2007 Jocelyn Mayer
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2009-07-16 22:47:01 +02:00
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
2007-10-25 23:35:50 +02:00
|
|
|
*/
|
|
|
|
|
2016-06-29 13:47:03 +02:00
|
|
|
#ifndef HELPER_REGS_H
|
|
|
|
#define HELPER_REGS_H
|
2007-10-25 23:35:50 +02:00
|
|
|
|
2017-12-04 23:25:43 +01:00
|
|
|
#include "qemu/main-loop.h"
|
2018-05-29 01:27:05 +02:00
|
|
|
#include "exec/exec-all.h"
|
2019-12-04 20:43:54 +01:00
|
|
|
#include "sysemu/kvm.h"
|
2017-12-04 23:25:43 +01:00
|
|
|
|
2007-10-25 23:35:50 +02:00
|
|
|
/* Swap temporary saved registers with GPRs */
|
2009-08-16 11:06:54 +02:00
|
|
|
/* Exchange GPR0-GPR3 with the TGPR shadow copies (601/603 TGPR feature). */
static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
{
    target_ulong swap;
    int i;

    for (i = 0; i < 4; i++) {
        swap = env->gpr[i];
        env->gpr[i] = env->tgpr[i];
        env->tgpr[i] = swap;
    }
}
|
|
|
|
|
2009-08-16 11:06:54 +02:00
|
|
|
/* Recompute the instruction/data MMU indexes from the current MSR state. */
static inline void hreg_compute_mem_idx(CPUPPCState *env)
{
    /*
     * This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and some
     * 32-bit CPUs support it. Weird...
     *
     *   0 = Guest User space virtual mode
     *   1 = Guest Kernel space virtual mode
     *   2 = Guest User space real mode
     *   3 = Guest Kernel space real mode
     *   4 = HV User space virtual mode
     *   5 = HV Kernel space virtual mode
     *   6 = HV User space real mode
     *   7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follow:
     *
     *   0 = AS 0 HV User space
     *   1 = AS 0 HV Kernel space
     *   2 = AS 1 HV User space
     *   3 = AS 1 HV Kernel space
     *   4 = AS 0 Guest User space
     *   5 = AS 0 Guest Kernel space
     *   6 = AS 1 Guest User space
     *   7 = AS 1 Guest Kernel space
     */
    /* Bit 0 encodes kernel (1) vs user (0) in both layouts */
    int base = msr_pr ? 0 : 1;

    if (env->mmu_model & POWERPC_MMU_BOOKE) {
        /* Bit 1: address space selector, bit 2: guest state */
        env->immu_idx = base | (msr_is ? 2 : 0) | (msr_gs ? 4 : 0);
        env->dmmu_idx = base | (msr_ds ? 2 : 0) | (msr_gs ? 4 : 0);
    } else {
        /* Bit 1: translation off (real mode), bit 2: hypervisor state */
        env->immu_idx = base | (msr_ir ? 0 : 2) | (msr_hv ? 4 : 0);
        env->dmmu_idx = base | (msr_dr ? 0 : 2) | (msr_hv ? 4 : 0);
    }
}
|
|
|
|
|
2009-08-16 11:06:54 +02:00
|
|
|
/* Rebuild env->hflags (TCG translation flags) from MSR and non-MSR state. */
static inline void hreg_compute_hflags(CPUPPCState *env)
{
    /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
    target_ulong mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA)
                      | (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE)
                      | (1 << MSR_BE) | (1 << MSR_LE) | (1 << MSR_VSX)
                      | (1 << MSR_IR) | (1 << MSR_DR)
                      | (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;

    hreg_compute_mem_idx(env);
    /* Merge the MSR-derived bits with hflags coming from other registers */
    env->hflags = (env->msr & mask) | env->hflags_nmsr;
}
|
|
|
|
|
2017-12-04 23:25:43 +01:00
|
|
|
/*
 * Request a TB exit on @cs, taking the iothread lock if this thread does
 * not already hold it. No-op under KVM, where TCG TBs do not exist.
 */
static inline void cpu_interrupt_exittb(CPUState *cs)
{
    bool need_lock;

    if (!kvm_enabled()) {
        return;
    }

    need_lock = !qemu_mutex_iothread_locked();
    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}
|
|
|
|
|
2009-08-16 11:06:54 +02:00
|
|
|
/*
 * Store a new MSR value, applying all side effects:
 * TB invalidation on translation-mode changes, TGPR swapping, exception
 * prefix updates, and entering power-saving mode when POW is set.
 *
 * @env:      CPU state
 * @value:    new MSR value (will be masked with env->msr_mask)
 * @alter_hv: non-zero if the caller is allowed to change MSR[HV]
 *
 * Returns 0, or EXCP_HALTED if the CPU entered power-saving mode.
 */
static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
                                 int alter_hv)
{
    int excp;
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
#endif

    excp = 0;
    value &= env->msr_mask;
#if !defined(CONFIG_USER_ONLY)
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    /* Changing translation mode invalidates the current TB */
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cpu_interrupt_exittb(cs);
    }
    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cpu_interrupt_exittb(cs);
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    /*
     * Fix: the unlikely() hint must wrap the whole comparison; the original
     * code applied it to only the left operand ((value >> MSR_EP) & 1),
     * leaving the != outside the hint. Same result, correct hint placement.
     */
    if (unlikely(((value >> MSR_EP) & 1) != msr_ep)) {
        /* Change the exception prefix on PowerPC 601 */
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /*
     * If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit server processors.
     * It appears that:
     * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS
     *   exploits it.
     * - 64-bit embedded implementations do not need any operation to be
     *   performed when PR is set.
     */
    if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }
#endif
    env->msr = value;
    hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            /* Enter power-saving mode: halt until an interrupt arrives */
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }
#endif

    return excp;
}
|
|
|
|
|
2016-06-07 04:50:22 +02:00
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
2016-09-20 18:35:00 +02:00
|
|
|
/*
 * Perform any TLB flush deferred by earlier slbie/tlbie-style operations.
 * A pending global flush (when @global allows it) subsumes the local one.
 */
static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
    CPUState *cs = env_cpu(env);

    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
        /* A global flush also covers this CPU, so drop both flags */
        env->tlb_need_flush &= ~(TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH);
        tlb_flush_all_cpus_synced(cs);
    } else if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush(cs);
    }
}
|
|
|
|
#else
|
2016-09-20 18:35:00 +02:00
|
|
|
/* User-only build: no softmmu TLB exists, so there is nothing to flush. */
static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
|
ppc: Do some batching of TCG tlb flushes
On ppc64 especially, we flush the tlb on any slbie or tlbie instruction.
However, those instructions often come in bursts of 3 or more (context
switch will favor a series of slbie's for example to an slbia if the
SLB has less than a certain number of entries in it, and tlbie's can
happen in a series, with PAPR, H_BULK_REMOVE can remove up to 4 entries
at a time.
Doing a tlb_flush() each time is a waste of time. We end up doing a memset
of the whole TLB, reloading it for the next instruction, memset'ing again,
etc...
Those instructions don't have to take effect immediately. For slbie, they
can wait for the next context synchronizing event. For tlbie, the next
tlbsync.
This implements batching by keeping a flag that indicates that we have a
TLB in need of flushing. We check it on interrupts, rfi's, isync's and
tlbsync and flush the TLB if needed.
This reduces the number of tlb_flush() on a boot to a ubuntu installer
first dialog screen from roughly 360K down to 36K.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[clg: added a 'CPUPPCState *' variable in h_remove() and
h_bulk_remove() ]
Signed-off-by: Cédric Le Goater <clg@kaod.org>
[dwg: removed spurious whitespace change, use 0/1 not true/false
consistently, since tlb_need_flush has int type]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-05-03 18:03:25 +02:00
|
|
|
#endif
|
|
|
|
|
2016-06-29 13:47:03 +02:00
|
|
|
#endif /* HELPER_REGS_H */
|