target-ppc: Rework ppc_store_slb

ppc_store_slb updates the SLB for PPC CPUs with 64-bit hash MMUs.
Currently it takes two parameters, which contain values encoded as the
register arguments to the slbmte instruction: one register contains the
ESID portion of the SLBE along with the slot number, while the other
contains the VSID portion of the SLBE.

We're shortly going to want to do some SLB updates from other code where
it is more convenient to supply the slot number and ESID separately, so
rework this function and its callers to work this way.

As a bonus, this slightly simplifies the emulation of segment registers
used when running a 32-bit OS on a 64-bit CPU.
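That simplification can be seen in the condensed sketch below (a restatement of
the helper_store_sr() hunk further down, trimmed to the PPC64 branch; not new
code): with the slot passed separately, the segment register number no longer
has to be folded into an slbmte-style RB value.

/* Condensed from the helper_store_sr() change: store segment register
 * 'srnum' with value 'value' as SLB slot 'srnum' on a 64-bit hash MMU. */
uint64_t esid, vsid;

esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;  /* ESID = srnum, mark valid */
vsid = (value & 0xfffffff) << 12;                     /* VSID from the SR value */
vsid |= ((value >> 27) & 0xf) << 8;                   /* protection/flag bits */

ppc_store_slb(cpu, srnum, esid, vsid);                /* slot is just srnum now */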

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Alexander Graf <agraf@suse.de>
David Gibson  2016-01-27 11:07:29 +11:00
commit bcd8123003
parent 7ef23068bf
4 changed files with 21 additions and 22 deletions


@@ -1205,7 +1205,7 @@ int kvm_arch_get_registers(CPUState *cs)
              * Only restore valid entries
              */
             if (rb & SLB_ESID_V) {
-                ppc_store_slb(cpu, rb, rs);
+                ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
             }
         }
 #endif


@@ -136,28 +136,30 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
     }
 }
 
-int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+                  target_ulong esid, target_ulong vsid)
 {
     CPUPPCState *env = &cpu->env;
-    int slot = rb & 0xfff;
     ppc_slb_t *slb = &env->slb[slot];
 
-    if (rb & (0x1000 - env->slb_nr)) {
-        return -1; /* Reserved bits set or slot too high */
+    if (slot >= env->slb_nr) {
+        return -1; /* Bad slot number */
     }
-    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+        return -1; /* Reserved bits set */
+    }
+    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
         return -1; /* Bad segment size */
     }
-    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
         return -1; /* 1T segment on MMU that doesn't support it */
     }
 
-    /* Mask out the slot number as we store the entry */
-    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
-    slb->vsid = rs;
+    slb->esid = esid;
+    slb->vsid = vsid;
 
     LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
-            " %016" PRIx64 "\n", __func__, slot, rb, rs,
+            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
             slb->esid, slb->vsid);
 
     return 0;
@@ -197,7 +199,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
 
-    if (ppc_store_slb(cpu, rb, rs) < 0) {
+    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
         helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                    POWERPC_EXCP_INVAL);
     }


@@ -6,7 +6,8 @@
 #ifdef TARGET_PPC64
 void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
 void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
-int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs);
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+                  target_ulong esid, target_ulong vsid);
 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
                                 int mmu_idx);


@@ -2089,21 +2089,17 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
               (int)srnum, value, env->sr[srnum]);
 #if defined(TARGET_PPC64)
     if (env->mmu_model & POWERPC_MMU_64) {
-        uint64_t rb = 0, rs = 0;
+        uint64_t esid, vsid;
 
         /* ESID = srnum */
-        rb |= ((uint32_t)srnum & 0xf) << 28;
-        /* Set the valid bit */
-        rb |= SLB_ESID_V;
-        /* Index = ESID */
-        rb |= (uint32_t)srnum;
+        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
 
         /* VSID = VSID */
-        rs |= (value & 0xfffffff) << 12;
+        vsid = (value & 0xfffffff) << 12;
         /* flags = flags */
-        rs |= ((value >> 27) & 0xf) << 8;
+        vsid |= ((value >> 27) & 0xf) << 8;
 
-        ppc_store_slb(cpu, rb, rs);
+        ppc_store_slb(cpu, srnum, esid, vsid);
     } else
 #endif
     if (env->sr[srnum] != value) {