2009-05-20 20:08:24 +02:00
|
|
|
/*
|
|
|
|
* Microblaze MMU emulation for qemu.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2009 Edgar E. Iglesias
|
2012-04-12 06:30:30 +02:00
|
|
|
* Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
|
2009-05-20 20:08:24 +02:00
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
2020-10-23 14:18:21 +02:00
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2009-05-20 20:08:24 +02:00
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2009-07-16 22:47:01 +02:00
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
2009-05-20 20:08:24 +02:00
|
|
|
*/
|
|
|
|
|
2016-01-26 19:05:31 +01:00
|
|
|
#include "qemu/osdep.h"
|
2022-02-07 09:27:56 +01:00
|
|
|
#include "qemu/log.h"
|
2009-05-20 20:08:24 +02:00
|
|
|
#include "cpu.h"
|
2016-03-15 13:18:37 +01:00
|
|
|
#include "exec/exec-all.h"
|
2009-05-20 20:08:24 +02:00
|
|
|
|
|
|
|
/*
 * Decode the 3-bit page-size field of a TLB tag entry into a size in
 * bytes.  The encodings form a geometric sequence: each step quadruples
 * the page size, from 1K (f == 0) up to 16M (f == 7), i.e. 1K << (2*f).
 */
static unsigned int tlb_decode_size(unsigned int f)
{
    assert(f < 8);
    return 1024u << (2 * f);
}
|
|
|
|
|
2012-03-14 01:38:22 +01:00
|
|
|
/*
 * Invalidate QEMU's cached translations for guest TLB entry @idx.
 *
 * If the entry is valid, every TARGET_PAGE_SIZE-sized page inside the
 * entry's virtual range is flushed from QEMU's softmmu TLB; invalid
 * entries are ignored.
 */
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = env_cpu(env);
    MicroBlazeMMU *mmu = &env->mmu;
    uint32_t tag, va, end;
    unsigned int size;

    tag = mmu->rams[RAM_TAG][idx];
    if (!(tag & TLB_VALID)) {
        return;
    }

    va = tag & TLB_EPN_MASK;
    size = tlb_decode_size((tag & TLB_PAGESZ_MASK) >> 7);
    end = va + size;

    /* A single guest TLB entry may span several QEMU target pages.  */
    for (; va < end; va += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, va);
    }
}
|
|
|
|
|
2012-03-14 01:38:22 +01:00
|
|
|
/*
 * React to a write of a new value to the PID register: flush QEMU's
 * cached translations for all valid TLB entries that were matched under
 * the current (pre-change) PID.
 *
 * NOTE(review): the flush condition compares against the current
 * MMU_R_PID rather than @newpid -- presumably intentional, since it is
 * the entries cached under the old PID that become stale; confirm.
 * Entries with a tid of 0 match any PID in mmu_translate, so they are
 * deliberately left alone here.
 */
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int i;
    uint32_t t;

    /* The PID is only 8 bits wide; warn on out-of-range values.  */
    if (newpid & ~0xff)
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            /* Skip tid==0 (PID-global) entries; flush PID-matching ones.  */
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
                mmu_flush_idx(env, i);
        }
    }
}
|
|
|
|
|
|
|
|
/* rw - 0 = read, 1 = write, 2 = fetch. */
|
2020-09-04 20:31:57 +02:00
|
|
|
/*
 * Look up @vaddr in the guest TLB.
 *
 * @rw: access type -- 0 = read, 1 = write, 2 = fetch (per the comment
 *      preceding this function in the file).
 * @mmu_idx: current CPU mode; MMU_USER_IDX selects user-mode checks.
 *
 * Returns 1 on a successful hit and fills @lu with the physical
 * address, protection bits, page size and entry index.  Returns 0
 * otherwise, with lu->err set to ERR_MISS (no matching entry) or
 * ERR_PROT (entry found but the access is not permitted).
 */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
                           target_ulong vaddr, MMUAccessType rw, int mmu_idx)
{
    MicroBlazeMMU *mmu = &cpu->env.mmu;
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            /* Page-size field sits at bits [9:7] of the tag entry.  */
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                /* Pages smaller than QEMU's target page are unimplemented. */
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            /* Compare only the address bits above the page size.  */
            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
            /* tid==0 matches any PID; otherwise the PIDs must agree.  */
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part. */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits. */
            tlb_zsel = (d >> 4) & 0xf;
            /* ZPR holds 2 bits per zone, zone 0 in the top bits.  */
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > cpu->cfg.mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore. */
            }

            if (cpu->cfg.mmu == 1) {
                t0 = 1; /* Zones are disabled. */
            }

            /*
             * Zone field semantics:
             *   0 - no access in user mode, TLB protbits apply otherwise
             *   1 - TLB protbits apply as-is
             *   2 - kernel mode gets full access, user uses protbits
             *   3 - full access for everyone
             */
            switch (t0) {
            case 0:
                if (mmu_idx == MMU_USER_IDX)
                    continue;
                break;
            case 2:
                if (mmu_idx != MMU_USER_IDX) {
                    tlb_ex = 1;
                    tlb_wr = 1;
                }
                break;
            case 3:
                tlb_ex = 1;
                tlb_wr = 1;
                break;
            default: break;
            }

            /* Entry matched; any bail-out below is a protection error.  */
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |=PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            /* Clip the physical address to the configured address width.  */
            lu->paddr = tlb_rpn & cpu->cfg.addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}
|
|
|
|
|
|
|
|
/* Writes/reads to the MMU's special regs end up here. */
|
2018-04-16 21:25:01 +02:00
|
|
|
/*
 * Handle a guest read of an MMU special register (MFS).
 *
 * @ext: extended (64-bit) access; only permitted for TLBLO, where it
 *       selects the upper 32 bits of the RAM entry.
 * @rn:  register number (MMU_R_*).
 *
 * Accesses are gated on the configured MMU variant and the
 * mmu_tlb_access capability bits; disallowed reads log a guest error
 * and return 0.  Note that reading TLBHI has the architectural side
 * effect of loading MMU_R_PID with the tid of the indexed entry.
 */
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    unsigned int i;
    uint32_t r = 0;

    /* No guest-visible MMU regs without a full MMU plus TLB access.  */
    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
    /* Reads to HI/LO trig reads from the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        if (!(cpu->cfg.mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }

        /* TLBX selects which entry the HI/LO window exposes.  */
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        /* rn & 1 maps TLBLO/TLBHI onto RAM_DATA/RAM_TAG -- TODO confirm
           the register-number parity matches the RAM_* enum order.  */
        r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
        if (rn == MMU_R_TLBHI)
            env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
        break;
    case MMU_R_PID:
    case MMU_R_ZPR:
        if (!(cpu->cfg.mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBX:
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBSX:
        qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}
|
|
|
|
|
2018-04-16 21:25:01 +02:00
|
|
|
/*
 * Handle a guest write to an MMU special register (MTS).
 *
 * @ext: extended (64-bit) access; only permitted for TLBLO, where it
 *       writes the upper 32 bits of the RAM entry.
 * @rn:  register number (MMU_R_*).
 * @v:   32-bit value being written.
 *
 * Writes that change the visible translation (TLBHI, ZPR, PID) flush
 * the affected parts of QEMU's softmmu TLB.  TLBSX performs a TLB
 * search for the EPN in @v and deposits the result in TLBX.
 */
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint64_t tmp64;
    unsigned int i;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v,
                  rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]);

    /* No guest-visible MMU regs without a full MMU plus TLB access.  */
    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trig writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        /* TLBX selects which entry the HI/LO window writes.  */
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            /* NOTE(review): qemu_loglevel_mask(~0) is true whenever any
               logging is enabled -- presumably a cheap "is logging on"
               guard for this diagnostic; confirm intent.  */
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalidating index %x at pc=%x\n",
                              i, env->pc);
            /* Latch the current PID as the entry's tid, then drop any
               cached translations for the old contents of this entry.  */
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            mmu_flush_idx(env, i);
        }
        /* ext selects which 32-bit half of the 64-bit RAM word to write. */
        tmp64 = env->mmu.rams[rn & 1][i];
        env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
        break;
    case MMU_R_ZPR:
        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Changes to the zone protection reg flush the QEMU TLB.
           Fortunately, these are very uncommon. */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(env_cpu(env));
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Flush translations tied to the outgoing PID before switching. */
        if (v != env->mmu.regs[rn]) {
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBX:
        /* Bit 31 is read-only. */
        env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
        break;
    case MMU_R_TLBSX:
    {
        MicroBlazeMMULookup lu;
        int hit;

        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Search the TLB for the EPN in v; rw=0 treats it as a read.  */
        hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK,
                            0, cpu_mmu_index(env_cpu(env), false));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else {
            /* Miss: signal via the read-only miss bit in TLBX.  */
            env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
}
|
|
|
|
|
2020-09-03 08:18:35 +02:00
|
|
|
/* Reset the MMU model: clear every special register to zero.  */
void mmu_init(MicroBlazeMMU *mmu)
{
    unsigned int n;

    for (n = 0; n < ARRAY_SIZE(mmu->regs); n++) {
        mmu->regs[n] = 0;
    }
}
|