target/hppa: Populate an interval tree with valid tlb entries
Complete the data structure conversion started earlier. This reduces
the perf overhead of hppa_get_physical_address from ~5% to ~0.25%.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 09cae8255f
commit d7553f3591
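
The heart of the change: hppa_get_physical_address previously scanned all
HPPA_TLB_ENTRIES slots on every translation, while valid entries now live in
an interval tree, so a hit is a single stabbing query. A minimal sketch of the
two lookup shapes, written against QEMU's include/qemu/interval-tree.h API
(the lookup_* names are illustrative, not part of the patch):

    /* Before: every lookup walks the whole fixed-size array. */
    static HPPATLBEntry *lookup_linear(CPUHPPAState *env, vaddr addr)
    {
        for (size_t i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
            HPPATLBEntry *ent = &env->tlb[i];
            if (ent->entry_valid
                && ent->itree.start <= addr && addr <= ent->itree.last) {
                return ent;
            }
        }
        return NULL;
    }

    /* After: one O(log n) stabbing query over valid entries only. */
    static HPPATLBEntry *lookup_tree(CPUHPPAState *env, vaddr addr)
    {
        IntervalTreeNode *n = interval_tree_iter_first(&env->tlb_root,
                                                       addr, addr);
        return n ? container_of(n, HPPATLBEntry, itree) : NULL;
    }
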
target/hppa/cpu.c
@@ -137,8 +137,10 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
 #ifndef CONFIG_USER_ONLY
     {
         HPPACPU *cpu = HPPA_CPU(cs);
+
         cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         hppa_cpu_alarm_timer, cpu);
+        hppa_ptlbe(&cpu->env);
     }
 #endif
 }

target/hppa/cpu.h
@@ -176,7 +176,10 @@ typedef int64_t target_sreg;
 #endif
 
 typedef struct HPPATLBEntry {
-    IntervalTreeNode itree;
+    union {
+        IntervalTreeNode itree;
+        struct HPPATLBEntry *unused_next;
+    };
 
     target_ureg pa;
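
The union is the space trick here: at any moment an entry is either linked
into the search tree (itree live) or threaded onto the free list
(unused_next live), never both, so the two structures share one allocation.
A sketch of the release path under that invariant (tlb_entry_release is a
hypothetical name; the real work happens inside hppa_flush_tlb_ent below):

    /* Retire a valid entry: unlink it from the tree, then reuse the
     * same bytes as the free-list link. */
    static void tlb_entry_release(CPUHPPAState *env, HPPATLBEntry *ent)
    {
        interval_tree_remove(&ent->itree, &env->tlb_root);
        memset(ent, 0, sizeof(*ent));
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
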
@@ -234,10 +237,22 @@ typedef struct CPUArchState {
 #define HPPA_TLB_ENTRIES 256
 #define HPPA_BTLB_ENTRIES (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)
 
-    /* ??? Implement a unified itlb/dtlb for the moment. */
-    /* ??? We should use a more intelligent data structure. */
-    HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
+    /* Index for round-robin tlb eviction. */
     uint32_t tlb_last;
 
+    /*
+     * For pa1.x, the partially initialized, still invalid tlb entry
+     * which has had ITLBA performed, but not yet ITLBP.
+     */
+    HPPATLBEntry *tlb_partial;
+
+    /* Linked list of all invalid (unused) tlb entries. */
+    HPPATLBEntry *tlb_unused;
+
+    /* Root of the search tree for all valid tlb entries. */
+    IntervalTreeRoot tlb_root;
+
+    HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
 } CPUHPPAState;
 
 /**
@@ -356,6 +371,7 @@ int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
 #ifndef CONFIG_USER_ONLY
+void hppa_ptlbe(CPUHPPAState *env);
 hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
 bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,

target/hppa/machine.c
@@ -72,8 +72,6 @@ static int get_tlb(QEMUFile *f, void *opaque, size_t size,
     HPPATLBEntry *ent = opaque;
     uint32_t val;
 
-    memset(ent, 0, sizeof(*ent));
-
     ent->itree.start = qemu_get_be64(f);
     ent->pa = qemu_get_betr(f);
     val = qemu_get_be32(f);
@@ -122,6 +120,53 @@ static const VMStateInfo vmstate_tlb = {
     .put = put_tlb,
 };
 
+static int tlb_pre_load(void *opaque)
+{
+    CPUHPPAState *env = opaque;
+
+    /*
+     * Zap the entire tlb, on-the-side data structures and all.
+     * Each tlb entry will have data re-filled by get_tlb.
+     */
+    memset(env->tlb, 0, sizeof(env->tlb));
+    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+    env->tlb_unused = NULL;
+    env->tlb_partial = NULL;
+
+    return 0;
+}
+
+static int tlb_post_load(void *opaque, int version_id)
+{
+    CPUHPPAState *env = opaque;
+    HPPATLBEntry **unused = &env->tlb_unused;
+    HPPATLBEntry *partial = NULL;
+
+    /*
+     * Re-create the interval tree from the valid entries.
+     * Truly invalid entries should have start == last == 0.
+     * Otherwise it should be the in-flight tlb_partial entry.
+     */
+    for (uint32_t i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+        HPPATLBEntry *e = &env->tlb[i];
+
+        if (e->entry_valid) {
+            interval_tree_insert(&e->itree, &env->tlb_root);
+        } else if (i < HPPA_BTLB_ENTRIES) {
+            /* btlb not in unused list */
+        } else if (partial == NULL && e->itree.start < e->itree.last) {
+            partial = e;
+        } else {
+            *unused = e;
+            unused = &e->unused_next;
+        }
+    }
+    env->tlb_partial = partial;
+    *unused = NULL;
+
+    return 0;
+}
+
 static VMStateField vmstate_env_fields[] = {
     VMSTATE_UINTTR_ARRAY(gr, CPUHPPAState, 32),
     VMSTATE_UINT64_ARRAY(fr, CPUHPPAState, 32),
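
tlb_post_load rebuilds the free list with the pointer-to-pointer tail idiom:
unused always points at the link field to fill next, so entries append in
array order with no special case for the first element. The same idiom in
isolation (an illustrative helper, not part of the patch):

    /* Rebuild the free list of invalid dynamic entries in one pass. */
    static void rebuild_unused_list(CPUHPPAState *env)
    {
        HPPATLBEntry **tail = &env->tlb_unused;

        for (uint32_t i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
            if (!env->tlb[i].entry_valid) {
                *tail = &env->tlb[i];            /* append this entry */
                tail = &env->tlb[i].unused_next; /* advance the tail link */
            }
        }
        *tail = NULL;                            /* terminate the list */
    }
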
@@ -164,6 +209,8 @@ static const VMStateDescription vmstate_env = {
     .version_id = 1,
     .minimum_version_id = 1,
     .fields = vmstate_env_fields,
+    .pre_load = tlb_pre_load,
+    .post_load = tlb_post_load,
 };
 
 static VMStateField vmstate_cpu_fields[] = {
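
Note the division of labor in the vmstate: only the tlb[] array itself
crosses the wire (via get_tlb/put_tlb), while the interval tree, the free
list, and tlb_partial are treated as derived state, zapped in tlb_pre_load
and reconstructed in tlb_post_load on the destination.
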
target/hppa/mem_helper.c
@@ -27,16 +27,13 @@
 
 static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
 {
-    int i;
+    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);
 
-    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
-        HPPATLBEntry *ent = &env->tlb[i];
-        if (ent->itree.start <= addr && addr <= ent->itree.last) {
-            trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
-                                      ent->itree.start, ent->itree.last,
-                                      ent->pa);
-            return ent;
-        }
+    if (i) {
+        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
+        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
+                                  ent->itree.start, ent->itree.last, ent->pa);
+        return ent;
     }
     trace_hppa_tlb_find_entry_not_found(env, addr);
     return NULL;
@@ -46,6 +43,7 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                                bool force_flush_btlb)
 {
     CPUState *cs = env_cpu(env);
+    bool is_btlb;
 
     if (!ent->entry_valid) {
         return;
@@ -58,50 +56,55 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
 
-    /* never clear BTLBs, unless forced to do so. */
-    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
+    /* Never clear BTLBs, unless forced to do so. */
+    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES];
+    if (is_btlb && !force_flush_btlb) {
         return;
     }
 
+    interval_tree_remove(&ent->itree, &env->tlb_root);
     memset(ent, 0, sizeof(*ent));
-    ent->itree.start = -1;
+
+    if (!is_btlb) {
+        ent->unused_next = env->tlb_unused;
+        env->tlb_unused = ent;
+    }
 }
 
-static HPPATLBEntry *hppa_flush_tlb_range(CPUHPPAState *env,
-                                          vaddr va_b, vaddr va_e)
+static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
 {
-    HPPATLBEntry *empty = NULL;
+    IntervalTreeNode *i, *n;
 
-    /* Zap any old entries covering ADDR; notice empty entries on the way. */
-    for (int i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
-        HPPATLBEntry *ent = &env->tlb[i];
+    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
+    for (; i ; i = n) {
+        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
 
-        if (!ent->entry_valid) {
-            empty = ent;
-        } else if (va_e >= ent->itree.start && va_b <= ent->itree.last) {
-            hppa_flush_tlb_ent(env, ent, false);
-            empty = ent;
-        }
+        /*
+         * Find the next entry now: In the normal case the current entry
+         * will be removed, but in the BTLB case it will remain.
+         */
+        n = interval_tree_iter_next(i, va_b, va_e);
+        hppa_flush_tlb_ent(env, ent, false);
     }
-    return empty;
 }
 
 static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
 {
-    HPPATLBEntry *ent;
-    uint32_t i;
+    HPPATLBEntry *ent = env->tlb_unused;
 
-    if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) {
-        i = HPPA_BTLB_ENTRIES;
-        env->tlb_last = HPPA_BTLB_ENTRIES + 1;
-    } else {
-        i = env->tlb_last;
-        env->tlb_last++;
+    if (ent == NULL) {
+        uint32_t i = env->tlb_last;
+
+        if (i < HPPA_BTLB_ENTRIES || i >= ARRAY_SIZE(env->tlb)) {
+            i = HPPA_BTLB_ENTRIES;
+        }
+        env->tlb_last = i + 1;
+
+        ent = &env->tlb[i];
+        hppa_flush_tlb_ent(env, ent, false);
     }
 
-    ent = &env->tlb[i];
+    env->tlb_unused = ent->unused_next;
 
-    hppa_flush_tlb_ent(env, ent, false);
     return ent;
 }
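
Two details above are easy to miss: hppa_flush_tlb_range must fetch the
successor before flushing, since the flush usually removes the current node
from the tree, and hppa_alloc_tlb_ent now prefers the free list, falling back
to round-robin eviction (the evicted victim lands on the free list, so the
final pop is unconditional). The removal-safe loop reduced to a skeleton
(illustrative, not part of the patch):

    static void flush_range_skeleton(CPUHPPAState *env, vaddr va_b, vaddr va_e)
    {
        IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root,
                                                       va_b, va_e);
        while (i) {
            /* Grab the successor before anything can unlink i. */
            IntervalTreeNode *n = interval_tree_iter_next(i, va_b, va_e);
            hppa_flush_tlb_ent(env, container_of(i, HPPATLBEntry, itree),
                               false);
            i = n;
        }
    }
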
@@ -127,7 +130,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
 
     /* Find a valid tlb entry that matches the virtual address. */
     ent = hppa_find_tlb(env, addr);
-    if (ent == NULL || !ent->entry_valid) {
+    if (ent == NULL) {
         phys = 0;
         prot = 0;
         ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
@@ -303,23 +306,23 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
 void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
 {
-    HPPATLBEntry *empty;
+    HPPATLBEntry *ent;
 
-    /* Zap any old entries covering ADDR; notice empty entries on the way. */
+    /* Zap any old entries covering ADDR. */
     addr &= TARGET_PAGE_MASK;
-    empty = hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
+    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);
 
-    /* If we didn't see an empty entry, evict one. */
-    if (empty == NULL) {
-        empty = hppa_alloc_tlb_ent(env);
+    ent = env->tlb_partial;
+    if (ent == NULL) {
+        ent = hppa_alloc_tlb_ent(env);
+        env->tlb_partial = ent;
     }
 
-    /* Note that empty->entry_valid == 0 already. */
-    empty->itree.start = addr;
-    empty->itree.last = addr + TARGET_PAGE_SIZE - 1;
-    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
-    trace_hppa_tlb_itlba(env, empty, empty->itree.start,
-                         empty->itree.last, empty->pa);
+    /* Note that ent->entry_valid == 0 already. */
+    ent->itree.start = addr;
+    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
+    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
 }
 
 static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
@@ -333,6 +336,8 @@ static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
     ent->d = extract32(reg, 28, 1);
     ent->t = extract32(reg, 29, 1);
     ent->entry_valid = 1;
+
+    interval_tree_insert(&ent->itree, &env->tlb_root);
     trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                          ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
 }
@@ -340,14 +345,16 @@ static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
 /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
 void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
 {
-    HPPATLBEntry *ent = hppa_find_tlb(env, addr);
+    HPPATLBEntry *ent = env->tlb_partial;
 
-    if (unlikely(ent == NULL)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
-        return;
+    if (ent) {
+        env->tlb_partial = NULL;
+        if (ent->itree.start <= addr && addr <= ent->itree.last) {
+            set_access_bits(env, ent, reg);
+            return;
+        }
     }
-
-    set_access_bits(env, ent, reg);
+    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
 }
 
 /* Purge (Insn/Data) TLB. This is explicitly page-based, and is
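
Staging the PA 1.1 two-step insert through tlb_partial is what lets
hppa_find_tlb stop returning invalid entries: ITLBA builds the entry off to
the side, and only ITLBP, after filling the access bits, inserts the node
into the tree, so a lookup can never observe a half-built entry. The sequence
condensed into one hypothetical function (illustrative only):

    static void itlba_then_itlbp(CPUHPPAState *env, target_ulong addr,
                                 target_ureg reg)
    {
        /* ITLBA: stage the address range; not yet visible to lookups. */
        HPPATLBEntry *ent = hppa_alloc_tlb_ent(env);
        ent->itree.start = addr;
        ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
        env->tlb_partial = ent;

        /* ITLBP: fill the protection bits, then publish into the tree. */
        env->tlb_partial = NULL;
        set_access_bits(env, ent, reg); /* sets entry_valid, inserts node */
    }
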
@@ -356,17 +363,15 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUHPPAState *env = cpu_env(cpu);
     target_ulong addr = (target_ulong) data.target_ptr;
-    HPPATLBEntry *ent = hppa_find_tlb(env, addr);
 
-    if (ent && ent->entry_valid) {
-        hppa_flush_tlb_ent(env, ent, false);
-    }
+    hppa_flush_tlb_range(env, addr, addr);
 }
 
 void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
 {
     CPUState *src = env_cpu(env);
     CPUState *cpu;
 
     trace_hppa_tlb_ptlb(env);
     run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
@@ -378,16 +383,40 @@ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
     async_safe_run_on_cpu(src, ptlb_work, data);
 }
 
+void hppa_ptlbe(CPUHPPAState *env)
+{
+    uint32_t i;
+
+    /* Zap the (non-btlb) tlb entries themselves. */
+    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
+           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
+    env->tlb_last = HPPA_BTLB_ENTRIES;
+    env->tlb_partial = NULL;
+
+    /* Put them all onto the unused list. */
+    env->tlb_unused = &env->tlb[HPPA_BTLB_ENTRIES];
+    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
+        env->tlb[i].unused_next = &env->tlb[i + 1];
+    }
+
+    /* Re-initialize the interval tree with only the btlb entries. */
+    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
+    for (i = 0; i < HPPA_BTLB_ENTRIES; ++i) {
+        if (env->tlb[i].entry_valid) {
+            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
+        }
+    }
+
+    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
+}
+
 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
    number of pages/entries (we choose all), and is local to the cpu. */
 void HELPER(ptlbe)(CPUHPPAState *env)
 {
     trace_hppa_tlb_ptlbe(env);
     qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
-    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
-           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
-    env->tlb_last = HPPA_BTLB_ENTRIES;
-    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
+    hppa_ptlbe(env);
 }
 
 void cpu_hppa_change_prot_id(CPUHPPAState *env)
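
With hppa_ptlbe factored out, one routine now owns the "reset the dynamic
tlb" sequence: return every non-btlb slot to the free list, drop any
in-flight tlb_partial, and re-seed the tree with whatever BTLB entries are
still valid. hppa_cpu_realizefn calls it once to establish the initial free
list, and HELPER(ptlbe) reuses it for the guest-visible purge-all.
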
@@ -483,9 +512,11 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
                    (long long) virt_page, phys_page, len, slot);
     if (slot < HPPA_BTLB_ENTRIES) {
         btlb = &env->tlb[slot];
-        /* force flush of possibly existing BTLB entry */
+
+        /* Force flush of possibly existing BTLB entry. */
         hppa_flush_tlb_ent(env, btlb, true);
-        /* create new BTLB entry */
+
+        /* Create new BTLB entry */
         btlb->itree.start = virt_page << TARGET_PAGE_BITS;
         btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
         btlb->pa = phys_page << TARGET_PAGE_BITS;