accel/tcg: Store some tlb flags in CPUTLBEntryFull
We have run out of bits we can use within the CPUTLBEntry comparators, as
TLB_FLAGS_MASK cannot overlap alignment.

Store slow_flags[] in CPUTLBEntryFull, and merge with the flags from the
comparator. A new TLB_FORCE_SLOW bit is set within the comparator as an
indication that the slow path must be used.

Move TLB_BSWAP to TLB_SLOW_FLAGS_MASK. Since we are out of bits, we cannot
create a new bit without moving an old one.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 58e8f1f616
parent 97e1576957
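For illustration, a minimal standalone sketch of the scheme described above.
All names and bit values here (Entry, EntryFull, T_*, PAGE_BITS) are simplified
placeholders, not the real QEMU definitions: flags that fit in the sub-page bits
of the comparator stay there, flags that do not fit move to a per-access-type
side array, and a single comparator bit (the FORCE_SLOW marker) tells the lookup
path that it must also merge in the side flags.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS     12          /* toy page size: 4 KiB */

    /* Fast flags: packed into the sub-page bits of the comparator. */
    #define T_INVALID     (1u << (PAGE_BITS - 1))
    #define T_MMIO        (1u << (PAGE_BITS - 2))
    #define T_FORCE_SLOW  (1u << (PAGE_BITS - 3))  /* "also read slow flags" */
    #define FAST_MASK     (T_INVALID | T_MMIO | T_FORCE_SLOW)

    /* Slow flags: stored in a side array, so they cost no comparator bits. */
    #define T_BSWAP       (1u << 0)
    #define SLOW_MASK     T_BSWAP

    /* The two sets must not overlap, or the merged value would be ambiguous. */
    _Static_assert((FAST_MASK & SLOW_MASK) == 0, "flag sets must not overlap");

    typedef struct { uint64_t addr[3]; } Entry;          /* comparator per access type */
    typedef struct { uint8_t slow_flags[3]; } EntryFull; /* side storage */

    /* Encode: pack fast flags into the comparator, stash the rest, mark FORCE_SLOW. */
    static void set_compare(EntryFull *full, Entry *ent, uint64_t page,
                            unsigned flags, int access_type, int enable)
    {
        if (enable) {
            page |= flags & FAST_MASK;
            flags &= SLOW_MASK;
            if (flags) {
                page |= T_FORCE_SLOW;
            }
        } else {
            page = (uint64_t)-1;
            flags = 0;
        }
        ent->addr[access_type] = page;
        full->slow_flags[access_type] = (uint8_t)flags;
    }

    /* Decode: full flag set = comparator bits (minus FORCE_SLOW) + slow flags. */
    static unsigned get_flags(const EntryFull *full, const Entry *ent, int access_type)
    {
        unsigned flags = (unsigned)(ent->addr[access_type] & (FAST_MASK & ~T_FORCE_SLOW));
        return flags | full->slow_flags[access_type];
    }

    int main(void)
    {
        Entry ent = { { 0 } };
        EntryFull full = { { 0 } };

        set_compare(&full, &ent, 0x1000, T_MMIO | T_BSWAP, 0, 1);
        printf("comparator=%#llx flags=%#x\n",
               (unsigned long long)ent.addr[0], get_flags(&full, &ent, 0));
        return 0;
    }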
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1107,6 +1107,24 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
+static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
+                                   target_ulong address, int flags,
+                                   MMUAccessType access_type, bool enable)
+{
+    if (enable) {
+        address |= flags & TLB_FLAGS_MASK;
+        flags &= TLB_SLOW_FLAGS_MASK;
+        if (flags) {
+            address |= TLB_FORCE_SLOW;
+        }
+    } else {
+        address = -1;
+        flags = 0;
+    }
+    ent->addr_idx[access_type] = address;
+    full->slow_flags[access_type] = flags;
+}
+
 /*
  * Add a new TLB entry. At most one entry for a given virtual address
  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
@@ -1122,9 +1140,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     CPUTLB *tlb = env_tlb(env);
     CPUTLBDesc *desc = &tlb->d[mmu_idx];
     MemoryRegionSection *section;
-    unsigned int index;
-    vaddr address;
-    vaddr write_address;
+    unsigned int index, read_flags, write_flags;
     uintptr_t addend;
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
@@ -1153,13 +1169,13 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
               " prot=%x idx=%d\n",
               addr, full->phys_addr, prot, mmu_idx);
 
-    address = addr_page;
+    read_flags = 0;
     if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access.  */
-        address |= TLB_INVALID_MASK;
+        read_flags |= TLB_INVALID_MASK;
     }
     if (full->attrs.byte_swap) {
-        address |= TLB_BSWAP;
+        read_flags |= TLB_BSWAP;
     }
 
     is_ram = memory_region_is_ram(section->mr);
@@ -1173,7 +1189,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
         addend = 0;
     }
 
-    write_address = address;
+    write_flags = read_flags;
     if (is_ram) {
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
         /*
@@ -1182,9 +1198,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
          */
         if (prot & PAGE_WRITE) {
             if (section->readonly) {
-                write_address |= TLB_DISCARD_WRITE;
+                write_flags |= TLB_DISCARD_WRITE;
             } else if (cpu_physical_memory_is_clean(iotlb)) {
-                write_address |= TLB_NOTDIRTY;
+                write_flags |= TLB_NOTDIRTY;
             }
         }
     } else {
@@ -1195,9 +1211,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
          * Reads to romd devices go through the ram_ptr found above,
          * but of course reads to I/O must go through MMIO.
          */
-        write_address |= TLB_MMIO;
+        write_flags |= TLB_MMIO;
         if (!is_romd) {
-            address = write_address;
+            read_flags = write_flags;
         }
     }
 
@@ -1242,7 +1258,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * TARGET_PAGE_BITS, and either
      *  + the ram_addr_t of the page base of the target RAM (RAM)
      *  + the offset within section->mr of the page base (I/O, ROMD)
-     * We subtract the vaddr_page (which is page aligned and thus won't
+     * We subtract addr_page (which is page aligned and thus won't
      * disturb the low bits) to give an offset which can be added to the
      * (non-page-aligned) vaddr of the eventual memory access to get
      * the MemoryRegion offset for the access. Note that the vaddr we
@@ -1250,36 +1266,30 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
     desc->fulltlb[index] = *full;
-    desc->fulltlb[index].xlat_section = iotlb - addr_page;
-    desc->fulltlb[index].phys_addr = paddr_page;
+    full = &desc->fulltlb[index];
+    full->xlat_section = iotlb - addr_page;
+    full->phys_addr = paddr_page;
 
     /* Now calculate the new entry */
     tn.addend = addend - addr_page;
-    if (prot & PAGE_READ) {
-        tn.addr_read = address;
-        if (wp_flags & BP_MEM_READ) {
-            tn.addr_read |= TLB_WATCHPOINT;
-        }
-    } else {
-        tn.addr_read = -1;
-    }
 
-    if (prot & PAGE_EXEC) {
-        tn.addr_code = address;
-    } else {
-        tn.addr_code = -1;
-    }
+    tlb_set_compare(full, &tn, addr_page, read_flags,
+                    MMU_INST_FETCH, prot & PAGE_EXEC);
 
-    tn.addr_write = -1;
-    if (prot & PAGE_WRITE) {
-        tn.addr_write = write_address;
-        if (prot & PAGE_WRITE_INV) {
-            tn.addr_write |= TLB_INVALID_MASK;
-        }
-        if (wp_flags & BP_MEM_WRITE) {
-            tn.addr_write |= TLB_WATCHPOINT;
-        }
+    if (wp_flags & BP_MEM_READ) {
+        read_flags |= TLB_WATCHPOINT;
     }
+    tlb_set_compare(full, &tn, addr_page, read_flags,
+                    MMU_DATA_LOAD, prot & PAGE_READ);
+
+    if (prot & PAGE_WRITE_INV) {
+        write_flags |= TLB_INVALID_MASK;
+    }
+    if (wp_flags & BP_MEM_WRITE) {
+        write_flags |= TLB_WATCHPOINT;
+    }
+    tlb_set_compare(full, &tn, addr_page, write_flags,
+                    MMU_DATA_STORE, prot & PAGE_WRITE);
 
     copy_tlb_helper_locked(te, &tn);
     tlb_n_used_entries_inc(env, mmu_idx);
@@ -1509,7 +1519,8 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
     vaddr page_addr = addr & TARGET_PAGE_MASK;
-    int flags = TLB_FLAGS_MASK;
+    int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+    CPUTLBEntryFull *full;
 
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
@@ -1538,7 +1549,8 @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
     }
     flags &= tlb_addr;
 
-    *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    flags |= full->slow_flags[access_type];
 
     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM. */
     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1761,6 +1773,8 @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
     bool maybe_resized = false;
+    CPUTLBEntryFull *full;
+    int flags;
 
     /* If the TLB entry is for a different page, reload and try again. */
     if (!tlb_hit(tlb_addr, addr)) {
@@ -1774,8 +1788,12 @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
     }
 
-    data->flags = tlb_addr & TLB_FLAGS_MASK;
-    data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
+    flags |= full->slow_flags[access_type];
+
+    data->full = full;
+    data->flags = flags;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
     data->haddr = (void *)((uintptr_t)addr + entry->addend);
 
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -327,17 +327,30 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap.  */
-#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))
 /* Set if TLB entry writes ignored.  */
 #define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))
 
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
     (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-    | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+    | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap.  */
+#define TLB_BSWAP            (1 << 0)
+
+#define TLB_SLOW_FLAGS_MASK  TLB_BSWAP
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -124,6 +124,12 @@ typedef struct CPUTLBEntryFull {
     /* @lg_page_size contains the log2 of the page size. */
     uint8_t lg_page_size;
 
+    /*
+     * Additional tlb flags for use by the slow path. If non-zero,
+     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+     */
+    uint8_t slow_flags[MMU_ACCESS_COUNT];
+
     /*
      * Allow target-specific additions to this structure.
      * This may be used to cache items from the guest cpu
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -84,6 +84,7 @@ typedef enum MMUAccessType {
     MMU_DATA_LOAD = 0,
     MMU_DATA_STORE = 1,
     MMU_INST_FETCH = 2
+#define MMU_ACCESS_COUNT 3
 } MMUAccessType;
 
 typedef struct CPUWatchpoint CPUWatchpoint;