Clean up MMIO TLB handling.

The IO index is now stored in its own field, instead of being wedged
into the vaddr field.  This eliminates the ROMD and watchpoint host
pointer weirdness.  The IO index space is expanded by 1 bit, and
several additional bits are made available in the TLB vaddr field.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4704 c046a42c-6fe2-441c-8c8c-71466251a162
pbrook 2008-06-09 00:20:13 +00:00
parent f227f17d1b
commit 0f459d16c3
7 changed files with 194 additions and 175 deletions
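
For orientation: the shape of the fast path this commit establishes, distilled from the softmmu_template.h hunks at the bottom. This is an editorial sketch, not code from the commit; TLB refill and unaligned handling are elided.

/* A valid RAM mapping keeps all low flag bits clear, so a single masked
   compare selects the fast path.  Any set flag (TLB_INVALID_MASK,
   TLB_NOTDIRTY, TLB_MMIO) diverts the access to the slow path, which now
   consults env->iotlb[] instead of decoding an io index out of vaddr. */
tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
if ((addr & TARGET_PAGE_MASK) ==
    (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
    if (tlb_addr & ~TARGET_PAGE_MASK) {
        /* IO, not-dirty RAM or watchpoint: env->iotlb[mmu_idx][index]
           supplies the handler index and physical address. */
    } else {
        /* Plain RAM: host pointer is addr + the TLB entry's addend. */
    }
} else {
    /* TLB miss: refill via tlb_set_page_exec(). */
}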

cpu-all.h

@@ -797,7 +797,7 @@ extern CPUState *cpu_single_env;
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
void cpu_watchpoint_remove_all(CPUState *env);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
@@ -868,21 +868,34 @@ extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
/* physical memory access */
#define TLB_INVALID_MASK (1 << 3)
#define IO_MEM_SHIFT 4
/* MMIO pages are identified by a combination of an IO device index and
3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
so only a limited number of ids are available. */
#define IO_MEM_SHIFT 3
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
/* acts like a ROM when read and like a device when written. As an
exception, the write memory callback gets the ram offset instead of
the physical address */
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
#define IO_MEM_SUBWIDTH (4)
/* Flags stored in the low bits of the TLB virtual address. These are
defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
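
To make the arithmetic concrete, a minimal standalone sketch of the phys_offset encoding, assuming TARGET_PAGE_BITS == 12 (target dependent); names mirror the defines above and the device index is made up:

#include <stdint.h>
#include <stdio.h>

#define IO_MEM_SHIFT      3
#define IO_MEM_NB_ENTRIES (1 << (12 - IO_MEM_SHIFT))  /* 512 ids */
#define IO_MEM_ROMD       1

int main(void)
{
    /* A hypothetical ROMD device registered at IO index 5: the index
       sits in bits 3..11, the ROMD/SUBPAGE/SUBWIDTH flags in bits 0..2. */
    uint32_t pd = (5 << IO_MEM_SHIFT) | IO_MEM_ROMD;

    printf("io index = %u, romd = %u, ids = %d\n",
           (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1),
           pd & IO_MEM_ROMD, IO_MEM_NB_ENTRIES);
    return 0;
}

Dropping IO_MEM_SHIFT from 4 to 3 is the extra bit the commit message mentions: 512 rather than 256 ids fit in a 4K page offset.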

cpu-defs.h

@@ -106,16 +106,17 @@ typedef uint64_t target_phys_addr_t;
#endif
typedef struct CPUTLBEntry {
/* bit 31 to TARGET_PAGE_BITS : virtual address
bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT : if non zero, memory io
zone number
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
go directly to ram.
bit 3 : indicates that the entry is invalid
bit 2..0 : zero
*/
target_ulong addr_read;
target_ulong addr_write;
target_ulong addr_code;
/* addend to virtual address to get physical address */
/* Addend to virtual address to get physical address. IO accesses
use the corresponding iotlb value. */
#if TARGET_PHYS_ADDR_BITS == 64
/* on i386 Linux make sure it is aligned */
target_phys_addr_t addend __attribute__((aligned(8)));
@@ -143,6 +144,7 @@ typedef struct CPUTLBEntry {
int halted; /* TRUE if the CPU is in suspend state */ \
/* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
/* buffer for temporaries in the code generator */ \
long temp_buf[CPU_TEMP_BUF_NLONGS]; \
@@ -155,7 +157,7 @@ typedef struct CPUTLBEntry {
\
struct { \
target_ulong vaddr; \
target_phys_addr_t addend; \
int type; /* PAGE_READ/PAGE_WRITE */ \
} watchpoint[MAX_WATCHPOINTS]; \
int nb_watchpoints; \
int watchpoint_hit; \
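
The iotlb added above is a plain array parallel to tlb_table, so a slow-path access reuses the index it has already computed. A sketch of the shared lookup, mirroring the softmmu_template.h hunks below:

/* One index addresses both tables: the TLB entry supplies the tag and
   flag bits, the iotlb entry the IO handler index plus physical page. */
index    = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
iotlb    = env->iotlb[mmu_idx][index];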

exec.c

@@ -121,7 +121,7 @@ typedef struct PageDesc {
} PageDesc;
typedef struct PhysPageDesc {
/* offset in host memory of the page + io_index in the low 12 bits */
/* offset in host memory of the page + io_index in the low bits */
ram_addr_t phys_offset;
} PhysPageDesc;
@@ -1188,7 +1188,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
#endif
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
int i;
@@ -1201,6 +1201,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
i = env->nb_watchpoints++;
env->watchpoint[i].vaddr = addr;
env->watchpoint[i].type = type;
tlb_flush_page(env, addr);
/* FIXME: This flush is needed because of the hack to make memory ops
terminate the TB. It can be removed once the proper IO trap and
@@ -1617,7 +1618,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if ((addr - start) < length) {
tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
}
}
}
@@ -1681,7 +1682,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
tlb_entry->addend - (unsigned long)phys_ram_base;
if (!cpu_physical_memory_is_dirty(ram_addr)) {
tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
tlb_entry->addr_write |= TLB_NOTDIRTY;
}
}
}
@@ -1704,33 +1705,26 @@ void cpu_tlb_update_dirty(CPUState *env)
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
unsigned long start)
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
unsigned long addr;
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if (addr == start) {
tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
}
}
if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
tlb_entry->addr_write = vaddr;
}
/* update the TLB corresponding to virtual page vaddr and phys addr
addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
unsigned long addr, target_ulong vaddr)
/* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
int i;
addr &= TARGET_PAGE_MASK;
vaddr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_set_dirty1(&env->tlb_table[0][i], addr);
tlb_set_dirty1(&env->tlb_table[1][i], addr);
tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
tlb_set_dirty1(&env->tlb_table[2][i], addr);
tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
tlb_set_dirty1(&env->tlb_table[3][i], addr);
tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
@@ -1747,10 +1741,12 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
unsigned long pd;
unsigned int index;
target_ulong address;
target_ulong code_address;
target_phys_addr_t addend;
int ret;
CPUTLBEntry *te;
int i;
target_phys_addr_t iotlb;
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
@@ -1764,64 +1760,69 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
#endif
ret = 0;
{
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
/* IO memory case */
address = vaddr | pd;
addend = paddr;
} else {
/* standard memory */
address = vaddr;
addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
}
address = vaddr;
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
/* IO memory case (romd handled later) */
address |= TLB_MMIO;
}
addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
/* Normal RAM. */
iotlb = pd & TARGET_PAGE_MASK;
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
iotlb |= IO_MEM_NOTDIRTY;
else
iotlb |= IO_MEM_ROM;
} else {
/* IO handlers are currently passed a phsical address.
It would be nice to pass an offset from the base address
of that region. This would avoid having to special case RAM,
and avoid full address decoding in every device.
We can't use the high bits of pd for this because
IO_MEM_ROMD uses these as a ram address. */
iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
}
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
for (i = 0; i < env->nb_watchpoints; i++) {
if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
if (address & ~TARGET_PAGE_MASK) {
env->watchpoint[i].addend = 0;
address = vaddr | io_mem_watch;
} else {
env->watchpoint[i].addend = pd - paddr +
(unsigned long) phys_ram_base;
/* TODO: Figure out how to make read watchpoints coexist
with code. */
pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
}
}
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
for (i = 0; i < env->nb_watchpoints; i++) {
if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
reads of pages with a write breakpoint. */
address |= TLB_MMIO;
}
}
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
addend -= vaddr;
te = &env->tlb_table[mmu_idx][index];
te->addend = addend;
if (prot & PAGE_READ) {
te->addr_read = address;
} else {
te->addr_read = -1;
}
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
env->iotlb[mmu_idx][index] = iotlb - vaddr;
te = &env->tlb_table[mmu_idx][index];
te->addend = addend - vaddr;
if (prot & PAGE_READ) {
te->addr_read = address;
} else {
te->addr_read = -1;
}
if (prot & PAGE_EXEC) {
te->addr_code = address;
if (prot & PAGE_EXEC) {
te->addr_code = code_address;
} else {
te->addr_code = -1;
}
if (prot & PAGE_WRITE) {
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
(pd & IO_MEM_ROMD)) {
/* Write access calls the I/O callback. */
te->addr_write = address | TLB_MMIO;
} else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
!cpu_physical_memory_is_dirty(pd)) {
te->addr_write = address | TLB_NOTDIRTY;
} else {
te->addr_code = -1;
}
if (prot & PAGE_WRITE) {
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
(pd & IO_MEM_ROMD)) {
/* write access calls the I/O callback */
te->addr_write = vaddr |
(pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
} else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
!cpu_physical_memory_is_dirty(pd)) {
te->addr_write = vaddr | IO_MEM_NOTDIRTY;
} else {
te->addr_write = address;
}
} else {
te->addr_write = -1;
te->addr_write = address;
}
} else {
te->addr_write = -1;
}
return ret;
}
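
Restating the two encodings chosen above (the local names io and io_index are illustrative, not from the commit): RAM-backed pages store the page's ram offset plus a pseudo handler id (IO_MEM_NOTDIRTY or IO_MEM_ROM), MMIO pages store the real device id plus the physical page, and because the stored value is iotlb - vaddr, the slow path decodes both the same way:

/* Decode performed by io_read/io_write in softmmu_template.h.  vaddr is
   page aligned, so subtracting it leaves the in-page id bits intact. */
target_phys_addr_t io = env->iotlb[mmu_idx][index];      /* iotlb - vaddr */
int io_index = (io >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
/* Adding the access address back yields paddr for MMIO, or the ram
   offset for not-dirty RAM and ROM writes. */
target_phys_addr_t physaddr = (io & TARGET_PAGE_MASK) + addr;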
@@ -2181,11 +2182,10 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
unsigned long ram_addr;
int dirty_flags;
ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
@@ -2193,7 +2193,7 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
stb_p((uint8_t *)(long)addr, val);
stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2204,14 +2204,13 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
unsigned long ram_addr;
int dirty_flags;
ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
@@ -2219,7 +2218,7 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
stw_p((uint8_t *)(long)addr, val);
stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2230,14 +2229,13 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
unsigned long ram_addr;
int dirty_flags;
ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
@@ -2245,7 +2243,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
stl_p((uint8_t *)(long)addr, val);
stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2256,7 +2254,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}
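
Note the parameter rename in the three handlers above: the iotlb entry for dirty-tracked RAM carries the page's ram offset rather than a host pointer, so the generic dispatch already delivers a ram offset and the old phys_ram_base subtraction disappears. Schematically (an editorial sketch of the data flow; stored_iotlb is an illustrative name):

iotlb = (pd & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;    /* at TLB fill */
ram_addr = (stored_iotlb & TARGET_PAGE_MASK) + addr;  /* at dispatch */
stb_p(phys_ram_base + ram_addr, val);                 /* in the handler */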
static CPUReadMemoryFunc *error_mem_read[3] = {
@@ -2271,67 +2269,63 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int flags)
{
CPUState *env = cpu_single_env;
target_ulong vaddr;
int i;
vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
for (i = 0; i < env->nb_watchpoints; i++) {
if (vaddr == env->watchpoint[i].vaddr
&& (env->watchpoint[i].type & flags)) {
env->watchpoint_hit = i + 1;
cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
break;
}
}
}
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
return ldub_phys(addr);
}
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
return lduw_phys(addr);
}
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
Returns the real physical address of the access. addr will be a host
address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
CPUState *env = cpu_single_env;
target_ulong watch;
target_ulong retaddr;
int i;
retaddr = addr;
for (i = 0; i < env->nb_watchpoints; i++) {
watch = env->watchpoint[i].vaddr;
if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
retaddr = addr - env->watchpoint[i].addend;
if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
cpu_single_env->watchpoint_hit = i + 1;
cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
break;
}
}
}
return retaddr;
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
addr = check_watchpoint(addr);
check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
stb_phys(addr, val);
}
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
addr = check_watchpoint(addr);
check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
stw_phys(addr, val);
}
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
addr = check_watchpoint(addr);
check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
stl_phys(addr, val);
}
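
With the type argument in place, callers state the access kind explicitly; the gdbstub hunk below uses exactly these combinations. A usage sketch:

/* Write watchpoint: only stores trap. */
cpu_watchpoint_insert(env, addr, PAGE_WRITE);

/* Access watchpoint: loads and stores both trap.  (Per the TODO in
   tlb_set_page_exec, reads still trap on any watchpointed page.) */
cpu_watchpoint_insert(env, addr, PAGE_READ | PAGE_WRITE);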
@@ -2501,7 +2495,7 @@ static void io_mem_init(void)
cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
io_mem_nb = 5;
io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
watch_mem_write, NULL);
/* alloc dirty bits array */
phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);

gdbstub.c

@@ -1117,21 +1117,37 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
if (*p == ',')
p++;
len = strtoull(p, (char **)&p, 16);
if (type == 0 || type == 1) {
switch (type) {
case 0:
case 1:
if (cpu_breakpoint_insert(env, addr) < 0)
goto breakpoint_error;
put_packet(s, "OK");
break;
#ifndef CONFIG_USER_ONLY
} else if (type == 2) {
if (cpu_watchpoint_insert(env, addr) < 0)
case 2:
type = PAGE_WRITE;
goto insert_watchpoint;
case 3:
type = PAGE_READ;
goto insert_watchpoint;
case 4:
type = PAGE_READ | PAGE_WRITE;
insert_watchpoint:
if (cpu_watchpoint_insert(env, addr, type) < 0)
goto breakpoint_error;
put_packet(s, "OK");
break;
#endif
} else {
breakpoint_error:
put_packet(s, "E22");
default:
put_packet(s, "");
break;
}
break;
breakpoint_error:
put_packet(s, "E22");
break;
case 'z':
type = strtoul(p, (char **)&p, 16);
if (*p == ',')
@@ -1144,12 +1160,12 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
cpu_breakpoint_remove(env, addr);
put_packet(s, "OK");
#ifndef CONFIG_USER_ONLY
} else if (type == 2) {
} else if (type >= 2 && type <= 4) {
cpu_watchpoint_remove(env, addr);
put_packet(s, "OK");
#endif
} else {
goto breakpoint_error;
put_packet(s, "");
}
break;
case 'q':

hw/pflash_cfi01.c

@@ -202,14 +202,8 @@ static void pflash_write (pflash_t *pfl, target_ulong offset, uint32_t value,
uint8_t *p;
uint8_t cmd;
/* WARNING: when the memory area is in ROMD mode, the offset is a
ram offset, not a physical address */
cmd = value;
if (pfl->wcycle == 0)
offset -= (target_ulong)(long)pfl->storage;
else
offset -= pfl->base;
offset -= pfl->base;
DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d wcycle 0x%x\n",
__func__, offset, value, width, pfl->wcycle);

hw/pflash_cfi02.c

@@ -112,13 +112,12 @@ static uint32_t pflash_read (pflash_t *pfl, uint32_t offset, int width)
DPRINTF("%s: offset " TARGET_FMT_lx "\n", __func__, offset);
ret = -1;
offset -= pfl->base;
if (pfl->rom_mode) {
offset -= (uint32_t)(long)pfl->storage;
/* Lazy reset to ROMD mode */
if (pfl->wcycle == 0)
pflash_register_memory(pfl, 1);
} else
offset -= pfl->base;
}
offset &= pfl->chip_len - 1;
boff = offset & 0xFF;
if (pfl->width == 2)
@@ -242,12 +241,7 @@ static void pflash_write (pflash_t *pfl, uint32_t offset, uint32_t value,
}
DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d %d\n", __func__,
offset, value, width, pfl->wcycle);
/* WARNING: when the memory area is in ROMD mode, the offset is a
ram offset, not a physical address */
if (pfl->rom_mode)
offset -= (uint32_t)(long)pfl->storage;
else
offset -= pfl->base;
offset -= pfl->base;
offset &= pfl->chip_len - 1;
DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d\n", __func__,

softmmu_template.h

@@ -51,12 +51,13 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int mmu_idx,
void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
target_ulong tlb_addr)
target_ulong addr)
{
DATA_TYPE res;
int index;
index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
@@ -81,7 +82,7 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE res;
int index;
target_ulong tlb_addr;
target_phys_addr_t physaddr;
target_phys_addr_t addend;
void *retaddr;
/* test if there is match for unaligned or IO access */
@@ -90,12 +91,12 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
redo:
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
addend = env->iotlb[mmu_idx][index];
res = glue(io_read, SUFFIX)(addend, addr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
@@ -113,7 +114,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
addend = env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
}
} else {
/* the page is not in the TLB : fill it */
@@ -135,19 +137,19 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
{
DATA_TYPE res, res1, res2;
int index, shift;
target_phys_addr_t physaddr;
target_phys_addr_t addend;
target_ulong tlb_addr, addr1, addr2;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
addend = env->iotlb[mmu_idx][index];
res = glue(io_read, SUFFIX)(addend, addr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* slow unaligned access (it spans two pages) */
@@ -166,7 +168,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
res = (DATA_TYPE)res;
} else {
/* unaligned/aligned access in the same page */
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
addend = env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
}
} else {
/* the page is not in the TLB : fill it */
@@ -185,13 +188,14 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
DATA_TYPE val,
target_ulong tlb_addr,
target_ulong addr,
void *retaddr)
{
int index;
index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
env->mem_write_vaddr = tlb_addr;
env->mem_write_vaddr = addr;
env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
@@ -213,7 +217,7 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int mmu_idx)
{
target_phys_addr_t physaddr;
target_phys_addr_t addend;
target_ulong tlb_addr;
void *retaddr;
int index;
@@ -222,13 +226,13 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
retaddr = GETPC();
glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
addend = env->iotlb[mmu_idx][index];
glue(io_write, SUFFIX)(addend, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
retaddr = GETPC();
@@ -245,7 +249,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
do_unaligned_access(addr, 1, mmu_idx, retaddr);
}
#endif
glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
addend = env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
}
} else {
/* the page is not in the TLB : fill it */
@@ -265,7 +270,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
int mmu_idx,
void *retaddr)
{
target_phys_addr_t physaddr;
target_phys_addr_t addend;
target_ulong tlb_addr;
int index, i;
@@ -273,12 +278,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
addend = env->iotlb[mmu_idx][index];
glue(io_write, SUFFIX)(addend, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
@@ -295,7 +300,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
}
} else {
/* aligned/unaligned access in the same page */
glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
addend = env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
}
} else {
/* the page is not in the TLB : fill it */