Merge remote-tracking branch 'qemu-kvm/memory/core' into staging

* qemu-kvm/memory/core:
  memory: get rid of cpu_register_io_memory()
  memory: dispatch directly via MemoryRegion
  exec: fix code tlb entry misused as iotlb in get_page_addr_code()
  memory: store section indices in iotlb instead of io indices
  memory: make phys_page_find() return an unadjusted section
commit 79122e933c
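As a rough sketch of the interface this series converges on (hypothetical helper, not code from the merge): an iotlb entry now carries a phys_sections[] index in its low bits, so MMIO dispatch first resolves a MemoryRegion with iotlb_to_region() and then calls io_mem_read()/io_mem_write() on it, instead of indexing an io_mem table. The sketch only assumes the QEMU-internal declarations visible in the diff below.

/* Hypothetical helper, sketched against the interfaces introduced below. */
static uint64_t mmio_read_via_iotlb(target_phys_addr_t iotlb_entry,
                                    target_phys_addr_t offset,
                                    unsigned size)
{
    /* Low bits of the iotlb entry now index phys_sections[], not io_mem[]. */
    MemoryRegion *mr = iotlb_to_region(iotlb_entry);

    /* Dispatch goes straight through the region; no io_index lookup. */
    return io_mem_read(mr, offset, size);
}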
@@ -498,14 +498,6 @@ extern RAMList ram_list;
extern const char *mem_path;
extern int mem_prealloc;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
3 flags. The ROMD code stores the page ram offset in iotlb entry,
so only a limited number of ids are avaiable. */

#define IO_MEM_NB_ENTRIES (1 << TARGET_PAGE_BITS)

/* Flags stored in the low bits of the TLB virtual address. These are
defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
@@ -299,10 +299,11 @@ extern void *tci_tb_ptr;

#if !defined(CONFIG_USER_ONLY)

uint64_t io_mem_read(int index, target_phys_addr_t addr, unsigned size);
void io_mem_write(int index, target_phys_addr_t addr, uint64_t value,
unsigned size);
extern struct MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
struct MemoryRegion *iotlb_to_region(target_phys_addr_t index);
uint64_t io_mem_read(struct MemoryRegion *mr, target_phys_addr_t addr,
unsigned size);
void io_mem_write(struct MemoryRegion *mr, target_phys_addr_t addr,
uint64_t value, unsigned size);

void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
void *retaddr);
@@ -32,9 +32,6 @@ void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

struct MemoryRegion;
int cpu_register_io_memory(MemoryRegion *mr);
void cpu_unregister_io_memory(int table_address);

struct MemoryRegionSection;
void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
bool readonly);

exec.c
@@ -191,6 +191,9 @@ typedef struct PhysPageEntry PhysPageEntry;
static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
uint16_t is_leaf : 1;
@@ -211,9 +214,6 @@ static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif
@@ -480,13 +480,11 @@ static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection phys_page_find(target_phys_addr_t index)
static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
PhysPageEntry lp = phys_map;
PhysPageEntry *p;
int i;
MemoryRegionSection section;
target_phys_addr_t delta;
uint16_t s_index = phys_section_unassigned;

for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
@@ -499,15 +497,15 @@ static MemoryRegionSection phys_page_find(target_phys_addr_t index)

s_index = lp.ptr;
not_found:
section = phys_sections[s_index];
index <<= TARGET_PAGE_BITS;
assert(section.offset_within_address_space <= index
&& index <= section.offset_within_address_space + section.size-1);
delta = index - section.offset_within_address_space;
section.offset_within_address_space += delta;
section.offset_within_region += delta;
section.size -= delta;
return section;
return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
target_phys_addr_t addr)
{
addr -= section->offset_within_address_space;
addr += section->offset_within_region;
return addr;
}

static void tlb_protect_code(ram_addr_t ram_addr);
@@ -1468,17 +1466,16 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
target_phys_addr_t addr;
ram_addr_t ram_addr;
MemoryRegionSection section;
MemoryRegionSection *section;

addr = cpu_get_phys_page_debug(env, pc);
section = phys_page_find(addr >> TARGET_PAGE_BITS);
if (!(memory_region_is_ram(section.mr)
|| (section.mr->rom_device && section.mr->readable))) {
if (!(memory_region_is_ram(section->mr)
|| (section->mr->rom_device && section->mr->readable))) {
return;
}
ram_addr = (memory_region_get_ram_addr(section.mr)
+ section.offset_within_region) & TARGET_PAGE_MASK;
ram_addr |= (pc & ~TARGET_PAGE_MASK);
ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
+ section_addr(section, addr);
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
@@ -2181,7 +2178,7 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
target_phys_addr_t paddr, int prot,
int mmu_idx, target_ulong size)
{
MemoryRegionSection section;
MemoryRegionSection *section;
unsigned int index;
target_ulong address;
target_ulong code_address;
@@ -2202,24 +2199,24 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
#endif

address = vaddr;
if (!is_ram_rom_romd(&section)) {
if (!is_ram_rom_romd(section)) {
/* IO memory case (romd handled later) */
address |= TLB_MMIO;
}
if (is_ram_rom_romd(&section)) {
addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
+ section.offset_within_region);
if (is_ram_rom_romd(section)) {
addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
+ section_addr(section, paddr);
} else {
addend = 0;
}
if (is_ram_rom(&section)) {
if (is_ram_rom(section)) {
/* Normal RAM. */
iotlb = (memory_region_get_ram_addr(section.mr)
+ section.offset_within_region) & TARGET_PAGE_MASK;
if (!section.readonly)
iotlb |= io_mem_notdirty.ram_addr;
iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
+ section_addr(section, paddr);
if (!section->readonly)
iotlb |= phys_section_notdirty;
else
iotlb |= io_mem_rom.ram_addr;
iotlb |= phys_section_rom;
} else {
/* IO handlers are currently passed a physical address.
It would be nice to pass an offset from the base address
@@ -2227,8 +2224,8 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
and avoid full address decoding in every device.
We can't use the high bits of pd for this because
IO_MEM_ROMD uses these as a ram address. */
iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
iotlb += section.offset_within_region;
iotlb = section - phys_sections;
iotlb += section_addr(section, paddr);
}

code_address = address;
@@ -2238,7 +2235,7 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
iotlb = io_mem_watch.ram_addr + paddr;
iotlb = phys_section_watch + paddr;
address |= TLB_MMIO;
break;
}
@@ -2261,14 +2258,14 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
te->addr_code = -1;
}
if (prot & PAGE_WRITE) {
if ((memory_region_is_ram(section.mr) && section.readonly)
|| is_romd(&section)) {
if ((memory_region_is_ram(section->mr) && section->readonly)
|| is_romd(section)) {
/* Write access calls the I/O callback. */
te->addr_write = address | TLB_MMIO;
} else if (memory_region_is_ram(section.mr)
} else if (memory_region_is_ram(section->mr)
&& !cpu_physical_memory_is_dirty(
section.mr->ram_addr
+ section.offset_within_region)) {
section->mr->ram_addr
+ section_addr(section, paddr))) {
te->addr_write = address | TLB_NOTDIRTY;
} else {
te->addr_write = address;
@@ -2631,22 +2628,22 @@ static void register_subpage(MemoryRegionSection *section)
subpage_t *subpage;
target_phys_addr_t base = section->offset_within_address_space
& TARGET_PAGE_MASK;
MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
MemoryRegionSection subsection = {
.offset_within_address_space = base,
.size = TARGET_PAGE_SIZE,
};
target_phys_addr_t start, end;

assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

if (!(existing.mr->subpage)) {
if (!(existing->mr->subpage)) {
subpage = subpage_init(base);
subsection.mr = &subpage->iomem;
phys_page_set(base >> TARGET_PAGE_BITS, 1,
phys_section_add(&subsection));
} else {
subpage = container_of(existing.mr, subpage_t, iomem);
subpage = container_of(existing->mr, subpage_t, iomem);
}
start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
end = start + section->size;
@@ -3399,7 +3396,7 @@ static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
addr += mmio->base;
addr -= section->offset_within_address_space;
addr += section->offset_within_region;
return io_mem_read(section->mr->ram_addr, addr, len);
return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
@@ -3418,7 +3415,7 @@ static void subpage_write(void *opaque, target_phys_addr_t addr,
addr += mmio->base;
addr -= section->offset_within_address_space;
addr += section->offset_within_region;
io_mem_write(section->mr->ram_addr, addr, value, len);
io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
@@ -3503,53 +3500,6 @@ static subpage_t *subpage_init(target_phys_addr_t base)
return mmio;
}

static int get_free_io_mem_idx(void)
{
int i;

for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
if (!io_mem_used[i]) {
io_mem_used[i] = 1;
return i;
}
fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
return -1;
}

/* mem_read and mem_write are arrays of functions containing the
function to access byte (index 0), word (index 1) and dword (index
2). Functions can be omitted with a NULL function pointer.
If io_index is non zero, the corresponding io zone is
modified. If it is zero, a new io zone is allocated. The return
value can be used with cpu_register_physical_memory(). (-1) is
returned if error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
if (io_index <= 0) {
io_index = get_free_io_mem_idx();
if (io_index == -1)
return io_index;
} else {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
}

io_mem_region[io_index] = mr;

return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
return cpu_register_io_memory_fixed(0, mr);
}

void cpu_unregister_io_memory(int io_index)
{
io_mem_region[io_index] = NULL;
io_mem_used[io_index] = 0;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
MemoryRegionSection section = {
@@ -3562,13 +3512,14 @@ static uint16_t dummy_section(MemoryRegion *mr)
return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
int i;

/* Must be first: */
memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
assert(io_mem_ram.ram_addr == 0);
memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
"unassigned", UINT64_MAX);
@@ -3576,9 +3527,6 @@ static void io_mem_init(void)
"notdirty", UINT64_MAX);
memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
"subpage-ram", UINT64_MAX);
for (i=0; i<5; i++)
io_mem_used[i] = 1;

memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
"watch", UINT64_MAX);
}
@@ -3589,6 +3537,9 @@ static void core_begin(MemoryListener *listener)
phys_sections_clear();
phys_map.ptr = PHYS_MAP_NODE_NIL;
phys_section_unassigned = dummy_section(&io_mem_unassigned);
phys_section_notdirty = dummy_section(&io_mem_notdirty);
phys_section_rom = dummy_section(&io_mem_rom);
phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
@@ -3826,11 +3777,11 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
int len, int is_write)
{
int l, io_index;
int l;
uint8_t *ptr;
uint32_t val;
target_phys_addr_t page;
MemoryRegionSection section;
MemoryRegionSection *section;

while (len > 0) {
page = addr & TARGET_PAGE_MASK;
@@ -3840,35 +3791,31 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
section = phys_page_find(page >> TARGET_PAGE_BITS);

if (is_write) {
if (!memory_region_is_ram(section.mr)) {
if (!memory_region_is_ram(section->mr)) {
target_phys_addr_t addr1;
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
addr1 = (addr & ~TARGET_PAGE_MASK)
+ section.offset_within_region;
addr1 = section_addr(section, addr);
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit write access */
val = ldl_p(buf);
io_mem_write(io_index, addr1, val, 4);
io_mem_write(section->mr, addr1, val, 4);
l = 4;
} else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit write access */
val = lduw_p(buf);
io_mem_write(io_index, addr1, val, 2);
io_mem_write(section->mr, addr1, val, 2);
l = 2;
} else {
/* 8 bit write access */
val = ldub_p(buf);
io_mem_write(io_index, addr1, val, 1);
io_mem_write(section->mr, addr1, val, 1);
l = 1;
}
} else if (!section.readonly) {
} else if (!section->readonly) {
ram_addr_t addr1;
addr1 = (memory_region_get_ram_addr(section.mr)
+ section.offset_within_region)
| (addr & ~TARGET_PAGE_MASK);
addr1 = memory_region_get_ram_addr(section->mr)
+ section_addr(section, addr);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
@@ -3882,34 +3829,31 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
qemu_put_ram_ptr(ptr);
}
} else {
if (!is_ram_rom_romd(&section)) {
if (!is_ram_rom_romd(section)) {
target_phys_addr_t addr1;
/* I/O case */
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
addr1 = (addr & ~TARGET_PAGE_MASK)
+ section.offset_within_region;
addr1 = section_addr(section, addr);
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit read access */
val = io_mem_read(io_index, addr1, 4);
val = io_mem_read(section->mr, addr1, 4);
stl_p(buf, val);
l = 4;
} else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit read access */
val = io_mem_read(io_index, addr1, 2);
val = io_mem_read(section->mr, addr1, 2);
stw_p(buf, val);
l = 2;
} else {
/* 8 bit read access */
val = io_mem_read(io_index, addr1, 1);
val = io_mem_read(section->mr, addr1, 1);
stb_p(buf, val);
l = 1;
}
} else {
/* RAM case */
ptr = qemu_get_ram_ptr(section.mr->ram_addr
+ section.offset_within_region);
memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
ptr = qemu_get_ram_ptr(section->mr->ram_addr)
+ section_addr(section, addr);
memcpy(buf, ptr, l);
qemu_put_ram_ptr(ptr);
}
}
@@ -3926,7 +3870,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
int l;
uint8_t *ptr;
target_phys_addr_t page;
MemoryRegionSection section;
MemoryRegionSection *section;

while (len > 0) {
page = addr & TARGET_PAGE_MASK;
@@ -3935,13 +3879,12 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
l = len;
section = phys_page_find(page >> TARGET_PAGE_BITS);

if (!is_ram_rom_romd(&section)) {
if (!is_ram_rom_romd(section)) {
/* do nothing */
} else {
unsigned long addr1;
addr1 = (memory_region_get_ram_addr(section.mr)
+ section.offset_within_region)
+ (addr & ~TARGET_PAGE_MASK);
addr1 = memory_region_get_ram_addr(section->mr)
+ section_addr(section, addr);
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
@@ -4014,7 +3957,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
target_phys_addr_t todo = 0;
int l;
target_phys_addr_t page;
MemoryRegionSection section;
MemoryRegionSection *section;
ram_addr_t raddr = RAM_ADDR_MAX;
ram_addr_t rlen;
void *ret;
@@ -4026,7 +3969,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
l = len;
section = phys_page_find(page >> TARGET_PAGE_BITS);

if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
if (todo || bounce.buffer) {
break;
}
@@ -4041,9 +3984,8 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
return bounce.buffer;
}
if (!todo) {
raddr = memory_region_get_ram_addr(section.mr)
+ section.offset_within_region
+ (addr & ~TARGET_PAGE_MASK);
raddr = memory_region_get_ram_addr(section->mr)
+ section_addr(section, addr);
}

len -= l;
@@ -4099,19 +4041,16 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
enum device_endian endian)
{
int io_index;
uint8_t *ptr;
uint32_t val;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!is_ram_rom_romd(&section)) {
if (!is_ram_rom_romd(section)) {
/* I/O case */
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
val = io_mem_read(io_index, addr, 4);
addr = section_addr(section, addr);
val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
@@ -4123,10 +4062,9 @@ static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
& TARGET_PAGE_MASK)
+ section.offset_within_region) +
(addr & ~TARGET_PAGE_MASK);
+ section_addr(section, addr));
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = ldl_le_p(ptr);
@@ -4161,34 +4099,30 @@ uint32_t ldl_be_phys(target_phys_addr_t addr)
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
enum device_endian endian)
{
int io_index;
uint8_t *ptr;
uint64_t val;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!is_ram_rom_romd(&section)) {
if (!is_ram_rom_romd(section)) {
/* I/O case */
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
addr = section_addr(section, addr);

/* XXX This is broken when device endian != cpu endian.
Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
val = io_mem_read(io_index, addr, 4) << 32;
val |= io_mem_read(io_index, addr + 4, 4);
val = io_mem_read(section->mr, addr, 4) << 32;
val |= io_mem_read(section->mr, addr + 4, 4);
#else
val = io_mem_read(io_index, addr, 4);
val |= io_mem_read(io_index, addr + 4, 4) << 32;
val = io_mem_read(section->mr, addr, 4);
val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
& TARGET_PAGE_MASK)
+ section.offset_within_region)
+ (addr & ~TARGET_PAGE_MASK);
+ section_addr(section, addr));
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = ldq_le_p(ptr);
@@ -4231,19 +4165,16 @@ uint32_t ldub_phys(target_phys_addr_t addr)
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
enum device_endian endian)
{
int io_index;
uint8_t *ptr;
uint64_t val;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!is_ram_rom_romd(&section)) {
if (!is_ram_rom_romd(section)) {
/* I/O case */
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
val = io_mem_read(io_index, addr, 2);
addr = section_addr(section, addr);
val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
@@ -4255,10 +4186,9 @@ static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
& TARGET_PAGE_MASK)
+ section.offset_within_region)
+ (addr & ~TARGET_PAGE_MASK);
+ section_addr(section, addr));
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = lduw_le_p(ptr);
@@ -4294,25 +4224,21 @@ uint32_t lduw_be_phys(target_phys_addr_t addr)
bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
int io_index;
uint8_t *ptr;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!memory_region_is_ram(section.mr) || section.readonly) {
if (memory_region_is_ram(section.mr)) {
io_index = io_mem_rom.ram_addr;
} else {
io_index = memory_region_get_ram_addr(section.mr);
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
section = &phys_sections[phys_section_rom];
}
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
io_mem_write(io_index, addr, val, 4);
io_mem_write(section->mr, addr, val, 4);
} else {
unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
& TARGET_PAGE_MASK)
+ section.offset_within_region
+ (addr & ~TARGET_PAGE_MASK);
+ section_addr(section, addr);
ptr = qemu_get_ram_ptr(addr1);
stl_p(ptr, val);
@@ -4330,32 +4256,27 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
int io_index;
uint8_t *ptr;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!memory_region_is_ram(section.mr) || section.readonly) {
if (memory_region_is_ram(section.mr)) {
io_index = io_mem_rom.ram_addr;
} else {
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
section = &phys_sections[phys_section_rom];
}
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
io_mem_write(io_index, addr, val >> 32, 4);
io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
io_mem_write(section->mr, addr, val >> 32, 4);
io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
io_mem_write(io_index, addr, (uint32_t)val, 4);
io_mem_write(io_index, addr + 4, val >> 32, 4);
io_mem_write(section->mr, addr, (uint32_t)val, 4);
io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
} else {
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
& TARGET_PAGE_MASK)
+ section.offset_within_region)
+ (addr & ~TARGET_PAGE_MASK);
+ section_addr(section, addr));
stq_p(ptr, val);
}
}
@@ -4364,20 +4285,16 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
enum device_endian endian)
{
int io_index;
uint8_t *ptr;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!memory_region_is_ram(section.mr) || section.readonly) {
if (memory_region_is_ram(section.mr)) {
io_index = io_mem_rom.ram_addr;
} else {
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
section = &phys_sections[phys_section_rom];
}
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
@@ -4387,12 +4304,11 @@ static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
val = bswap32(val);
}
#endif
io_mem_write(io_index, addr, val, 4);
io_mem_write(section->mr, addr, val, 4);
} else {
unsigned long addr1;
addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
+ section.offset_within_region
+ (addr & ~TARGET_PAGE_MASK);
addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
+ section_addr(section, addr);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
switch (endian) {
@@ -4442,20 +4358,16 @@ void stb_phys(target_phys_addr_t addr, uint32_t val)
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
enum device_endian endian)
{
int io_index;
uint8_t *ptr;
MemoryRegionSection section;
MemoryRegionSection *section;

section = phys_page_find(addr >> TARGET_PAGE_BITS);

if (!memory_region_is_ram(section.mr) || section.readonly) {
if (memory_region_is_ram(section.mr)) {
io_index = io_mem_rom.ram_addr;
} else {
io_index = memory_region_get_ram_addr(section.mr)
& (IO_MEM_NB_ENTRIES - 1);
if (!memory_region_is_ram(section->mr) || section->readonly) {
addr = section_addr(section, addr);
if (memory_region_is_ram(section->mr)) {
section = &phys_sections[phys_section_rom];
}
addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
@@ -4465,11 +4377,11 @@ static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
val = bswap16(val);
}
#endif
io_mem_write(io_index, addr, val, 2);
io_mem_write(section->mr, addr, val, 2);
} else {
unsigned long addr1;
addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
+ section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
+ section_addr(section, addr);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
switch (endian) {
@@ -4677,6 +4589,7 @@ tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
int mmu_idx, page_index, pd;
void *p;
MemoryRegion *mr;

page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = cpu_mmu_index(env1);
@@ -4684,9 +4597,10 @@ tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
(addr & TARGET_PAGE_MASK))) {
ldub_code(addr);
}
pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
&& !io_mem_region[pd]->rom_device) {
pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
mr = iotlb_to_region(pd);
if (mr != &io_mem_ram && mr != &io_mem_rom
&& mr != &io_mem_notdirty && !mr->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else

memory.c
@@ -781,13 +781,11 @@ static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
cpu_unregister_io_memory(mr->ram_addr & ~TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
@@ -942,7 +940,7 @@ void memory_region_init_io(MemoryRegion *mr,
mr->opaque = opaque;
mr->terminates = true;
mr->destructor = memory_region_destructor_iomem;
mr->ram_addr = cpu_register_io_memory(mr);
mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
@@ -992,7 +990,6 @@ void memory_region_init_rom_device(MemoryRegion *mr,
mr->rom_device = true;
mr->destructor = memory_region_destructor_rom_device;
mr->ram_addr = qemu_ram_alloc(size, mr);
mr->ram_addr |= cpu_register_io_memory(mr);
}

static uint64_t invalid_read(void *opaque, target_phys_addr_t addr,
@@ -1501,15 +1498,15 @@ void set_system_io_map(MemoryRegion *mr)
memory_region_update_topology(NULL);
}

uint64_t io_mem_read(int io_index, target_phys_addr_t addr, unsigned size)
uint64_t io_mem_read(MemoryRegion *mr, target_phys_addr_t addr, unsigned size)
{
return memory_region_dispatch_read(io_mem_region[io_index], addr, size);
return memory_region_dispatch_read(mr, addr, size);
}

void io_mem_write(int io_index, target_phys_addr_t addr,
void io_mem_write(MemoryRegion *mr, target_phys_addr_t addr,
uint64_t val, unsigned size)
{
memory_region_dispatch_write(io_mem_region[io_index], addr, val, size);
memory_region_dispatch_write(mr, addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;
@@ -62,27 +62,27 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
void *retaddr)
{
DATA_TYPE res;
int index;
index = physaddr & (IO_MEM_NB_ENTRIES - 1);
MemoryRegion *mr = iotlb_to_region(physaddr);

physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
env->mem_io_pc = (unsigned long)retaddr;
if (index != io_mem_ram.ram_addr && index != io_mem_rom.ram_addr
&& index != io_mem_unassigned.ram_addr
&& index != io_mem_notdirty.ram_addr
if (mr != &io_mem_ram && mr != &io_mem_rom
&& mr != &io_mem_unassigned
&& mr != &io_mem_notdirty
&& !can_do_io(env)) {
cpu_io_recompile(env, retaddr);
}

env->mem_io_vaddr = addr;
#if SHIFT <= 2
res = io_mem_read(index, physaddr, 1 << SHIFT);
res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
res = io_mem_read(index, physaddr, 4) << 32;
res |= io_mem_read(index, physaddr + 4, 4);
res = io_mem_read(mr, physaddr, 4) << 32;
res |= io_mem_read(mr, physaddr + 4, 4);
#else
res = io_mem_read(index, physaddr, 4);
res |= io_mem_read(index, physaddr + 4, 4) << 32;
res = io_mem_read(mr, physaddr, 4);
res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
return res;
@@ -207,12 +207,12 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
target_ulong addr,
void *retaddr)
{
int index;
index = physaddr & (IO_MEM_NB_ENTRIES - 1);
MemoryRegion *mr = iotlb_to_region(physaddr);

physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (index != io_mem_ram.ram_addr && index != io_mem_rom.ram_addr
&& index != io_mem_unassigned.ram_addr
&& index != io_mem_notdirty.ram_addr
if (mr != &io_mem_ram && mr != &io_mem_rom
&& mr != &io_mem_unassigned
&& mr != &io_mem_notdirty
&& !can_do_io(env)) {
cpu_io_recompile(env, retaddr);
}
@@ -220,14 +220,14 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
env->mem_io_vaddr = addr;
env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
io_mem_write(index, physaddr, val, 1 << SHIFT);
io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
io_mem_write(index, physaddr, (val >> 32), 4);
io_mem_write(index, physaddr + 4, (uint32_t)val, 4);
io_mem_write(mr, physaddr, (val >> 32), 4);
io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
io_mem_write(index, physaddr, (uint32_t)val, 4);
io_mem_write(index, physaddr + 4, val >> 32, 4);
io_mem_write(mr, physaddr, (uint32_t)val, 4);
io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}
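For device code the visible change in memory.c above is that memory_region_init_io() no longer allocates an io_mem index; mr->ram_addr is left at ~(ram_addr_t)0 and the region's ops are reached through memory_region_dispatch_read()/write() via io_mem_read()/io_mem_write(). A minimal sketch under that assumption (the device, its ops, and all names below are made up for illustration, not part of the merge):

/* Sketch only: a hypothetical MMIO region registered under the new scheme. */
static uint64_t demo_read(void *opaque, target_phys_addr_t addr, unsigned size)
{
    return 0x42;    /* placeholder value */
}

static void demo_write(void *opaque, target_phys_addr_t addr,
                       uint64_t val, unsigned size)
{
    /* writes ignored in this sketch */
}

static const MemoryRegionOps demo_ops = {
    .read = demo_read,
    .write = demo_write,
};

static MemoryRegion demo_mr;

static void demo_init(void)
{
    /* No cpu_register_io_memory() call any more; dispatch finds the
     * region through the phys_sections/iotlb machinery shown above. */
    memory_region_init_io(&demo_mr, &demo_ops, NULL, "demo", 0x1000);
}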