memory: split accesses even when the old MMIO callbacks are used

This is useful for 64-bit memory accesses.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
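Not part of the patch, but to illustrate why this matters for 64-bit accesses: an old_mmio callback transfers at most 4 bytes per call, so an 8-byte access has to be assembled from smaller callback invocations. The standalone sketch below mimics the clamp/mask/shift logic of access_with_adjusted_size() and memory_region_oldmmio_read_accessor(); dummy_read32() and split_read() are made-up names, and the little-endian assembly order is an assumption of the sketch rather than a statement about the QEMU API.

/*
 * Standalone sketch (not QEMU code).  An "old MMIO" read callback can
 * transfer at most 4 bytes, so a 64-bit access is split into two 4-byte
 * reads and reassembled with mask/shift, the way access_with_adjusted_size()
 * drives memory_region_oldmmio_read_accessor() in the patch below.
 * dummy_read32() and split_read() are hypothetical names.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 4-byte-only device read callback. */
static uint64_t dummy_read32(uint64_t addr)
{
    static const uint32_t regs[2] = { 0x11223344, 0x55667788 };
    return regs[(addr / 4) % 2];
}

/* Split a `size`-byte access into pieces of at most 4 bytes, in the spirit
 * of access_with_adjusted_size(addr, &data, size, 1, 4, ..., mr). */
static uint64_t split_read(uint64_t addr, unsigned size)
{
    unsigned access_size = size < 4 ? size : 4;        /* clamp to 4 bytes */
    uint64_t access_mask = -1ULL >> (64 - access_size * 8);
    uint64_t data = 0;
    unsigned i;

    for (i = 0; i < size; i += access_size) {
        uint64_t tmp = dummy_read32(addr + i);         /* one old-style callback */
        data |= (tmp & access_mask) << (i * 8);        /* accumulate, LE order */
    }
    return data;
}

int main(void)
{
    /* An 8-byte access becomes two 32-bit callback invocations. */
    printf("0x%016" PRIx64 "\n", split_read(0, 8));    /* 0x5566778811223344 */
    return 0;
}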
commit ce5d2f331e (parent 08521e28c7)
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2013-05-24 17:45:48 +02:00

1 changed file with 46 additions and 17 deletions

@@ -302,6 +302,20 @@ static void flatview_simplify(FlatView *view)
     }
 }
 
+static void memory_region_oldmmio_read_accessor(void *opaque,
+                                                hwaddr addr,
+                                                uint64_t *value,
+                                                unsigned size,
+                                                unsigned shift,
+                                                uint64_t mask)
+{
+    MemoryRegion *mr = opaque;
+    uint64_t tmp;
+
+    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
+    *value |= (tmp & mask) << shift;
+}
+
 static void memory_region_read_accessor(void *opaque,
                                         hwaddr addr,
                                         uint64_t *value,
@@ -319,6 +333,20 @@ static void memory_region_read_accessor(void *opaque,
     *value |= (tmp & mask) << shift;
 }
 
+static void memory_region_oldmmio_write_accessor(void *opaque,
+                                                 hwaddr addr,
+                                                 uint64_t *value,
+                                                 unsigned size,
+                                                 unsigned shift,
+                                                 uint64_t mask)
+{
+    MemoryRegion *mr = opaque;
+    uint64_t tmp;
+
+    tmp = (*value >> shift) & mask;
+    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
+}
+
 static void memory_region_write_accessor(void *opaque,
                                          hwaddr addr,
                                          uint64_t *value,
@@ -359,6 +387,8 @@ static void access_with_adjusted_size(hwaddr addr,
     if (!access_size_max) {
         access_size_max = 4;
     }
+
+    /* FIXME: support unaligned access? */
     access_size = MAX(MIN(size, access_size_max), access_size_min);
     access_mask = -1ULL >> (64 - access_size * 8);
     for (i = 0; i < size; i += access_size) {
@@ -902,16 +932,16 @@ static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
         return unassigned_mem_read(mr, addr, size);
     }
 
-    if (!mr->ops->read) {
-        return mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
+    if (mr->ops->read) {
+        access_with_adjusted_size(addr, &data, size,
+                                  mr->ops->impl.min_access_size,
+                                  mr->ops->impl.max_access_size,
+                                  memory_region_read_accessor, mr);
+    } else {
+        access_with_adjusted_size(addr, &data, size, 1, 4,
+                                  memory_region_oldmmio_read_accessor, mr);
     }
 
-    /* FIXME: support unaligned access */
-    access_with_adjusted_size(addr, &data, size,
-                              mr->ops->impl.min_access_size,
-                              mr->ops->impl.max_access_size,
-                              memory_region_read_accessor, mr);
-
     return data;
 }
@@ -956,16 +986,15 @@ static void memory_region_dispatch_write(MemoryRegion *mr,
 
     adjust_endianness(mr, &data, size);
 
-    if (!mr->ops->write) {
-        mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, data);
-        return;
+    if (mr->ops->write) {
+        access_with_adjusted_size(addr, &data, size,
+                                  mr->ops->impl.min_access_size,
+                                  mr->ops->impl.max_access_size,
+                                  memory_region_write_accessor, mr);
+    } else {
+        access_with_adjusted_size(addr, &data, size, 1, 4,
+                                  memory_region_oldmmio_write_accessor, mr);
     }
-
-    /* FIXME: support unaligned access */
-    access_with_adjusted_size(addr, &data, size,
-                              mr->ops->impl.min_access_size,
-                              mr->ops->impl.max_access_size,
-                              memory_region_write_accessor, mr);
 }
 
 void memory_region_init_io(MemoryRegion *mr,