memory: Single byte swap along the I/O path

Now that MemOp has been pushed down into the memory API, and
callers are encoding endianness, we can collapse byte swaps
along the I/O path into the accelerator- and target-independent
adjust_endianness.

Collapsing byte swaps along the I/O path enables additional endian
inversion logic, e.g. the SPARC64 Invert Endian TTE bit, with
redundant byte swaps cancelling out.
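
The cancellation works because an endian inversion only flips the
byte-swap bit in the MemOp, and the I/O path now consults that bit
exactly once. A minimal standalone sketch of the idea (illustrative
values, not QEMU code; the MO_* names are local stand-ins modelled
on QEMU's MemOp flags in include/exec/memop.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for QEMU's MemOp bits; values are illustrative. */
    enum {
        MO_16    = 1,
        MO_SIZE  = 3,
        MO_BSWAP = 8,   /* request the opposite of the device byte order */
    };

    static uint16_t bswap16(uint16_t x)
    {
        return (uint16_t)((x << 8) | (x >> 8));
    }

    /* The single swap point: swap only when the MemOp's endianness
     * disagrees with the device's, as adjust_endianness now does. */
    static uint64_t dispatch(uint64_t data, int op, int dev_bswap)
    {
        if ((op & MO_BSWAP) != dev_bswap) {
            switch (op & MO_SIZE) {
            case MO_16:
                data = bswap16((uint16_t)data);
                break;
            /* MO_32/MO_64 would follow the same pattern. */
            }
        }
        return data;
    }

    int main(void)
    {
        int op = MO_16;  /* 16-bit access in the device's byte order */

        op ^= MO_BSWAP;  /* invert, e.g. SPARC64 Invert Endian TTE bit */
        op ^= MO_BSWAP;  /* a second inversion cancels the first */

        /* One swap decision at the boundary: the redundant pair above
         * never causes an actual byte swap to execute. */
        printf("0x%04x\n", (unsigned)dispatch(0x1234, op, 0));
        return 0;
    }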

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
Message-Id: <911ff31af11922a9afba9b7ce128af8b8b80f316.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 9bf825bf3d (parent be5c4787e9)
5 changed files with 23 additions and 142 deletions

accel/tcg/cputlb.c

@@ -1200,38 +1200,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_atomic(env_cpu(env), retaddr);
 }
 
-#ifdef TARGET_WORDS_BIGENDIAN
-#define NEED_BE_BSWAP 0
-#define NEED_LE_BSWAP 1
-#else
-#define NEED_BE_BSWAP 1
-#define NEED_LE_BSWAP 0
-#endif
-
-/*
- * Byte Swap Helper
- *
- * This should all dead code away depending on the build host and
- * access type.
- */
-
-static inline uint64_t handle_bswap(uint64_t val, MemOp op)
-{
-    if ((memop_big_endian(op) && NEED_BE_BSWAP) ||
-        (!memop_big_endian(op) && NEED_LE_BSWAP)) {
-        switch (op & MO_SIZE) {
-        case MO_8: return val;
-        case MO_16: return bswap16(val);
-        case MO_32: return bswap32(val);
-        case MO_64: return bswap64(val);
-        default:
-            g_assert_not_reached();
-        }
-    } else {
-        return val;
-    }
-}
-
 /*
  * Load Helpers
  *
@@ -1306,10 +1274,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             }
         }
 
-        /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */
-        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
-                       mmu_idx, addr, retaddr, access_type, op);
-        return handle_bswap(res, op);
+        return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
+                        mmu_idx, addr, retaddr, access_type, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO).  */
@@ -1552,10 +1518,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             }
         }
 
-        /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */
         io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
-                  handle_bswap(val, op),
-                  addr, retaddr, op);
+                  val, addr, retaddr, op);
         return;
     }
 

exec.c

@@ -3363,14 +3363,9 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
             l = memory_access_size(mr, l, addr1);
             /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
-            val = ldn_p(buf, l);
-            /*
-             * TODO: Merge bswap from ldn_p into memory_region_dispatch_write
-             * by using ldn_he_p and dropping MO_TE to get a host-endian value.
-             */
+            val = ldn_he_p(buf, l);
             result |= memory_region_dispatch_write(mr, addr1, val,
-                                                   size_memop(l) | MO_TE,
-                                                   attrs);
+                                                   size_memop(l), attrs);
         } else {
             /* RAM case */
             ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
@@ -3431,13 +3426,9 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
             /* I/O case */
             release_lock |= prepare_mmio_access(mr);
             l = memory_access_size(mr, l, addr1);
-            /*
-             * TODO: Merge bswap from stn_p into memory_region_dispatch_read
-             * by using stn_he_p and dropping MO_TE to get a host-endian value.
-             */
             result |= memory_region_dispatch_read(mr, addr1, &val,
-                                                  size_memop(l) | MO_TE, attrs);
-            stn_p(buf, l, val);
+                                                  size_memop(l), attrs);
+            stn_he_p(buf, l, val);
         } else {
             /* RAM case */
             ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);

hw/virtio/virtio-pci.c

@@ -544,16 +544,15 @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
         val = pci_get_byte(buf);
         break;
     case 2:
-        val = cpu_to_le16(pci_get_word(buf));
+        val = pci_get_word(buf);
         break;
     case 4:
-        val = cpu_to_le32(pci_get_long(buf));
+        val = pci_get_long(buf);
         break;
     default:
         /* As length is under guest control, handle illegal values. */
         return;
     }
-    /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */
     memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                  MEMTXATTRS_UNSPECIFIED);
 }
@@ -578,7 +577,6 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
     /* Make sure caller aligned buf properly */
     assert(!(((uintptr_t)buf) & (len - 1)));
 
-    /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */
     memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
     switch (len) {
@@ -586,10 +584,10 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
         pci_set_byte(buf, val);
         break;
     case 2:
-        pci_set_word(buf, le16_to_cpu(val));
+        pci_set_word(buf, val);
         break;
     case 4:
-        pci_set_long(buf, le32_to_cpu(val));
+        pci_set_long(buf, val);
         break;
     default:
         /* As length is under guest control, handle illegal values. */

memory.c

@@ -351,32 +351,23 @@ static bool memory_region_big_endian(MemoryRegion *mr)
 #endif
 }
 
-static bool memory_region_wrong_endianness(MemoryRegion *mr)
+static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
 {
-#ifdef TARGET_WORDS_BIGENDIAN
-    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
-#else
-    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
-#endif
-}
-
-static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
-{
-    if (memory_region_wrong_endianness(mr)) {
-        switch (size) {
-        case 1:
+    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
+        switch (op & MO_SIZE) {
+        case MO_8:
             break;
-        case 2:
+        case MO_16:
             *data = bswap16(*data);
             break;
-        case 4:
+        case MO_32:
             *data = bswap32(*data);
             break;
-        case 8:
+        case MO_64:
             *data = bswap64(*data);
             break;
         default:
-            abort();
+            g_assert_not_reached();
         }
     }
 }
@@ -1458,7 +1449,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
     }
 
     r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
-    adjust_endianness(mr, pval, size);
+    adjust_endianness(mr, pval, op);
     return r;
 }
 
@@ -1501,7 +1492,7 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
         return MEMTX_DECODE_ERROR;
     }
 
-    adjust_endianness(mr, &data, size);
+    adjust_endianness(mr, &data, op);
 
     if ((!kvm_eventfds_enabled()) &&
         memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
@@ -2350,7 +2341,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
     unsigned i;
 
     if (size) {
-        adjust_endianness(mr, &mrfd.data, size);
+        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
     }
     memory_region_transaction_begin();
     for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -2385,7 +2376,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
     unsigned i;
 
     if (size) {
-        adjust_endianness(mr, &mrfd.data, size);
+        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
     }
     memory_region_transaction_begin();
     for (i = 0; i < mr->ioeventfd_nb; ++i) {

memory_ldst.inc.c

@@ -38,18 +38,8 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
         release_lock |= prepare_mmio_access(mr);
 
         /* I/O case */
-        /* TODO: Merge bswap32 into memory_region_dispatch_read.  */
         r = memory_region_dispatch_read(mr, addr1, &val,
                                         MO_32 | devend_memop(endian), attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap32(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap32(val);
-        }
-#endif
     } else {
         /* RAM case */
         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -116,18 +106,8 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
         release_lock |= prepare_mmio_access(mr);
 
         /* I/O case */
-        /* TODO: Merge bswap64 into memory_region_dispatch_read.  */
         r = memory_region_dispatch_read(mr, addr1, &val,
                                         MO_64 | devend_memop(endian), attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap64(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap64(val);
-        }
-#endif
     } else {
         /* RAM case */
         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -228,18 +208,8 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
         release_lock |= prepare_mmio_access(mr);
 
         /* I/O case */
-        /* TODO: Merge bswap16 into memory_region_dispatch_read.  */
         r = memory_region_dispatch_read(mr, addr1, &val,
                                         MO_16 | devend_memop(endian), attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap16(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap16(val);
-        }
-#endif
     } else {
         /* RAM case */
         ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -342,17 +312,6 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
     mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (l < 4 || !memory_access_is_direct(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap32(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap32(val);
-        }
-#endif
-        /* TODO: Merge bswap32 into memory_region_dispatch_write.  */
         r = memory_region_dispatch_write(mr, addr1, val,
                                          MO_32 | devend_memop(endian), attrs);
     } else {
@@ -449,17 +408,6 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
     mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap16(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap16(val);
-        }
-#endif
-        /* TODO: Merge bswap16 into memory_region_dispatch_write.  */
         r = memory_region_dispatch_write(mr, addr1, val,
                                          MO_16 | devend_memop(endian), attrs);
     } else {
@@ -524,17 +472,6 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
     mr = TRANSLATE(addr, &addr1, &l, true, attrs);
     if (l < 8 || !memory_access_is_direct(mr, true)) {
         release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-        if (endian == DEVICE_LITTLE_ENDIAN) {
-            val = bswap64(val);
-        }
-#else
-        if (endian == DEVICE_BIG_ENDIAN) {
-            val = bswap64(val);
-        }
-#endif
-        /* TODO: Merge bswap64 into memory_region_dispatch_write.  */
         r = memory_region_dispatch_write(mr, addr1, val,
                                          MO_64 | devend_memop(endian), attrs);
     } else {