Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190903' into staging

Allow page table bit to swap endianness.
Reorganize watchpoints out of i/o path.
Return host address from probe_write / probe_access.

# gpg: Signature made Tue 03 Sep 2019 16:47:50 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190903: (36 commits)
  tcg: Factor out probe_write() logic into probe_access()
  tcg: Make probe_write() return a pointer to the host page
  s390x/tcg: Pass a size to probe_write() in do_csst()
  hppa/tcg: Call probe_write() also for CONFIG_USER_ONLY
  mips/tcg: Call probe_write() for CONFIG_USER_ONLY as well
  tcg: Enforce single page access in probe_write()
  tcg: Factor out CONFIG_USER_ONLY probe_write() from s390x code
  s390x/tcg: Fix length calculation in probe_write_access()
  s390x/tcg: Use guest_addr_valid() instead of h2g_valid() in probe_write_access()
  tcg: Check for watchpoints in probe_write()
  cputlb: Handle watchpoints via TLB_WATCHPOINT
  cputlb: Remove double-alignment in store_helper
  cputlb: Fix size operand for tlb_fill on unaligned store
  exec: Factor out cpu_watchpoint_address_matches
  cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
  exec: Factor out core logic of check_watchpoint()
  exec: Move user-only watchpoint stubs inline
  target/sparc: sun4u Invert Endian TTE bit
  target/sparc: Add TLB entry with attributes
  cputlb: Byte swap memory transaction attribute
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 9de65783e1 by Peter Maydell, 2019-09-04 16:29:18 +01:00
57 changed files with 918 additions and 865 deletions
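
Before the per-file diffs, a note on the recurring API change: memory_region_dispatch_read/write now take a MemOp rather than a raw byte size, so the access width and endianness travel in one argument. A hedged before/after sketch of the calling convention (the surrounding device code is illustrative, not from this series):

    /* Old calling convention: width in bytes, endianness handled ad hoc. */
    memory_region_dispatch_write(mr, addr, val, 4, MEMTXATTRS_UNSPECIFIED);

    /* New calling convention: width and endianness carried by one MemOp.
     * Target-endian devices use MO_TE; fixed-endian devices use MO_LE/MO_BE. */
    memory_region_dispatch_write(mr, addr, val, size_memop(4) | MO_TE,
                                 MEMTXATTRS_UNSPECIFIED);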


@ -1890,6 +1890,7 @@ M: Paolo Bonzini <pbonzini@redhat.com>
S: Supported
F: include/exec/ioport.h
F: ioport.c
F: include/exec/memop.h
F: include/exec/memory.h
F: include/exec/ram_addr.h
F: memory.c


@ -710,6 +710,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
hwaddr iotlb, xlat, sz, paddr_page;
target_ulong vaddr_page;
int asidx = cpu_asidx_from_attrs(cpu, attrs);
int wp_flags;
assert_cpu_is_self(cpu);
@ -732,11 +733,12 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
address = vaddr_page;
if (size < TARGET_PAGE_SIZE) {
/*
* Slow-path the TLB entries; we will repeat the MMU check and TLB
* fill on every access.
*/
address |= TLB_RECHECK;
/* Repeat the MMU check and TLB fill on every access. */
address |= TLB_INVALID_MASK;
}
if (attrs.byte_swap) {
/* Force the access through the I/O slow path. */
address |= TLB_MMIO;
}
if (!memory_region_is_ram(section->mr) &&
!memory_region_is_romd(section->mr)) {
@ -751,6 +753,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
code_address = address;
iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
paddr_page, xlat, prot, &address);
wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
TARGET_PAGE_SIZE);
index = tlb_index(env, mmu_idx, vaddr_page);
te = tlb_entry(env, mmu_idx, vaddr_page);
@ -804,6 +808,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
tn.addend = addend - vaddr_page;
if (prot & PAGE_READ) {
tn.addr_read = address;
if (wp_flags & BP_MEM_READ) {
tn.addr_read |= TLB_WATCHPOINT;
}
} else {
tn.addr_read = -1;
}
@ -830,6 +837,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
if (prot & PAGE_WRITE_INV) {
tn.addr_write |= TLB_INVALID_MASK;
}
if (wp_flags & BP_MEM_WRITE) {
tn.addr_write |= TLB_WATCHPOINT;
}
}
copy_tlb_helper_locked(te, &tn);
@ -881,7 +891,7 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, int size)
MMUAccessType access_type, MemOp op)
{
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
@ -891,6 +901,10 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
if (iotlbentry->attrs.byte_swap) {
op ^= MO_BSWAP;
}
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@ -906,14 +920,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
qemu_mutex_lock_iothread();
locked = true;
}
r = memory_region_dispatch_read(mr, mr_offset,
&val, size, iotlbentry->attrs);
r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset +
section->offset_within_address_space -
section->offset_within_region;
cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
mmu_idx, iotlbentry->attrs, r, retaddr);
}
if (locked) {
@ -925,7 +938,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, uint64_t val, target_ulong addr,
uintptr_t retaddr, int size)
uintptr_t retaddr, MemOp op)
{
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
@ -934,6 +947,10 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
if (iotlbentry->attrs.byte_swap) {
op ^= MO_BSWAP;
}
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@ -947,15 +964,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
qemu_mutex_lock_iothread();
locked = true;
}
r = memory_region_dispatch_write(mr, mr_offset,
val, size, iotlbentry->attrs);
r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset +
section->offset_within_address_space -
section->offset_within_region;
cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
mmu_idx, iotlbentry->attrs, r, retaddr);
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
retaddr);
}
if (locked) {
qemu_mutex_unlock_iothread();
@ -1015,10 +1032,15 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
(ADDR) & TARGET_PAGE_MASK)
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
* is actually a ram_addr_t (in system mode; the user mode emulation
* version of this function returns a guest virtual address).
/*
* Return a ram_addr_t for the virtual address for execution.
*
* Return -1 if we can't translate and execute from an entire page
* of RAM. This will force us to execute by loading and translating
* one insn at a time, without caching.
*
* NOTE: This function will trigger an exception if the page is
* not executable.
*/
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
@ -1032,19 +1054,20 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
/*
* The MMU protection covers a smaller range than a target
* page, so we must redo the MMU check for every insn.
*/
return -1;
}
}
assert(tlb_hit(entry->addr_code, addr));
}
if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
/*
* Return -1 if we can't translate and execute from an entire
* page of RAM here, which will cause us to execute by loading
* and translating one insn at a time, without caching:
* - TLB_RECHECK: means the MMU protection covers a smaller range
* than a target page, so we must redo the MMU check every insn
* - TLB_MMIO: region is not backed by RAM
*/
if (unlikely(entry->addr_code & TLB_MMIO)) {
/* The region is not backed by RAM. */
return -1;
}
@ -1052,25 +1075,70 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
return qemu_ram_addr_from_host_nofail(p);
}
/* Probe for whether the specified guest write access is permitted.
* If it is not permitted then an exception will be taken in the same
* way as if this were a real write access (and we will not return).
* Otherwise the function will return, and there will be a valid
* entry in the TLB for this access.
/*
* Probe for whether the specified guest access is permitted. If it is not
* permitted then an exception will be taken in the same way as if this
* were a real access (and we will not return).
* If the size is 0 or the page requires I/O access, returns NULL; otherwise,
* returns the address of the host page similar to tlb_vaddr_to_host().
*/
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr)
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr;
size_t elt_ofs;
int wp_access;
if (!tlb_hit(tlb_addr_write(entry), addr)) {
/* TLB entry is for a different page */
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
}
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
switch (access_type) {
case MMU_DATA_LOAD:
elt_ofs = offsetof(CPUTLBEntry, addr_read);
wp_access = BP_MEM_READ;
break;
case MMU_DATA_STORE:
elt_ofs = offsetof(CPUTLBEntry, addr_write);
wp_access = BP_MEM_WRITE;
break;
case MMU_INST_FETCH:
elt_ofs = offsetof(CPUTLBEntry, addr_code);
wp_access = BP_MEM_READ;
break;
default:
g_assert_not_reached();
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
if (unlikely(!tlb_hit(tlb_addr, addr))) {
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
/* TLB resize via tlb_fill may have moved the entry. */
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
}
if (!size) {
return NULL;
}
/* Handle watchpoints. */
if (tlb_addr & TLB_WATCHPOINT) {
cpu_check_watchpoint(env_cpu(env), addr, size,
env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
wp_access, retaddr);
}
if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
/* I/O access */
return NULL;
}
return (void *)((uintptr_t)addr + entry->addend);
}
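
As an aside on how the new return value is meant to be consumed: a target helper can probe once, then use the host pointer directly when the page is ordinary RAM, and fall back to byte-wise accessors otherwise. A minimal sketch, not taken from this series (the zero-fill and the length variable are illustrative; len must not cross a page, per the assert above):

    void *host = probe_access(env, addr, len, MMU_DATA_STORE,
                              cpu_mmu_index(env, false), GETPC());
    if (host) {
        /* Plain, dirty, writable RAM: operate on the host page directly. */
        memset(host, 0, len);
    } else {
        /* Zero-sized probe, or a page that needs the I/O / dirty-tracking
         * slow path: go byte by byte through the usual accessors. */
        for (int i = 0; i < len; i++) {
            cpu_stb_data_ra(env, addr + i, 0, GETPC());
        }
    }
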
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
@ -1133,7 +1201,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_addr_write(tlbe);
TCGMemOp mop = get_memop(oi);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
int s_bits = mop & MO_SIZE;
void *hostaddr;
@ -1169,7 +1237,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
/* Notice an IO access or a needs-MMU-lookup access */
if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
if (unlikely(tlb_addr & TLB_MMIO)) {
/* There's really nothing that can be done to
support this apart from stop-the-world. */
goto stop_the_world;
@ -1201,37 +1269,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
#ifdef TARGET_WORDS_BIGENDIAN
#define NEED_BE_BSWAP 0
#define NEED_LE_BSWAP 1
#else
#define NEED_BE_BSWAP 1
#define NEED_LE_BSWAP 0
#endif
/*
* Byte Swap Helper
*
* This should all dead code away depending on the build host and
* access type.
*/
static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
{
if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
switch (size) {
case 1: return val;
case 2: return bswap16(val);
case 4: return bswap32(val);
case 8: return bswap64(val);
default:
g_assert_not_reached();
}
} else {
return val;
}
}
/*
* Load Helpers
*
@ -1246,7 +1283,7 @@ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
static inline uint64_t __attribute__((always_inline))
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
{
uintptr_t mmu_idx = get_mmuidx(oi);
@ -1260,6 +1297,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
unsigned a_bits = get_alignment_bits(get_memop(oi));
void *haddr;
uint64_t res;
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@ -1277,37 +1315,36 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
tlb_addr &= ~TLB_INVALID_MASK;
}
/* Handle an IO access. */
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry;
/* For anything that is unaligned, recurse through full_load. */
if ((addr & (size - 1)) != 0) {
goto do_unaligned_access;
}
if (tlb_addr & TLB_RECHECK) {
/*
* This is a TLB_RECHECK access, where the MMU protection
* covers a smaller range than a target page, and we must
* repeat the MMU check here. This tlb_fill() call might
* longjump out if this access should cause a guest exception.
*/
tlb_fill(env_cpu(env), addr, size,
access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
tlb_addr &= ~TLB_RECHECK;
if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
/* RAM access */
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_READ, retaddr);
/* The backing page may or may not require I/O. */
tlb_addr &= ~TLB_WATCHPOINT;
if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
goto do_aligned_access;
}
}
res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
mmu_idx, addr, retaddr, access_type, size);
return handle_bswap(res, size, big_endian);
/* Handle I/O access. */
return io_readx(env, iotlbentry, mmu_idx, addr,
retaddr, access_type, op);
}
/* Handle slow unaligned access (it spans two pages or IO). */
@ -1324,7 +1361,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
r2 = full_load(env, addr2, oi, retaddr);
shift = (addr & (size - 1)) * 8;
if (big_endian) {
if (memop_big_endian(op)) {
/* Big-endian combine. */
res = (r1 << shift) | (r2 >> ((size * 8) - shift));
} else {
@ -1336,30 +1373,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
switch (size) {
case 1:
switch (op) {
case MO_UB:
res = ldub_p(haddr);
break;
case 2:
if (big_endian) {
res = lduw_be_p(haddr);
} else {
res = lduw_le_p(haddr);
}
case MO_BEUW:
res = lduw_be_p(haddr);
break;
case 4:
if (big_endian) {
res = (uint32_t)ldl_be_p(haddr);
} else {
res = (uint32_t)ldl_le_p(haddr);
}
case MO_LEUW:
res = lduw_le_p(haddr);
break;
case 8:
if (big_endian) {
res = ldq_be_p(haddr);
} else {
res = ldq_le_p(haddr);
}
case MO_BEUL:
res = (uint32_t)ldl_be_p(haddr);
break;
case MO_LEUL:
res = (uint32_t)ldl_le_p(haddr);
break;
case MO_BEQ:
res = ldq_be_p(haddr);
break;
case MO_LEQ:
res = ldq_le_p(haddr);
break;
default:
g_assert_not_reached();
@ -1381,8 +1415,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 1, false, false,
full_ldub_mmu);
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
@ -1394,7 +1427,7 @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, false, false,
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
full_le_lduw_mmu);
}
@ -1407,7 +1440,7 @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, true, false,
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
full_be_lduw_mmu);
}
@ -1420,7 +1453,7 @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, false, false,
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
full_le_ldul_mmu);
}
@ -1433,7 +1466,7 @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, true, false,
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
full_be_ldul_mmu);
}
@ -1446,14 +1479,14 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, false, false,
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, true, false,
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
helper_be_ldq_mmu);
}
@ -1499,7 +1532,7 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
static inline void __attribute__((always_inline))
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@ -1508,6 +1541,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
unsigned a_bits = get_alignment_bits(get_memop(oi));
void *haddr;
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@ -1527,35 +1561,32 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}
/* Handle an IO access. */
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry;
/* For anything that is unaligned, recurse through byte stores. */
if ((addr & (size - 1)) != 0) {
goto do_unaligned_access;
}
if (tlb_addr & TLB_RECHECK) {
/*
* This is a TLB_RECHECK access, where the MMU protection
* covers a smaller range than a target page, and we must
* repeat the MMU check here. This tlb_fill() call might
* longjump out if this access should cause a guest exception.
*/
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
tlb_addr = tlb_addr_write(entry);
tlb_addr &= ~TLB_RECHECK;
if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
/* RAM access */
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, BP_MEM_WRITE, retaddr);
/* The backing page may or may not require I/O. */
tlb_addr &= ~TLB_WATCHPOINT;
if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
goto do_aligned_access;
}
}
io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
handle_bswap(val, size, big_endian),
addr, retaddr, size);
/* Handle I/O access. */
io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
return;
}
@ -1567,6 +1598,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t index2;
CPUTLBEntry *entry2;
target_ulong page2, tlb_addr2;
size_t size2;
do_unaligned_access:
/*
* Ensure the second page is in the TLB. Note that the first page
@ -1574,14 +1607,33 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
* cannot evict the first.
*/
page2 = (addr + size) & TARGET_PAGE_MASK;
size2 = (addr + size) & ~TARGET_PAGE_MASK;
index2 = tlb_index(env, mmu_idx, page2);
entry2 = tlb_entry(env, mmu_idx, page2);
tlb_addr2 = tlb_addr_write(entry2);
if (!tlb_hit_page(tlb_addr2, page2)
&& !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
page2 & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
mmu_idx, retaddr);
if (!tlb_hit_page(tlb_addr2, page2)) {
if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
mmu_idx, retaddr);
index2 = tlb_index(env, mmu_idx, page2);
entry2 = tlb_entry(env, mmu_idx, page2);
}
tlb_addr2 = tlb_addr_write(entry2);
}
/*
* Handle watchpoints. Since this may trap, all checks
* must happen before any store.
*/
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), addr, size - size2,
env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
BP_MEM_WRITE, retaddr);
}
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
cpu_check_watchpoint(env_cpu(env), page2, size2,
env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
BP_MEM_WRITE, retaddr);
}
/*
@ -1591,7 +1643,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
*/
for (i = 0; i < size; ++i) {
uint8_t val8;
if (big_endian) {
if (memop_big_endian(op)) {
/* Big-endian extract. */
val8 = val >> (((size - 1) * 8) - (i * 8));
} else {
@ -1605,30 +1657,27 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
switch (size) {
case 1:
switch (op) {
case MO_UB:
stb_p(haddr, val);
break;
case 2:
if (big_endian) {
stw_be_p(haddr, val);
} else {
stw_le_p(haddr, val);
}
case MO_BEUW:
stw_be_p(haddr, val);
break;
case 4:
if (big_endian) {
stl_be_p(haddr, val);
} else {
stl_le_p(haddr, val);
}
case MO_LEUW:
stw_le_p(haddr, val);
break;
case 8:
if (big_endian) {
stq_be_p(haddr, val);
} else {
stq_le_p(haddr, val);
}
case MO_BEUL:
stl_be_p(haddr, val);
break;
case MO_LEUL:
stl_le_p(haddr, val);
break;
case MO_BEQ:
stq_be_p(haddr, val);
break;
case MO_LEQ:
stq_le_p(haddr, val);
break;
default:
g_assert_not_reached();
@ -1639,43 +1688,43 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 1, false);
store_helper(env, addr, val, oi, retaddr, MO_UB);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 2, false);
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 2, true);
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 4, false);
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 4, true);
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 8, false);
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 8, true);
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
/* First set of helpers allows passing in of OI and RETADDR. This makes
@ -1740,8 +1789,7 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 1, false, true,
full_ldub_cmmu);
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
}
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
@ -1753,7 +1801,7 @@ uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, false, true,
return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
full_le_lduw_cmmu);
}
@ -1766,7 +1814,7 @@ uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, true, true,
return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
full_be_lduw_cmmu);
}
@ -1779,7 +1827,7 @@ uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, false, true,
return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
full_le_ldul_cmmu);
}
@ -1792,7 +1840,7 @@ uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, true, true,
return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
full_be_ldul_cmmu);
}
@ -1805,13 +1853,13 @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, false, true,
return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
helper_le_ldq_cmmu);
}
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, true, true,
return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
helper_be_ldq_cmmu);
}


@ -188,6 +188,38 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
g_assert_not_reached();
}
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
switch (access_type) {
case MMU_DATA_STORE:
flags = PAGE_WRITE;
break;
case MMU_DATA_LOAD:
flags = PAGE_READ;
break;
case MMU_INST_FETCH:
flags = PAGE_EXEC;
break;
default:
g_assert_not_reached();
}
if (!guest_addr_valid(addr) || page_check_range(addr, size, flags) < 0) {
CPUState *cpu = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tlb_fill(cpu, addr, size, access_type, MMU_USER_IDX, false,
retaddr);
g_assert_not_reached();
}
return size ? g2h(addr) : NULL;
}
#if defined(__i386__)
#if defined(__NetBSD__)

exec.c

@ -193,15 +193,12 @@ typedef struct subpage_t {
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);
static MemoryRegion io_mem_watch;
/**
* CPUAddressSpace: all the information a CPU needs about an AddressSpace
* @cpu: the CPU whose AddressSpace this is
@ -1062,28 +1059,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
{
return -ENOSYS;
}
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
return -ENOSYS;
}
#else
#ifndef CONFIG_USER_ONLY
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
@ -1159,9 +1135,8 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
* partially or completely with the address range covered by the
* access).
*/
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr,
vaddr len)
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
vaddr addr, vaddr len)
{
/* We know the lengths are non-zero, but a little caution is
* required to avoid errors in the case where the range ends
@ -1174,7 +1149,20 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
return !(addr > wpend || wp->vaddr > addrend);
}
#endif
/* Return flags for watchpoints that match addr + prot. */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
CPUWatchpoint *wp;
int ret = 0;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
ret |= wp->flags;
}
}
return ret;
}
#endif /* !CONFIG_USER_ONLY */
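
An editorial illustration, not part of the diff above, of the new helper's contract: it ORs together the flags of every watchpoint overlapping the queried range, and tlb_set_page_with_attrs() turns those flags into TLB_WATCHPOINT bits on the read and/or write TLB entries. Addresses below are made up:

    cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_WRITE, NULL);
    cpu_watchpoint_insert(cpu, 0x1008, 4, BP_MEM_READ, NULL);
    /* Both watchpoints sit on the same guest page, so the page's flags
     * include both directions. */
    int wp_flags = cpu_watchpoint_address_matches(cpu,
                                                  0x1000 & TARGET_PAGE_MASK,
                                                  TARGET_PAGE_SIZE);
    /* wp_flags == (BP_MEM_READ | BP_MEM_WRITE) */
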
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
@ -1481,7 +1469,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
target_ulong *address)
{
hwaddr iotlb;
CPUWatchpoint *wp;
if (memory_region_is_ram(section->mr)) {
/* Normal RAM. */
@ -1499,19 +1486,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
iotlb += xlat;
}
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
iotlb = PHYS_SECTION_WATCH + paddr;
*address |= TLB_MMIO;
break;
}
}
}
return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
@ -2814,32 +2788,35 @@ static const MemoryRegionOps notdirty_mem_ops = {
};
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
{
CPUState *cpu = current_cpu;
CPUClass *cc = CPU_GET_CLASS(cpu);
target_ulong vaddr;
CPUWatchpoint *wp;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
* the debug interrupt so that is will trigger after the
* current instruction. */
/*
* We re-entered the check after replacing the TB.
* Now raise the debug interrupt so that it will
* trigger after the current instruction.
*/
qemu_mutex_lock_iothread();
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
qemu_mutex_unlock_iothread();
return;
}
vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
addr = cc->adjust_watchpoint_address(cpu, addr, len);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (cpu_watchpoint_address_matches(wp, vaddr, len)
if (watchpoint_address_matches(wp, addr, len)
&& (wp->flags & flags)) {
if (flags == BP_MEM_READ) {
wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
}
wp->hitaddr = vaddr;
wp->hitaddr = MAX(addr, wp->vaddr);
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
if (wp->flags & BP_CPU &&
@ -2854,11 +2831,14 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
cpu_loop_exit(cpu);
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
mmap_unlock();
if (ra) {
cpu_restore_state(cpu, ra, true);
}
cpu_loop_exit_noexc(cpu);
}
}
@ -2868,80 +2848,6 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
}
}
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
unsigned size, MemTxAttrs attrs)
{
MemTxResult res;
uint64_t data;
int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
AddressSpace *as = current_cpu->cpu_ases[asidx].as;
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
switch (size) {
case 1:
data = address_space_ldub(as, addr, attrs, &res);
break;
case 2:
data = address_space_lduw(as, addr, attrs, &res);
break;
case 4:
data = address_space_ldl(as, addr, attrs, &res);
break;
case 8:
data = address_space_ldq(as, addr, attrs, &res);
break;
default: abort();
}
*pdata = data;
return res;
}
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size,
MemTxAttrs attrs)
{
MemTxResult res;
int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
AddressSpace *as = current_cpu->cpu_ases[asidx].as;
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
switch (size) {
case 1:
address_space_stb(as, addr, val, attrs, &res);
break;
case 2:
address_space_stw(as, addr, val, attrs, &res);
break;
case 4:
address_space_stl(as, addr, val, attrs, &res);
break;
case 8:
address_space_stq(as, addr, val, attrs, &res);
break;
default: abort();
}
return res;
}
static const MemoryRegionOps watch_mem_ops = {
.read_with_attrs = watch_mem_read,
.write_with_attrs = watch_mem_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.valid = {
.min_access_size = 1,
.max_access_size = 8,
.unaligned = false,
},
.impl = {
.min_access_size = 1,
.max_access_size = 8,
.unaligned = false,
},
};
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, uint8_t *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
@ -3117,9 +3023,6 @@ static void io_mem_init(void)
memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
NULL, UINT64_MAX);
memory_region_clear_global_locking(&io_mem_notdirty);
memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
NULL, UINT64_MAX);
}
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
@ -3133,8 +3036,6 @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
assert(n == PHYS_SECTION_NOTDIRTY);
n = dummy_section(&d->map, fv, &io_mem_rom);
assert(n == PHYS_SECTION_ROM);
n = dummy_section(&d->map, fv, &io_mem_watch);
assert(n == PHYS_SECTION_WATCH);
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
@ -3366,8 +3267,9 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
l = memory_access_size(mr, l, addr1);
/* XXX: could force current_cpu to NULL to avoid
potential bugs */
val = ldn_p(buf, l);
result |= memory_region_dispatch_write(mr, addr1, val, l, attrs);
val = ldn_he_p(buf, l);
result |= memory_region_dispatch_write(mr, addr1, val,
size_memop(l), attrs);
} else {
/* RAM case */
ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
@ -3428,8 +3330,9 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
/* I/O case */
release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
result |= memory_region_dispatch_read(mr, addr1, &val, l, attrs);
stn_p(buf, l, val);
result |= memory_region_dispatch_read(mr, addr1, &val,
size_memop(l), attrs);
stn_he_p(buf, l, val);
} else {
/* RAM case */
ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);


@ -21,6 +21,7 @@
#include "hw/qdev-properties.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
#include "exec/memop.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
@ -2348,7 +2349,8 @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
if (attrs.secure) {
/* S accesses to the alias act like NS accesses to the real region */
attrs.secure = 0;
return memory_region_dispatch_write(mr, addr, value, size, attrs);
return memory_region_dispatch_write(mr, addr, value,
size_memop(size) | MO_TE, attrs);
} else {
/* NS attrs are RAZ/WI for privileged, and BusFault for user */
if (attrs.user) {
@ -2367,7 +2369,8 @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
if (attrs.secure) {
/* S accesses to the alias act like NS accesses to the real region */
attrs.secure = 0;
return memory_region_dispatch_read(mr, addr, data, size, attrs);
return memory_region_dispatch_read(mr, addr, data,
size_memop(size) | MO_TE, attrs);
} else {
/* NS attrs are RAZ/WI for privileged, and BusFault for user */
if (attrs.user) {
@ -2393,7 +2396,8 @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
/* Direct the access to the correct systick */
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
return memory_region_dispatch_write(mr, addr, value, size, attrs);
return memory_region_dispatch_write(mr, addr, value,
size_memop(size) | MO_TE, attrs);
}
static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
@ -2405,7 +2409,8 @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
/* Direct the access to the correct systick */
mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
return memory_region_dispatch_read(mr, addr, data, size, attrs);
return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE,
attrs);
}
static const MemoryRegionOps nvic_systick_ops = {


@ -15,6 +15,7 @@
#include "cpu.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include "exec/memop.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
@ -372,7 +373,8 @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
mr = pbdev->pdev->io_regions[pcias].memory;
mr = s390_get_subregion(mr, offset, len);
offset -= mr->addr;
return memory_region_dispatch_read(mr, offset, data, len,
return memory_region_dispatch_read(mr, offset, data,
size_memop(len) | MO_BE,
MEMTXATTRS_UNSPECIFIED);
}
@ -471,7 +473,8 @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
mr = pbdev->pdev->io_regions[pcias].memory;
mr = s390_get_subregion(mr, offset, len);
offset -= mr->addr;
return memory_region_dispatch_write(mr, offset, data, len,
return memory_region_dispatch_write(mr, offset, data,
size_memop(len) | MO_BE,
MEMTXATTRS_UNSPECIFIED);
}
@ -780,8 +783,8 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
for (i = 0; i < len / 8; i++) {
result = memory_region_dispatch_write(mr, offset + i * 8,
ldq_p(buffer + i * 8), 8,
MEMTXATTRS_UNSPECIFIED);
ldq_p(buffer + i * 8),
MO_64, MEMTXATTRS_UNSPECIFIED);
if (result != MEMTX_OK) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;


@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
#include "exec/memop.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
@ -1073,7 +1074,8 @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr,
/* Write to the proper guest MSI-X table instead */
memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
offset, val, size,
offset, val,
size_memop(size) | MO_LE,
MEMTXATTRS_UNSPECIFIED);
}
return; /* Do not write guest MSI-X data to hardware */
@ -1104,7 +1106,8 @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque,
if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
hwaddr offset = rtl->addr & 0xfff;
memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset,
&data, size, MEMTXATTRS_UNSPECIFIED);
&data, size_memop(size) | MO_LE,
MEMTXATTRS_UNSPECIFIED);
trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data);
}


@ -17,6 +17,7 @@
#include "qemu/osdep.h"
#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
@ -543,16 +544,17 @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
val = pci_get_byte(buf);
break;
case 2:
val = cpu_to_le16(pci_get_word(buf));
val = pci_get_word(buf);
break;
case 4:
val = cpu_to_le32(pci_get_long(buf));
val = pci_get_long(buf);
break;
default:
/* As length is under guest control, handle illegal values. */
return;
}
memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED);
memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
MEMTXATTRS_UNSPECIFIED);
}
static void
@ -575,16 +577,17 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
/* Make sure caller aligned buf properly */
assert(!(((uintptr_t)buf) & (len - 1)));
memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED);
memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
MEMTXATTRS_UNSPECIFIED);
switch (len) {
case 1:
pci_set_byte(buf, val);
break;
case 2:
pci_set_word(buf, le16_to_cpu(val));
pci_set_word(buf, val);
break;
case 4:
pci_set_long(buf, le32_to_cpu(val));
pci_set_long(buf, val);
break;
default:
/* As length is under guest control, handle illegal values. */


@ -329,14 +329,14 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
/* Set if TLB entry must have MMU lookup repeated for every access */
#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4))
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
/* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
| TLB_RECHECK)
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
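
Worked values, assuming a 4 KiB target page (TARGET_PAGE_BITS == 12); this is why the slow paths earlier in this commit test tlb_addr & ~TARGET_PAGE_MASK to catch any special-case bit:

    /* TLB_NOTDIRTY    == 1 << 10  (0x400)
     * TLB_MMIO        == 1 <<  9  (0x200)
     * TLB_WATCHPOINT  == 1 <<  8  (0x100)
     * All of these live below TARGET_PAGE_BITS, inside ~TARGET_PAGE_MASK. */
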
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the


@ -260,8 +260,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr);
#else
static inline void tlb_init(CPUState *cpu)
{
@ -312,6 +310,14 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */


@ -37,6 +37,8 @@ typedef struct MemTxAttrs {
unsigned int user:1;
/* Requester ID (for MSI for example) */
unsigned int requester_id:16;
/* Invert endianness for this page */
unsigned int byte_swap:1;
/*
* The following are target-specific page-table bits. These are not
* related to actual memory transactions at all. However, this structure

include/exec/memop.h (new file)

@ -0,0 +1,134 @@
/*
* Constants for memory operations
*
* Authors:
* Richard Henderson <rth@twiddle.net>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef MEMOP_H
#define MEMOP_H
#include "qemu/host-utils.h"
typedef enum MemOp {
MO_8 = 0,
MO_16 = 1,
MO_32 = 2,
MO_64 = 3,
MO_SIZE = 3, /* Mask for the above. */
MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
MO_BSWAP = 8, /* Host reverse endian. */
#ifdef HOST_WORDS_BIGENDIAN
MO_LE = MO_BSWAP,
MO_BE = 0,
#else
MO_LE = 0,
MO_BE = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
#ifdef TARGET_WORDS_BIGENDIAN
MO_TE = MO_BE,
#else
MO_TE = MO_LE,
#endif
#endif
/*
* MO_UNALN accesses are never checked for alignment.
* MO_ALIGN accesses will result in a call to the CPU's
* do_unaligned_access hook if the guest address is not aligned.
* The default depends on whether the target CPU defines
* TARGET_ALIGNED_ONLY.
*
* Some architectures (e.g. ARMv8) need the address which is aligned
* to a size more than the size of the memory access.
* Some architectures (e.g. SPARCv9) need an address which is aligned,
* but less strictly than the natural alignment.
*
* MO_ALIGN supposes the alignment size is the size of a memory access.
*
* There are three options:
* - unaligned access permitted (MO_UNALN).
* - an alignment to the size of an access (MO_ALIGN);
* - an alignment to a specified size, which may be more or less than
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
*/
MO_ASHIFT = 4,
MO_AMASK = 7 << MO_ASHIFT,
#ifdef NEED_CPU_H
#ifdef TARGET_ALIGNED_ONLY
MO_ALIGN = 0,
MO_UNALN = MO_AMASK,
#else
MO_ALIGN = MO_AMASK,
MO_UNALN = 0,
#endif
#endif
MO_ALIGN_2 = 1 << MO_ASHIFT,
MO_ALIGN_4 = 2 << MO_ASHIFT,
MO_ALIGN_8 = 3 << MO_ASHIFT,
MO_ALIGN_16 = 4 << MO_ASHIFT,
MO_ALIGN_32 = 5 << MO_ASHIFT,
MO_ALIGN_64 = 6 << MO_ASHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
MO_Q = MO_64,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
MO_LEQ = MO_LE | MO_Q,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
MO_BEQ = MO_BE | MO_Q,
#ifdef NEED_CPU_H
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
MO_TEQ = MO_TE | MO_Q,
#endif
MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;
/* MemOp to size in bytes. */
static inline unsigned memop_size(MemOp op)
{
return 1 << (op & MO_SIZE);
}
/* Size in bytes to MemOp. */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
/* Power of 2 up to 8. */
assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
return ctz32(size);
}
/* Big endianness from MemOp. */
static inline bool memop_big_endian(MemOp op)
{
return (op & MO_BSWAP) == MO_BE;
}
#endif
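
A few worked examples of the helpers above, for orientation (QEMU build environment assumed; purely illustrative):

    assert(size_memop(4) == MO_32);              /* bytes -> MemOp size */
    assert(memop_size(MO_BEUW) == 2);            /* MemOp -> bytes */
    assert(memop_big_endian(MO_BEQ));            /* endianness query */
    assert((size_memop(2) | MO_LE) == MO_LEUW);  /* composing an op, as the
                                                    virtio-pci change above does */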


@ -19,6 +19,7 @@
#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
@ -1739,13 +1740,13 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner);
* @mr: #MemoryRegion to access
* @addr: address within that region
* @pval: pointer to uint64_t which the data is written to
* @size: size of the access in bytes
* @op: size, sign, and endianness of the memory operation
* @attrs: memory transaction attributes to use for the access
*/
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
hwaddr addr,
uint64_t *pval,
unsigned size,
MemOp op,
MemTxAttrs attrs);
/**
* memory_region_dispatch_write: perform a write directly to the specified
@ -1754,13 +1755,13 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
* @mr: #MemoryRegion to access
* @addr: address within that region
* @data: data to write
* @size: size of the access in bytes
* @op: size, sign, and endianness of the memory operation
* @attrs: memory transaction attributes to use for the access
*/
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
hwaddr addr,
uint64_t data,
unsigned size,
MemOp op,
MemTxAttrs attrs);
/**
@ -2200,6 +2201,9 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
}
}
/* enum device_endian to MemOp. */
MemOp devend_memop(enum device_endian end);
#endif
#endif


@ -1070,12 +1070,49 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}
#ifdef CONFIG_USER_ONLY
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
return -ENOSYS;
}
static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags)
{
return -ENOSYS;
}
static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
CPUWatchpoint *wp)
{
}
static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs atr, int fl, uintptr_t ra)
{
}
static inline int cpu_watchpoint_address_matches(CPUState *cpu,
vaddr addr, vaddr len)
{
return 0;
}
#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra);
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
#endif
/**
* cpu_get_address_space:


@ -351,32 +351,23 @@ static bool memory_region_big_endian(MemoryRegion *mr)
#endif
}
static bool memory_region_wrong_endianness(MemoryRegion *mr)
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
#ifdef TARGET_WORDS_BIGENDIAN
return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
if (memory_region_wrong_endianness(mr)) {
switch (size) {
case 1:
if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
switch (op & MO_SIZE) {
case MO_8:
break;
case 2:
case MO_16:
*data = bswap16(*data);
break;
case 4:
case MO_32:
*data = bswap32(*data);
break;
case 8:
case MO_64:
*data = bswap64(*data);
break;
default:
abort();
g_assert_not_reached();
}
}
}
@ -1446,9 +1437,10 @@ static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
hwaddr addr,
uint64_t *pval,
unsigned size,
MemOp op,
MemTxAttrs attrs)
{
unsigned size = memop_size(op);
MemTxResult r;
if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
@ -1457,7 +1449,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
}
r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
adjust_endianness(mr, pval, size);
adjust_endianness(mr, pval, op);
return r;
}
@ -1490,15 +1482,17 @@ static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
hwaddr addr,
uint64_t data,
unsigned size,
MemOp op,
MemTxAttrs attrs)
{
unsigned size = memop_size(op);
if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
unassigned_mem_write(mr, addr, data, size);
return MEMTX_DECODE_ERROR;
}
adjust_endianness(mr, &data, size);
adjust_endianness(mr, &data, op);
if ((!kvm_eventfds_enabled()) &&
memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
@ -2338,7 +2332,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
}
if (size) {
adjust_endianness(mr, &mrfd.data, size);
adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@ -2373,7 +2367,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
unsigned i;
if (size) {
adjust_endianness(mr, &mrfd.data, size);
adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@ -3273,3 +3267,21 @@ static void memory_register_types(void)
}
type_init(memory_register_types)
MemOp devend_memop(enum device_endian end)
{
static MemOp conv[] = {
[DEVICE_LITTLE_ENDIAN] = MO_LE,
[DEVICE_BIG_ENDIAN] = MO_BE,
[DEVICE_NATIVE_ENDIAN] = MO_TE,
[DEVICE_HOST_ENDIAN] = 0,
};
switch (end) {
case DEVICE_LITTLE_ENDIAN:
case DEVICE_BIG_ENDIAN:
case DEVICE_NATIVE_ENDIAN:
return conv[end];
default:
g_assert_not_reached();
}
}
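
A worked example of the new endianness fix-up, assuming a region whose ops declare DEVICE_LITTLE_ENDIAN (values follow from adjust_endianness() and devend_memop() above):

    uint64_t data = 0x11223344;
    /* (MO_BEUL & MO_BSWAP) == MO_BE, while devend_memop(DEVICE_LITTLE_ENDIAN)
     * == MO_LE: they differ, so the 32-bit value is byte-swapped. */
    adjust_endianness(mr, &data, MO_BEUL);
    /* data is now 0x44332211 */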


@ -38,16 +38,8 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap32(val);
}
#endif
r = memory_region_dispatch_read(mr, addr1, &val,
MO_32 | devend_memop(endian), attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -114,16 +106,8 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap64(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap64(val);
}
#endif
r = memory_region_dispatch_read(mr, addr1, &val,
MO_64 | devend_memop(endian), attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -188,7 +172,7 @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -224,16 +208,8 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap16(val);
}
#endif
r = memory_region_dispatch_read(mr, addr1, &val,
MO_16 | devend_memop(endian), attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -300,7 +276,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
} else {
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
stl_p(ptr, val);
@ -336,17 +312,8 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap32(val);
}
#endif
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
r = memory_region_dispatch_write(mr, addr1, val,
MO_32 | devend_memop(endian), attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -408,7 +375,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (!memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -441,17 +408,8 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 2 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap16(val);
}
#endif
r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
r = memory_region_dispatch_write(mr, addr1, val,
MO_16 | devend_memop(endian), attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@ -514,17 +472,8 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 8 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap64(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap64(val);
}
#endif
r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
r = memory_region_dispatch_write(mr, addr1, val,
MO_64 | devend_memop(endian), attrs);
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
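
Note on the pattern above: each load/store helper in this file used to dispatch a raw size and then conditionally byte-swap under TARGET_WORDS_BIGENDIAN; the new code folds both into a single MemOp argument, MO_8/MO_16/MO_32/MO_64 OR-ed with devend_memop(endian), and lets the dispatcher perform any swap. The body of devend_memop() is not part of this excerpt; a minimal sketch that simply mirrors the condition tested by the deleted #ifdef blocks (swap when the device endianness differs from the guest's) could look like this:

static inline MemOp devend_memop(enum device_endian end)
{
    /*
     * Sketch only: the real helper is defined elsewhere in this series and
     * may additionally fold in host byte order, since MemOp swaps are
     * expressed relative to the host.  This version just reproduces the
     * condition the removed #ifdef blocks checked.
     */
#if defined(TARGET_WORDS_BIGENDIAN)
    return end == DEVICE_LITTLE_ENDIAN ? MO_BSWAP : 0;
#else
    return end == DEVICE_BIG_ENDIAN ? MO_BSWAP : 0;
#endif
}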


@ -403,7 +403,7 @@ static inline void gen_store_mem(DisasContext *ctx,
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
int32_t disp16, int mem_idx,
TCGMemOp op)
MemOp op)
{
TCGLabel *lab_fail, *lab_done;
TCGv addr, val;


@ -85,7 +85,7 @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
/* initialize TCG globals. */
void a64_translate_init(void)
@ -440,7 +440,7 @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
* Dn, Sn, Hn or Bn).
* (Note that this is not the same mapping as for A32; see cpu.h)
*/
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
return vec_reg_offset(s, regno, 0, size);
}
@ -856,7 +856,7 @@ static void do_gpr_ld_memidx(DisasContext *s,
bool iss_valid, unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
TCGMemOp memop = s->be_data + size;
MemOp memop = s->be_data + size;
g_assert(size <= 3);
@ -933,7 +933,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
TCGv_i64 tmphi;
if (size < 4) {
TCGMemOp memop = s->be_data + size;
MemOp memop = s->be_data + size;
tmphi = tcg_const_i64(0);
tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
} else {
@ -974,7 +974,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
int element, TCGMemOp memop)
int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
@ -1006,7 +1006,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
int element, TCGMemOp memop)
int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
@ -1033,7 +1033,7 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
int element, TCGMemOp memop)
int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
@ -1055,7 +1055,7 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
int destidx, int element, TCGMemOp memop)
int destidx, int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
@ -1075,7 +1075,7 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
TCGv_i64 tcg_addr, int size, TCGMemOp endian)
TCGv_i64 tcg_addr, int size, MemOp endian)
{
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
@ -1087,7 +1087,7 @@ static void do_vec_st(DisasContext *s, int srcidx, int element,
/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
TCGv_i64 tcg_addr, int size, TCGMemOp endian)
TCGv_i64 tcg_addr, int size, MemOp endian)
{
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
@ -2189,7 +2189,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i64 addr, int size, bool is_pair)
{
int idx = get_mem_index(s);
TCGMemOp memop = s->be_data;
MemOp memop = s->be_data;
g_assert(size <= 3);
if (is_pair) {
@ -3275,7 +3275,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
bool is_postidx = extract32(insn, 23, 1);
bool is_q = extract32(insn, 30, 1);
TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
TCGMemOp endian = s->be_data;
MemOp endian = s->be_data;
int ebytes; /* bytes per element */
int elements; /* elements per vector */
@ -5444,7 +5444,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
unsigned int mos, type, rm, cond, rn, rd;
TCGv_i64 t_true, t_false, t_zero;
DisasCompare64 c;
TCGMemOp sz;
MemOp sz;
mos = extract32(insn, 29, 3);
type = extract32(insn, 22, 2);
@ -6256,7 +6256,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
int mos = extract32(insn, 29, 3);
uint64_t imm;
TCGv_i64 tcg_res;
TCGMemOp sz;
MemOp sz;
if (mos || imm5) {
unallocated_encoding(s);
@ -7019,7 +7019,7 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
{
if (esize == size) {
int element;
TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
MemOp msize = esize == 16 ? MO_16 : MO_32;
TCGv_i32 tcg_elem;
/* We should have one register left here */
@ -8011,7 +8011,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
int shift = (2 * esize) - immhb;
int elements = is_scalar ? 1 : (64 / esize);
bool round = extract32(opcode, 0, 1);
TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
TCGv_i64 tcg_rn, tcg_rd, tcg_round;
TCGv_i32 tcg_rd_narrowed;
TCGv_i64 tcg_final;
@ -8170,7 +8170,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
}
};
NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
TCGMemOp memop = scalar ? size : MO_32;
MemOp memop = scalar ? size : MO_32;
int maxpass = scalar ? 1 : is_q ? 4 : 2;
for (pass = 0; pass < maxpass; pass++) {
@ -8214,7 +8214,7 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
TCGv_i32 tcg_shift = NULL;
TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
MemOp mop = size | (is_signed ? MO_SIGN : 0);
int pass;
if (fracbits || size == MO_64) {
@ -9993,7 +9993,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
int dsize = is_q ? 128 : 64;
int esize = 8 << size;
int elements = dsize/esize;
TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
MemOp memop = size | (is_u ? 0 : MO_SIGN);
TCGv_i64 tcg_rn = new_tmp_a64(s);
TCGv_i64 tcg_rd = new_tmp_a64(s);
TCGv_i64 tcg_round;
@ -10336,7 +10336,7 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
TCGv_i64 tcg_op2 = tcg_temp_new_i64();
TCGv_i64 tcg_passres;
TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
int elt = pass + is_q * 2;
@ -11816,7 +11816,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
if (size == 2) {
/* 32 + 32 -> 64 op */
TCGMemOp memop = size + (u ? 0 : MO_SIGN);
MemOp memop = size + (u ? 0 : MO_SIGN);
for (pass = 0; pass < maxpass; pass++) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
@ -12838,7 +12838,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
switch (is_fp) {
case 1: /* normal fp */
/* convert insn encoded size to TCGMemOp size */
/* convert insn encoded size to MemOp size */
switch (size) {
case 0: /* half-precision */
size = MO_16;
@ -12886,7 +12886,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
return;
}
/* Given TCGMemOp size, adjust register and indexing. */
/* Given MemOp size, adjust register and indexing. */
switch (size) {
case MO_16:
index = h << 2 | l << 1 | m;
@ -13183,7 +13183,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
TCGv_i64 tcg_res[2];
int pass;
bool satop = extract32(opcode, 0, 1);
TCGMemOp memop = MO_32;
MemOp memop = MO_32;
if (satop || !u) {
memop |= MO_SIGN;


@ -64,7 +64,7 @@ static inline void assert_fp_access_checked(DisasContext *s)
* the FP/vector register Qn.
*/
static inline int vec_reg_offset(DisasContext *s, int regno,
int element, TCGMemOp size)
int element, MemOp size)
{
int element_size = 1 << size;
int offs = element * element_size;


@ -4567,7 +4567,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
*/
/* The memory mode of the dtype. */
static const TCGMemOp dtype_mop[16] = {
static const MemOp dtype_mop[16] = {
MO_UB, MO_UB, MO_UB, MO_UB,
MO_SL, MO_UW, MO_UW, MO_UW,
MO_SW, MO_SW, MO_UL, MO_UL,


@ -114,7 +114,7 @@ typedef enum ISSInfo {
} ISSInfo;
/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
uint32_t syn;
int sas = memop & MO_SIZE;
@ -1061,7 +1061,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
* that the address argument is TCGv_i32 rather than TCGv.
*/
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
TCGv addr = tcg_temp_new();
tcg_gen_extu_i32_tl(addr, a32);
@ -1074,7 +1074,7 @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
}
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, TCGMemOp opc)
int index, MemOp opc)
{
TCGv addr;
@ -1089,7 +1089,7 @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
}
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
int index, TCGMemOp opc)
int index, MemOp opc)
{
TCGv addr;
@ -1142,7 +1142,7 @@ static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
}
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
int index, TCGMemOp opc)
int index, MemOp opc)
{
TCGv addr = gen_aa32_addr(s, a32, opc);
tcg_gen_qemu_ld_i64(val, addr, index, opc);
@ -1157,7 +1157,7 @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
}
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
int index, TCGMemOp opc)
int index, MemOp opc)
{
TCGv addr = gen_aa32_addr(s, a32, opc);
@ -1388,7 +1388,7 @@ neon_reg_offset (int reg, int n)
* where 0 is the least significant end of the register.
*/
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
neon_element_offset(int reg, int element, MemOp size)
{
int element_size = 1 << size;
int ofs = element * element_size;
@ -1410,7 +1410,7 @@ static TCGv_i32 neon_load_reg(int reg, int pass)
return tmp;
}
static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
{
long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
@ -1429,7 +1429,7 @@ static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
}
}
static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
{
long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
@ -1457,7 +1457,7 @@ static void neon_store_reg(int reg, int pass, TCGv_i32 var)
tcg_temp_free_i32(var);
}
static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
{
long offset = neon_element_offset(reg, ele, size);
@ -1476,7 +1476,7 @@ static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
}
}
static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
{
long offset = neon_element_offset(reg, ele, size);
@ -3542,7 +3542,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
int n;
int vec_size;
int mmu_idx;
TCGMemOp endian;
MemOp endian;
TCGv_i32 addr;
TCGv_i32 tmp;
TCGv_i32 tmp2;
@ -6849,7 +6849,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
} else if ((insn & 0x380) == 0) {
/* VDUP */
int element;
TCGMemOp size;
MemOp size;
if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
return 1;
@ -7421,7 +7421,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 addr, int size)
{
TCGv_i32 tmp = tcg_temp_new_i32();
TCGMemOp opc = size | MO_ALIGN | s->be_data;
MemOp opc = size | MO_ALIGN | s->be_data;
s->is_ldex = true;
@ -7475,7 +7475,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv taddr;
TCGLabel *done_label;
TCGLabel *fail_label;
TCGMemOp opc = size | MO_ALIGN | s->be_data;
MemOp opc = size | MO_ALIGN | s->be_data;
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
[addr] = {Rt};
@ -8583,7 +8583,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
*/
TCGv taddr;
TCGMemOp opc = s->be_data;
MemOp opc = s->be_data;
rm = (insn) & 0xf;


@ -23,7 +23,7 @@ typedef struct DisasContext {
int condexec_cond;
int thumb;
int sctlr_b;
TCGMemOp be_data;
MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif


@ -137,9 +137,7 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val,
default:
/* Nothing is stored, but protection is checked and the
cacheline is marked dirty. */
#ifndef CONFIG_USER_ONLY
probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra);
#endif
break;
}
}


@ -1500,7 +1500,7 @@ static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
*/
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
unsigned rx, int scale, target_sreg disp,
unsigned sp, int modify, TCGMemOp mop)
unsigned sp, int modify, MemOp mop)
{
TCGv_reg ofs;
TCGv_tl addr;
@ -1518,7 +1518,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
unsigned rx, int scale, target_sreg disp,
unsigned sp, int modify, TCGMemOp mop)
unsigned sp, int modify, MemOp mop)
{
TCGv_reg ofs;
TCGv_tl addr;
@ -1536,7 +1536,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
unsigned rx, int scale, target_sreg disp,
unsigned sp, int modify, TCGMemOp mop)
unsigned sp, int modify, MemOp mop)
{
TCGv_reg ofs;
TCGv_tl addr;
@ -1554,7 +1554,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
unsigned rx, int scale, target_sreg disp,
unsigned sp, int modify, TCGMemOp mop)
unsigned sp, int modify, MemOp mop)
{
TCGv_reg ofs;
TCGv_tl addr;
@ -1580,7 +1580,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
unsigned rx, int scale, target_sreg disp,
unsigned sp, int modify, TCGMemOp mop)
unsigned sp, int modify, MemOp mop)
{
TCGv_reg dest;
@ -1653,7 +1653,7 @@ static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
target_sreg disp, unsigned sp,
int modify, TCGMemOp mop)
int modify, MemOp mop)
{
nullify_over(ctx);
do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
@ -2939,7 +2939,7 @@ static bool trans_st(DisasContext *ctx, arg_ldst *a)
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
MemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
TCGv_reg zero, dest, ofs;
TCGv_tl addr;


@ -87,8 +87,8 @@ typedef struct DisasContext {
/* current insn context */
int override; /* -1 if no override */
int prefix;
TCGMemOp aflag;
TCGMemOp dflag;
MemOp aflag;
MemOp dflag;
target_ulong pc_start;
target_ulong pc; /* pc = eip + cs_base */
/* current block context */
@ -149,7 +149,7 @@ static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
/* i386 arith/logic operations */
enum {
@ -320,7 +320,7 @@ static inline bool byte_reg_is_xH(DisasContext *s, int reg)
}
/* Select the size of a push/pop operation. */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
if (CODE64(s)) {
return ot == MO_16 ? MO_16 : MO_64;
@ -330,13 +330,13 @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
}
/* Select the size of the stack pointer. */
static inline TCGMemOp mo_stacksize(DisasContext *s)
static inline MemOp mo_stacksize(DisasContext *s)
{
return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
}
/* Select only size 64 else 32. Used for SSE operand sizes. */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
return ot == MO_64 ? MO_64 : MO_32;
@ -347,19 +347,19 @@ static inline TCGMemOp mo_64_32(TCGMemOp ot)
/* Select size 8 if lsb of B is clear, else OT. Used for decoding
byte vs word opcodes. */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
static inline MemOp mo_b_d(int b, MemOp ot)
{
return b & 1 ? ot : MO_8;
}
/* Select size 8 if lsb of B is clear, else OT capped at 32.
Used for decoding operand size of port opcodes. */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
static inline MemOp mo_b_d32(int b, MemOp ot)
{
return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0)
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
switch(ot) {
case MO_8:
@ -388,7 +388,7 @@ static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0)
}
static inline
void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg)
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
@ -411,13 +411,13 @@ static inline void gen_op_jmp_v(TCGv dest)
}
static inline
void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val)
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg)
static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
gen_op_mov_reg_v(s, size, reg, s->tmp0);
@ -451,7 +451,7 @@ static inline void gen_jmp_im(DisasContext *s, target_ulong pc)
/* Compute SEG:REG into A0. SEG is selected from the override segment
(OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
indicate no override. */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
int def_seg, int ovr_seg)
{
switch (aflag) {
@ -514,13 +514,13 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}
static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot)
static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
tcg_gen_shli_tl(s->T0, s->T0, ot);
};
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
switch (size) {
case MO_8:
@ -551,18 +551,18 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
}
}
static void gen_extu(TCGMemOp ot, TCGv reg)
static void gen_extu(MemOp ot, TCGv reg)
{
gen_ext_tl(reg, reg, ot, false);
}
static void gen_exts(TCGMemOp ot, TCGv reg)
static void gen_exts(MemOp ot, TCGv reg)
{
gen_ext_tl(reg, reg, ot, true);
}
static inline
void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
{
tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
gen_extu(size, s->tmp0);
@ -570,14 +570,14 @@ void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
}
static inline
void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
{
tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
gen_extu(size, s->tmp0);
tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
}
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
switch (ot) {
case MO_8:
@ -594,7 +594,7 @@ static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
}
}
static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
switch (ot) {
case MO_8:
@ -611,7 +611,7 @@ static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
}
}
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip,
uint32_t svm_flags)
{
target_ulong next_eip;
@ -644,7 +644,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
}
}
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
static inline void gen_movs(DisasContext *s, MemOp ot)
{
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
@ -840,7 +840,7 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
default:
{
TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
}
@ -885,7 +885,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
.mask = -1 };
default:
{
TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
}
@ -897,7 +897,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
int inv, jcc_op, cond;
TCGMemOp size;
MemOp size;
CCPrepare cc;
TCGv t0;
@ -1075,7 +1075,7 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
return l2;
}
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
static inline void gen_stos(DisasContext *s, MemOp ot)
{
gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
gen_string_movl_A0_EDI(s);
@ -1084,7 +1084,7 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot)
gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
static inline void gen_lods(DisasContext *s, TCGMemOp ot)
static inline void gen_lods(DisasContext *s, MemOp ot)
{
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
@ -1093,7 +1093,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot)
gen_op_add_reg_T0(s, s->aflag, R_ESI);
}
static inline void gen_scas(DisasContext *s, TCGMemOp ot)
static inline void gen_scas(DisasContext *s, MemOp ot)
{
gen_string_movl_A0_EDI(s);
gen_op_ld_v(s, ot, s->T1, s->A0);
@ -1102,7 +1102,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot)
gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
static inline void gen_cmps(DisasContext *s, MemOp ot)
{
gen_string_movl_A0_EDI(s);
gen_op_ld_v(s, ot, s->T1, s->A0);
@ -1126,7 +1126,7 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
}
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
static inline void gen_ins(DisasContext *s, MemOp ot)
{
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
@ -1148,7 +1148,7 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot)
}
}
static inline void gen_outs(DisasContext *s, TCGMemOp ot)
static inline void gen_outs(DisasContext *s, MemOp ot)
{
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
@ -1171,7 +1171,7 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot)
/* same method as Valgrind : we generate jumps to current or next
instruction */
#define GEN_REPZ(op) \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \
target_ulong cur_eip, target_ulong next_eip) \
{ \
TCGLabel *l2; \
@ -1187,7 +1187,7 @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
}
#define GEN_REPZ2(op) \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \
target_ulong cur_eip, \
target_ulong next_eip, \
int nz) \
@ -1284,7 +1284,7 @@ static void gen_illegal_opcode(DisasContext *s)
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
if (d != OR_TMP0) {
if (s1->prefix & PREFIX_LOCK) {
@ -1395,7 +1395,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
if (s1->prefix & PREFIX_LOCK) {
if (d != OR_TMP0) {
@ -1421,7 +1421,7 @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
TCGv shm1, TCGv count, bool is_right)
{
TCGv_i32 z32, s32, oldop;
@ -1466,7 +1466,7 @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
int is_right, int is_arith)
{
target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
@ -1502,7 +1502,7 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}
static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
int is_right, int is_arith)
{
int mask = (ot == MO_64 ? 0x3f : 0x1f);
@ -1542,7 +1542,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
}
}
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
TCGv_i32 t0, t1;
@ -1627,7 +1627,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
int is_right)
{
int mask = (ot == MO_64 ? 0x3f : 0x1f);
@ -1705,7 +1705,7 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
int is_right)
{
gen_compute_eflags(s);
@ -1761,7 +1761,7 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
bool is_right, TCGv count_in)
{
target_ulong mask = (ot == MO_64 ? 63 : 31);
@ -1842,7 +1842,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
tcg_temp_free(count);
}
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
{
if (s != OR_TMP1)
gen_op_mov_v_reg(s1, ot, s1->T1, s);
@ -1872,7 +1872,7 @@ static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
}
}
static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
{
switch(op) {
case OP_ROL:
@ -2149,7 +2149,7 @@ static void gen_add_A0_ds_seg(DisasContext *s)
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
TCGMemOp ot, int reg, int is_store)
MemOp ot, int reg, int is_store)
{
int mod, rm;
@ -2179,7 +2179,7 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
}
}
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
uint32_t ret;
@ -2202,7 +2202,7 @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
return ret;
}
static inline int insn_const_size(TCGMemOp ot)
static inline int insn_const_size(MemOp ot)
{
if (ot <= MO_32) {
return 1 << ot;
@ -2266,7 +2266,7 @@ static inline void gen_jcc(DisasContext *s, int b,
}
}
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
int modrm, int reg)
{
CCPrepare cc;
@ -2363,8 +2363,8 @@ static inline void gen_stack_update(DisasContext *s, int addend)
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
TCGMemOp d_ot = mo_pushpop(s, s->dflag);
TCGMemOp a_ot = mo_stacksize(s);
MemOp d_ot = mo_pushpop(s, s->dflag);
MemOp a_ot = mo_stacksize(s);
int size = 1 << d_ot;
TCGv new_esp = s->A0;
@ -2383,9 +2383,9 @@ static void gen_push_v(DisasContext *s, TCGv val)
}
/* two step pop is necessary for precise exceptions */
static TCGMemOp gen_pop_T0(DisasContext *s)
static MemOp gen_pop_T0(DisasContext *s)
{
TCGMemOp d_ot = mo_pushpop(s, s->dflag);
MemOp d_ot = mo_pushpop(s, s->dflag);
gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
gen_op_ld_v(s, d_ot, s->T0, s->A0);
@ -2393,7 +2393,7 @@ static TCGMemOp gen_pop_T0(DisasContext *s)
return d_ot;
}
static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
gen_stack_update(s, 1 << ot);
}
@ -2405,8 +2405,8 @@ static inline void gen_stack_A0(DisasContext *s)
static void gen_pusha(DisasContext *s)
{
TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
TCGMemOp d_ot = s->dflag;
MemOp s_ot = s->ss32 ? MO_32 : MO_16;
MemOp d_ot = s->dflag;
int size = 1 << d_ot;
int i;
@ -2421,8 +2421,8 @@ static void gen_pusha(DisasContext *s)
static void gen_popa(DisasContext *s)
{
TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
TCGMemOp d_ot = s->dflag;
MemOp s_ot = s->ss32 ? MO_32 : MO_16;
MemOp d_ot = s->dflag;
int size = 1 << d_ot;
int i;
@ -2442,8 +2442,8 @@ static void gen_popa(DisasContext *s)
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
TCGMemOp d_ot = mo_pushpop(s, s->dflag);
TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
MemOp d_ot = mo_pushpop(s, s->dflag);
MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
int size = 1 << d_ot;
/* Push BP; compute FrameTemp into T1. */
@ -2482,8 +2482,8 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
static void gen_leave(DisasContext *s)
{
TCGMemOp d_ot = mo_pushpop(s, s->dflag);
TCGMemOp a_ot = mo_stacksize(s);
MemOp d_ot = mo_pushpop(s, s->dflag);
MemOp a_ot = mo_stacksize(s);
gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
gen_op_ld_v(s, d_ot, s->T0, s->A0);
@ -3045,7 +3045,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
SSEFunc_0_eppi sse_fn_eppi;
SSEFunc_0_ppi sse_fn_ppi;
SSEFunc_0_eppt sse_fn_eppt;
TCGMemOp ot;
MemOp ot;
b &= 0xff;
if (s->prefix & PREFIX_DATA)
@ -4488,7 +4488,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
CPUX86State *env = cpu->env_ptr;
int b, prefixes;
int shift;
TCGMemOp ot, aflag, dflag;
MemOp ot, aflag, dflag;
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
int rex_w, rex_r;
@ -5566,8 +5566,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x1be: /* movsbS Gv, Eb */
case 0x1bf: /* movswS Gv, Eb */
{
TCGMemOp d_ot;
TCGMemOp s_ot;
MemOp d_ot;
MemOp s_ot;
/* d_ot is the size of destination */
d_ot = dflag;


@ -2414,7 +2414,7 @@ DISAS_INSN(cas)
uint16_t ext;
TCGv load;
TCGv cmp;
TCGMemOp opc;
MemOp opc;
switch ((insn >> 9) & 3) {
case 1:


@ -919,7 +919,7 @@ static void dec_load(DisasContext *dc)
unsigned int size;
bool rev = false, ex = false, ea = false;
int mem_index = cpu_mmu_index(&dc->cpu->env, false);
TCGMemOp mop;
MemOp mop;
mop = dc->opcode & 3;
size = 1 << mop;
@ -1035,7 +1035,7 @@ static void dec_store(DisasContext *dc)
unsigned int size;
bool rev = false, ex = false, ea = false;
int mem_index = cpu_mmu_index(&dc->cpu->env, false);
TCGMemOp mop;
MemOp mop;
mop = dc->opcode & 3;
size = 1 << mop;


@ -24,6 +24,7 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/memop.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
@ -4536,16 +4537,14 @@ static inline void ensure_writable_pages(CPUMIPSState *env,
int mmu_idx,
uintptr_t retaddr)
{
#if !defined(CONFIG_USER_ONLY)
target_ulong page_addr;
/* FIXME: Probe the actual accesses (pass and use a size) */
if (unlikely(MSA_PAGESPAN(addr))) {
/* first page */
probe_write(env, addr, 0, mmu_idx, retaddr);
/* second page */
page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
probe_write(env, page_addr, 0, mmu_idx, retaddr);
addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
probe_write(env, addr, 0, mmu_idx, retaddr);
}
#endif
}
void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
@ -4741,11 +4740,11 @@ void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
if (op == 9) {
/* Index Store Tag */
memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
8, MEMTXATTRS_UNSPECIFIED);
MO_64, MEMTXATTRS_UNSPECIFIED);
} else if (op == 5) {
/* Index Load Tag */
memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
8, MEMTXATTRS_UNSPECIFIED);
MO_64, MEMTXATTRS_UNSPECIFIED);
}
#endif
}
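
The ensure_writable_pages() hunk above now probes both pages of a spanning MSA store unconditionally, so the same writability check also runs in CONFIG_USER_ONLY builds. The MSA_PAGESPAN() test itself is not shown in this excerpt; for a 16-byte vector access it presumably amounts to checking whether the offset within the page leaves fewer than 16 bytes, along the lines of this illustrative sketch (the helper name is hypothetical):

/* Hypothetical illustration of the page-span test used above: a 16-byte
 * MSA access starting at addr spills into the next page when its offset
 * within the page exceeds TARGET_PAGE_SIZE - 16. */
static inline bool msa_access_spans_pages(target_ulong addr)
{
    return (addr & ~TARGET_PAGE_MASK) > TARGET_PAGE_SIZE - 16;
}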


@ -2526,7 +2526,7 @@ typedef struct DisasContext {
int32_t CP0_Config5;
/* Routine used to access memory */
int mem_idx;
TCGMemOp default_tcg_memop_mask;
MemOp default_tcg_memop_mask;
uint32_t hflags, saved_hflags;
target_ulong btarget;
bool ulri;
@ -3706,7 +3706,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
/* Store conditional */
static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
TCGMemOp tcg_mo, bool eva)
MemOp tcg_mo, bool eva)
{
TCGv addr, t0, val;
TCGLabel *l1 = gen_new_label();
@ -4549,7 +4549,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
}
static inline void gen_r6_ld(target_long addr, int reg, int memidx,
TCGMemOp memop)
MemOp memop)
{
TCGv t0 = tcg_const_tl(addr);
tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
@ -21859,7 +21859,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
extract32(ctx->opcode, 0, 8);
TCGv va = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGMemOp memop = (extract32(ctx->opcode, 8, 3)) ==
MemOp memop = (extract32(ctx->opcode, 8, 3)) ==
NM_P_LS_UAWM ? MO_UNALN : 0;
count = (count == 0) ? 8 : count;


@ -681,7 +681,7 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a)
return true;
}
static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop)
static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
{
TCGv ea;
@ -763,7 +763,7 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a)
return true;
}
static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop)
static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
{
TCGv t0 = tcg_temp_new();
tcg_gen_addi_tl(t0, cpu_R[a->a], a->i);


@ -163,7 +163,7 @@ struct DisasContext {
int mem_idx;
int access_type;
/* Translation flags */
TCGMemOp default_tcg_memop_mask;
MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
bool sf_mode;
bool has_cfar;
@ -3142,7 +3142,7 @@ static void gen_isync(DisasContext *ctx)
#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
static void gen_load_locked(DisasContext *ctx, TCGMemOp memop)
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
TCGv gpr = cpu_gpr[rD(ctx->opcode)];
TCGv t0 = tcg_temp_new();
@ -3167,7 +3167,7 @@ LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop,
static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
TCGv EA, TCGCond cond, int addend)
{
TCGv t = tcg_temp_new();
@ -3193,7 +3193,7 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop,
tcg_temp_free(u);
}
static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop)
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
uint32_t gpr_FC = FC(ctx->opcode);
TCGv EA = tcg_temp_new();
@ -3306,7 +3306,7 @@ static void gen_ldat(DisasContext *ctx)
}
#endif
static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop)
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
uint32_t gpr_FC = FC(ctx->opcode);
TCGv EA = tcg_temp_new();
@ -3389,7 +3389,7 @@ static void gen_stdat(DisasContext *ctx)
}
#endif
static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop)
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();


@ -18,7 +18,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv src1 = tcg_temp_new();
/* Put addr in load_res, data in load_val. */
@ -37,7 +37,7 @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
return true;
}
static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv src1 = tcg_temp_new();
TCGv src2 = tcg_temp_new();
@ -82,8 +82,8 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
}
static bool gen_amo(DisasContext *ctx, arg_atomic *a,
void(*func)(TCGv, TCGv, TCGv, TCGArg, TCGMemOp),
TCGMemOp mop)
void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
MemOp mop)
{
TCGv src1 = tcg_temp_new();
TCGv src2 = tcg_temp_new();


@ -135,7 +135,7 @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
return gen_branch(ctx, a, TCG_COND_GEU);
}
static bool gen_load(DisasContext *ctx, arg_lb *a, TCGMemOp memop)
static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
@ -174,7 +174,7 @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
return gen_load(ctx, a, MO_TEUW);
}
static bool gen_store(DisasContext *ctx, arg_sb *a, TCGMemOp memop)
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
TCGv t0 = tcg_temp_new();
TCGv dat = tcg_temp_new();


@ -1443,9 +1443,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
}
/* Sanity check writability of the store address. */
#ifndef CONFIG_USER_ONLY
probe_write(env, a2, 0, mem_idx, ra);
#endif
probe_write(env, a2, 1 << sc, mem_idx, ra);
/*
* Note that the compare-and-swap is atomic, and the store is atomic,
@ -2615,22 +2613,15 @@ uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
if (!h2g_valid(addr) || !h2g_valid(addr + len - 1) ||
page_check_range(addr, len, PAGE_WRITE) < 0) {
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
}
#else
/* test the actual access, not just any access to the page due to LAP */
while (len) {
const uint64_t pagelen = -(addr | -TARGET_PAGE_MASK);
const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
const uint64_t curlen = MIN(pagelen, len);
probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
addr = wrap_address(env, addr + curlen);
len -= curlen;
}
#endif
}
void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
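
The rewritten probe_write_access() drops the separate user-only branch and walks the range one page at a time, probing exactly the bytes that will be written. The corrected pagelen expression is the number of bytes left in the current page; a quick worked check, assuming 4 KiB target pages (the wrapper name and the concrete address are illustrative only):

/* Sketch of the corrected length calculation above, assuming
 * TARGET_PAGE_SIZE == 0x1000 and hence TARGET_PAGE_MASK == ~0xfff
 * (sign-extended to 64 bits when combined with a uint64_t). */
static inline uint64_t bytes_to_page_end(uint64_t addr)
{
    return -(addr | TARGET_PAGE_MASK);
}
/* Example: addr == 0x10000ff0
 *   addr | TARGET_PAGE_MASK  == 0xfffffffffffffff0
 *   -(addr | TARGET_PAGE_MASK) == 0x10, i.e. 16 bytes to the page boundary.
 * The old form, -(addr | -TARGET_PAGE_MASK), OR-ed in +0x1000 instead and
 * produced a bogus length, which is what the fix above addresses. */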


@ -152,7 +152,7 @@ static inline int vec_full_reg_offset(uint8_t reg)
return offsetof(CPUS390XState, vregs[reg][0]);
}
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es)
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
/* Convert element size (es) - e.g. MO_8 - to bytes */
const uint8_t bytes = 1 << es;
@ -2262,7 +2262,7 @@ static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
TCGMemOp mop = s->insn->data;
MemOp mop = s->insn->data;
TCGv_i64 addr, old, cc;
TCGLabel *lab = gen_new_label();
@ -3228,7 +3228,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
TCGv_i64 a1, a2;
TCGMemOp mop = s->insn->data;
MemOp mop = s->insn->data;
/* In a parallel context, stop the world and single step. */
if (tb_cflags(s->base.tb) & CF_PARALLEL) {


@ -57,13 +57,13 @@
#define FPF_LONG 3
#define FPF_EXT 4
static inline bool valid_vec_element(uint8_t enr, TCGMemOp es)
static inline bool valid_vec_element(uint8_t enr, MemOp es)
{
return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
}
static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
TCGMemOp memop)
MemOp memop)
{
const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
@ -96,7 +96,7 @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
}
static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
TCGMemOp memop)
MemOp memop)
{
const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
@ -123,7 +123,7 @@ static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
}
static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
TCGMemOp memop)
MemOp memop)
{
const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
@ -146,7 +146,7 @@ static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
}
static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
TCGMemOp memop)
MemOp memop)
{
const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);


@ -275,6 +275,7 @@ enum {
#define TTE_VALID_BIT (1ULL << 63)
#define TTE_NFO_BIT (1ULL << 60)
#define TTE_IE_BIT (1ULL << 59)
#define TTE_USED_BIT (1ULL << 41)
#define TTE_LOCKED_BIT (1ULL << 6)
#define TTE_SIDEEFFECT_BIT (1ULL << 3)
@ -291,6 +292,7 @@ enum {
#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT)
#define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT)
#define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT)
#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT)
#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT)
#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)


@ -88,7 +88,7 @@ static const int perm_table[2][8] = {
};
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
int *prot, int *access_index,
int *prot, int *access_index, MemTxAttrs *attrs,
target_ulong address, int rw, int mmu_idx,
target_ulong *page_size)
{
@ -219,6 +219,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
target_ulong vaddr;
target_ulong page_size;
int error_code = 0, prot, access_index;
MemTxAttrs attrs = {};
/*
* TODO: If we ever need tlb_vaddr_to_host for this target,
@ -229,7 +230,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
assert(!probe);
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
address, access_type,
mmu_idx, &page_size);
vaddr = address;
@ -490,8 +491,8 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
return 0;
}
static int get_physical_address_data(CPUSPARCState *env,
hwaddr *physical, int *prot,
static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
int *prot, MemTxAttrs *attrs,
target_ulong address, int rw, int mmu_idx)
{
CPUState *cs = env_cpu(env);
@ -536,6 +537,10 @@ static int get_physical_address_data(CPUSPARCState *env,
if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
int do_fault = 0;
if (TTE_IS_IE(env->dtlb[i].tte)) {
attrs->byte_swap = true;
}
/* access ok? */
/* multiple bits in SFSR.FT may be set on TT_DFAULT */
if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
@ -608,8 +613,8 @@ static int get_physical_address_data(CPUSPARCState *env,
return 1;
}
static int get_physical_address_code(CPUSPARCState *env,
hwaddr *physical, int *prot,
static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical,
int *prot, MemTxAttrs *attrs,
target_ulong address, int mmu_idx)
{
CPUState *cs = env_cpu(env);
@ -686,7 +691,7 @@ static int get_physical_address_code(CPUSPARCState *env,
}
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
int *prot, int *access_index,
int *prot, int *access_index, MemTxAttrs *attrs,
target_ulong address, int rw, int mmu_idx,
target_ulong *page_size)
{
@ -716,11 +721,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
if (rw == 2) {
return get_physical_address_code(env, physical, prot, address,
return get_physical_address_code(env, physical, prot, attrs, address,
mmu_idx);
} else {
return get_physical_address_data(env, physical, prot, address, rw,
mmu_idx);
return get_physical_address_data(env, physical, prot, attrs, address,
rw, mmu_idx);
}
}
@ -734,10 +739,11 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
target_ulong vaddr;
hwaddr paddr;
target_ulong page_size;
MemTxAttrs attrs = {};
int error_code = 0, prot, access_index;
address &= TARGET_PAGE_MASK;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
address, access_type,
mmu_idx, &page_size);
if (likely(error_code == 0)) {
@ -747,7 +753,8 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
env->dmmu.mmu_primary_context,
env->dmmu.mmu_secondary_context);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx,
page_size);
return true;
}
if (probe) {
@ -789,7 +796,7 @@ void dump_mmu(CPUSPARCState *env)
}
if (TTE_IS_VALID(env->dtlb[i].tte)) {
qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
i,
env->dtlb[i].tag & (uint64_t)~0x1fffULL,
TTE_PA(env->dtlb[i].tte),
@ -798,6 +805,8 @@ void dump_mmu(CPUSPARCState *env)
TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
TTE_IS_LOCKED(env->dtlb[i].tte) ?
"locked" : "unlocked",
TTE_IS_IE(env->dtlb[i].tte) ?
"yes" : "no",
env->dtlb[i].tag & (uint64_t)0x1fffULL,
TTE_IS_GLOBAL(env->dtlb[i].tte) ?
"global" : "local");
@ -849,9 +858,10 @@ static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
{
target_ulong page_size;
int prot, access_index;
MemTxAttrs attrs = {};
return get_physical_address(env, phys, &prot, &access_index, addr, rw,
mmu_idx, &page_size);
return get_physical_address(env, phys, &prot, &access_index, &attrs, addr,
rw, mmu_idx, &page_size);
}
#if defined(TARGET_SPARC64)


@ -2019,7 +2019,7 @@ static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
}
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
TCGv addr, int mmu_idx, TCGMemOp memop)
TCGv addr, int mmu_idx, MemOp memop)
{
gen_address_mask(dc, addr);
tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
@ -2050,10 +2050,10 @@ typedef struct {
ASIType type;
int asi;
int mem_idx;
TCGMemOp memop;
MemOp memop;
} DisasASI;
static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
{
int asi = GET_FIELD(insn, 19, 26);
ASIType type = GET_ASI_HELPER;
@ -2267,7 +2267,7 @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
}
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
int insn, TCGMemOp memop)
int insn, MemOp memop)
{
DisasASI da = get_asi(dc, insn, memop);
@ -2305,7 +2305,7 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
}
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
int insn, TCGMemOp memop)
int insn, MemOp memop)
{
DisasASI da = get_asi(dc, insn, memop);
@ -2511,7 +2511,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
case GET_ASI_BLOCK:
/* Valid for lddfa on aligned registers only. */
if (size == 8 && (rd & 7) == 0) {
TCGMemOp memop;
MemOp memop;
TCGv eight;
int i;
@ -2625,7 +2625,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
case GET_ASI_BLOCK:
/* Valid for stdfa on aligned registers only. */
if (size == 8 && (rd & 7) == 0) {
TCGMemOp memop;
MemOp memop;
TCGv eight;
int i;


@ -290,7 +290,7 @@ static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd)
}
static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
unsigned srcb, TCGMemOp memop, const char *name)
unsigned srcb, MemOp memop, const char *name)
{
if (dest) {
return TILEGX_EXCP_OPCODE_UNKNOWN;
@ -305,7 +305,7 @@ static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
}
static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
int imm, TCGMemOp memop, const char *name)
int imm, MemOp memop, const char *name)
{
TCGv tsrca = load_gr(dc, srca);
TCGv tsrcb = load_gr(dc, srcb);
@ -496,7 +496,7 @@ static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
{
TCGv tdest, tsrca;
const char *mnemonic;
TCGMemOp memop;
MemOp memop;
TileExcp ret = TILEGX_EXCP_NONE;
bool prefetch_nofault = false;
@ -1478,7 +1478,7 @@ static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
TCGv tsrca = load_gr(dc, srca);
bool prefetch_nofault = false;
const char *mnemonic;
TCGMemOp memop;
MemOp memop;
int i2, i3;
TCGv t0;
@ -2106,7 +2106,7 @@ static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
unsigned srca = get_SrcA_Y2(bundle);
unsigned srcbdest = get_SrcBDest_Y2(bundle);
const char *mnemonic;
TCGMemOp memop;
MemOp memop;
bool prefetch_nofault = false;
switch (OEY2(opc, mode)) {


@ -219,7 +219,7 @@ static inline void generate_trap(DisasContext *ctx, int class, int tin);
/* Functions for load/save to/from memory */
static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
int16_t con, TCGMemOp mop)
int16_t con, MemOp mop)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_tl(temp, r2, con);
@ -228,7 +228,7 @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
}
static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
int16_t con, TCGMemOp mop)
int16_t con, MemOp mop)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_tl(temp, r2, con);
@ -276,7 +276,7 @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
}
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
TCGMemOp mop)
MemOp mop)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_tl(temp, r2, off);
@ -286,7 +286,7 @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
}
static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
TCGMemOp mop)
MemOp mop)
{
TCGv temp = tcg_temp_new();
tcg_gen_addi_tl(temp, r2, off);


@ -512,7 +512,7 @@ Both t0 and t1 may be split into little-endian ordered pairs of registers
if dealing with 64-bit quantities on a 32-bit host.
The memidx selects the qemu tlb index to use (e.g. user or kernel access).
The flags are the TCGMemOp bits, selecting the sign, width, and endianness
The flags are the MemOp bits, selecting the sign, width, and endianness
of the memory access.
For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
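
As a reader's aid for the renamed type: a MemOp value is built by OR-ing a width, an optional MO_SIGN, optional alignment bits, and an endianness selector, and that composite is what a front end hands to the qemu_ld/st generators. A small hedged example (the temporaries and memidx are assumed to already exist; the flag combinations are taken from hunks elsewhere in this diff):

MemOp op = MO_TEUL;                 /* 32-bit, zero-extended, target-endian */
tcg_gen_qemu_ld_i64(val, addr, memidx, op);

op = MO_TEUL | MO_ALIGN_16;         /* hppa ldcw above: additionally require 16-byte alignment */
op = s->be_data + size;             /* aarch64 above: endianness selector plus an MO_8..MO_64 width */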


@ -1423,7 +1423,7 @@ static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn)
tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn);
}
static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits,
static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
TCGReg rd, TCGReg rn)
{
/* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
@ -1431,7 +1431,7 @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits,
tcg_out_sbfm(s, ext, rd, rn, 0, bits);
}
static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits,
static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
TCGReg rd, TCGReg rn)
{
/* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
@ -1580,8 +1580,8 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp size = opc & MO_SIZE;
MemOp opc = get_memop(oi);
MemOp size = opc & MO_SIZE;
if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
return false;
@ -1605,8 +1605,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp size = opc & MO_SIZE;
MemOp opc = get_memop(oi);
MemOp size = opc & MO_SIZE;
if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
return false;
@ -1649,7 +1649,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
slow path for the failure case, which will be patched later when finalizing
the slow path. Generated code returns the host addend in X1,
clobbers X0,X2,X3,TMP. */
static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
tcg_insn_unit **label_ptr, int mem_index,
bool is_read)
{
@ -1709,11 +1709,11 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext,
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
TCGReg data_r, TCGReg addr_r,
TCGType otype, TCGReg off_r)
{
const TCGMemOp bswap = memop & MO_BSWAP;
const MemOp bswap = memop & MO_BSWAP;
switch (memop & MO_SSIZE) {
case MO_UB:
@ -1765,11 +1765,11 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext,
}
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
TCGReg data_r, TCGReg addr_r,
TCGType otype, TCGReg off_r)
{
const TCGMemOp bswap = memop & MO_BSWAP;
const MemOp bswap = memop & MO_BSWAP;
switch (memop & MO_SIZE) {
case MO_8:
@ -1804,7 +1804,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOpIdx oi, TCGType ext)
{
TCGMemOp memop = get_memop(oi);
MemOp memop = get_memop(oi);
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
@ -1829,7 +1829,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOpIdx oi)
{
TCGMemOp memop = get_memop(oi);
MemOp memop = get_memop(oi);
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);


@ -1233,7 +1233,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
TCGMemOp opc, int mem_index, bool is_load)
MemOp opc, int mem_index, bool is_load)
{
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write));
@ -1348,7 +1348,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGReg argreg, datalo, datahi;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
void *func;
if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
@ -1412,7 +1412,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGReg argreg, datalo, datahi;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
return false;
@ -1453,11 +1453,11 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
}
#endif /* SOFTMMU */
static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addend)
{
TCGMemOp bswap = opc & MO_BSWAP;
MemOp bswap = opc & MO_BSWAP;
switch (opc & MO_SSIZE) {
case MO_UB:
@ -1514,11 +1514,11 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
}
}
static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo)
{
TCGMemOp bswap = opc & MO_BSWAP;
MemOp bswap = opc & MO_BSWAP;
switch (opc & MO_SSIZE) {
case MO_UB:
@ -1577,7 +1577,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#ifdef CONFIG_SOFTMMU
int mem_index;
TCGReg addend;
@ -1614,11 +1614,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
#endif
}
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addend)
{
TCGMemOp bswap = opc & MO_BSWAP;
MemOp bswap = opc & MO_BSWAP;
switch (opc & MO_SIZE) {
case MO_8:
@ -1659,11 +1659,11 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
}
}
static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo)
{
TCGMemOp bswap = opc & MO_BSWAP;
MemOp bswap = opc & MO_BSWAP;
switch (opc & MO_SIZE) {
case MO_8:
@ -1708,7 +1708,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#ifdef CONFIG_SOFTMMU
int mem_index;
TCGReg addend;

@ -1697,7 +1697,7 @@ static void * const qemu_st_helpers[16] = {
First argument register is clobbered. */
static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
int mem_index, TCGMemOp opc,
int mem_index, MemOp opc,
tcg_insn_unit **label_ptr, int which)
{
const TCGReg r0 = TCG_REG_L0;
@ -1810,7 +1810,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
TCGReg data_reg;
tcg_insn_unit **label_ptr = &l->label_ptr[0];
int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);
@ -1895,8 +1895,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp s_bits = opc & MO_SIZE;
MemOp opc = get_memop(oi);
MemOp s_bits = opc & MO_SIZE;
tcg_insn_unit **label_ptr = &l->label_ptr[0];
TCGReg retaddr;
@ -1995,10 +1995,10 @@ static inline int setup_guest_base_seg(void)
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
int seg, bool is64, TCGMemOp memop)
int seg, bool is64, MemOp memop)
{
const TCGMemOp real_bswap = memop & MO_BSWAP;
TCGMemOp bswap = real_bswap;
const MemOp real_bswap = memop & MO_BSWAP;
MemOp bswap = real_bswap;
int rexw = is64 * P_REXW;
int movop = OPC_MOVL_GvEv;
@ -2103,7 +2103,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
int mem_index;
tcg_insn_unit *label_ptr[2];
@ -2137,15 +2137,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
int seg, TCGMemOp memop)
int seg, MemOp memop)
{
/* ??? Ideally we wouldn't need a scratch register. For user-only,
we could perform the bswap twice to restore the original value
instead of moving to the scratch. But as it is, the L constraint
means that TCG_REG_L0 is definitely free here. */
const TCGReg scratch = TCG_REG_L0;
const TCGMemOp real_bswap = memop & MO_BSWAP;
TCGMemOp bswap = real_bswap;
const MemOp real_bswap = memop & MO_BSWAP;
MemOp bswap = real_bswap;
int movop = OPC_MOVL_EvGv;
if (have_movbe && real_bswap) {
@ -2221,7 +2221,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
int mem_index;
tcg_insn_unit *label_ptr[2];
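
The scratch-register comment in the x86 store path above notes that, for user-only mode, the byte swap could be performed twice to restore the original value instead of going through TCG_REG_L0. A minimal C sketch of that idea (not part of the patch; bswap32 here stands in for the emitted bswap instruction):

/* Sketch: store a value byte-swapped without a scratch register by
 * swapping in place, storing, then swapping back. */
#include <stdint.h>

static inline uint32_t bswap32(uint32_t x)
{
    return __builtin_bswap32(x);      /* stand-in for the generated bswap */
}

static void store_swapped_no_scratch(uint32_t *slot, uint32_t *val)
{
    *val = bswap32(*val);             /* swap the live value in place */
    *slot = *val;                     /* store the swapped form */
    *val = bswap32(*val);             /* second swap restores the original */
}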

@ -1215,7 +1215,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
TCGReg addrh, TCGMemOpIdx oi,
tcg_insn_unit *label_ptr[2], bool is_load)
{
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
int mem_index = get_mmuidx(oi);
@ -1313,7 +1313,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
TCGReg v0;
int i;
@ -1363,8 +1363,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp s_bits = opc & MO_SIZE;
MemOp opc = get_memop(oi);
MemOp s_bits = opc & MO_SIZE;
int i;
/* resolve label address */
@ -1413,7 +1413,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
#endif
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, TCGMemOp opc, bool is_64)
TCGReg base, MemOp opc, bool is_64)
{
switch (opc & (MO_SSIZE | MO_BSWAP)) {
case MO_UB:
@ -1521,7 +1521,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
TCGReg addr_regl, addr_regh __attribute__((unused));
TCGReg data_regl, data_regh;
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
#endif
@ -1558,7 +1558,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, TCGMemOp opc)
TCGReg base, MemOp opc)
{
/* Don't clutter the code below with checks to avoid bswapping ZERO. */
if ((lo | hi) == 0) {
@ -1624,7 +1624,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
TCGReg addr_regl, addr_regh __attribute__((unused));
TCGReg data_regl, data_regh;
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
#endif

@ -1013,7 +1013,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(qemu_ld):
{
TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
TCGMemOp mop = get_memop(oi);
MemOp mop = get_memop(oi);
if (!(mop & MO_SIGN)) {
mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
}
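
The known-zeros mask in the optimize.c hunk above is written as (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1 rather than the more direct (1ULL << (8 << size)) - 1, so that the MO_64 case never shifts a 64-bit value by 64. A quick sketch (not part of the patch) checking that the two forms agree:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (unsigned size = 0; size <= 3; size++) {            /* MO_8 .. MO_64 */
        uint64_t mask  = (2ULL << ((8 << size) - 1)) - 1;    /* formula from the hunk */
        uint64_t naive = size == 3 ? UINT64_MAX              /* 1ULL << 64 would be undefined */
                                   : (1ULL << (8 << size)) - 1;
        assert(mask == naive);
    }
    return 0;
}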

@ -1506,7 +1506,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
in CR7, loads the addend of the TLB into R3, and returns the register
containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
TCGReg addrlo, TCGReg addrhi,
int mem_index, bool is_read)
{
@ -1633,7 +1633,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
TCGReg hi, lo, arg = TCG_REG_R3;
if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
@ -1680,8 +1680,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp s_bits = opc & MO_SIZE;
MemOp opc = get_memop(oi);
MemOp s_bits = opc & MO_SIZE;
TCGReg hi, lo, arg = TCG_REG_R3;
if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
@ -1744,7 +1744,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
TCGReg datalo, datahi, addrlo, rbase;
TCGReg addrhi __attribute__((unused));
TCGMemOpIdx oi;
TCGMemOp opc, s_bits;
MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
int mem_index;
tcg_insn_unit *label_ptr;
@ -1819,7 +1819,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
TCGReg datalo, datahi, addrlo, rbase;
TCGReg addrhi __attribute__((unused));
TCGMemOpIdx oi;
TCGMemOp opc, s_bits;
MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
int mem_index;
tcg_insn_unit *label_ptr;

@ -970,7 +970,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
TCGReg addrh, TCGMemOpIdx oi,
tcg_insn_unit **label_ptr, bool is_load)
{
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
tcg_target_long compare_mask;
@ -1044,7 +1044,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
TCGReg a0 = tcg_target_call_iarg_regs[0];
TCGReg a1 = tcg_target_call_iarg_regs[1];
TCGReg a2 = tcg_target_call_iarg_regs[2];
@ -1077,8 +1077,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
TCGMemOp s_bits = opc & MO_SIZE;
MemOp opc = get_memop(oi);
MemOp s_bits = opc & MO_SIZE;
TCGReg a0 = tcg_target_call_iarg_regs[0];
TCGReg a1 = tcg_target_call_iarg_regs[1];
TCGReg a2 = tcg_target_call_iarg_regs[2];
@ -1121,9 +1121,9 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, TCGMemOp opc, bool is_64)
TCGReg base, MemOp opc, bool is_64)
{
const TCGMemOp bswap = opc & MO_BSWAP;
const MemOp bswap = opc & MO_BSWAP;
/* We don't yet handle byteswapping, assert */
g_assert(!bswap);
@ -1172,7 +1172,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
TCGReg addr_regl, addr_regh __attribute__((unused));
TCGReg data_regl, data_regh;
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[1];
#endif
@ -1208,9 +1208,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, TCGMemOp opc)
TCGReg base, MemOp opc)
{
const TCGMemOp bswap = opc & MO_BSWAP;
const MemOp bswap = opc & MO_BSWAP;
/* We don't yet handle byteswapping, assert */
g_assert(!bswap);
@ -1243,7 +1243,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
TCGReg addr_regl, addr_regh __attribute__((unused));
TCGReg data_regl, data_regh;
TCGMemOpIdx oi;
TCGMemOp opc;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[1];
#endif

@ -1430,7 +1430,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
}
}
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
TCGReg base, TCGReg index, int disp)
{
switch (opc & (MO_SSIZE | MO_BSWAP)) {
@ -1489,7 +1489,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
}
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
TCGReg base, TCGReg index, int disp)
{
switch (opc & (MO_SIZE | MO_BSWAP)) {
@ -1544,7 +1544,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
addend into R2. Returns a register with the sanitized guest address. */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
int mem_index, bool is_ld)
{
unsigned s_bits = opc & MO_SIZE;
@ -1614,7 +1614,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
TCGReg addr_reg = lb->addrlo_reg;
TCGReg data_reg = lb->datalo_reg;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
(intptr_t)s->code_ptr, 2)) {
@ -1639,7 +1639,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
TCGReg addr_reg = lb->addrlo_reg;
TCGReg data_reg = lb->datalo_reg;
TCGMemOpIdx oi = lb->oi;
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
(intptr_t)s->code_ptr, 2)) {
@ -1694,7 +1694,7 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOpIdx oi)
{
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;
@ -1721,7 +1721,7 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
TCGMemOpIdx oi)
{
TCGMemOp opc = get_memop(oi);
MemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;

@ -1081,7 +1081,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
is in the returned register, maybe %o0. The TLB addend is in %o1. */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
TCGMemOp opc, int which)
MemOp opc, int which)
{
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
@ -1164,7 +1164,7 @@ static const int qemu_st_opc[16] = {
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
TCGMemOpIdx oi, bool is_64)
{
TCGMemOp memop = get_memop(oi);
MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
unsigned memi = get_mmuidx(oi);
TCGReg addrz, param;
@ -1246,7 +1246,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
TCGMemOpIdx oi)
{
TCGMemOp memop = get_memop(oi);
MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
unsigned memi = get_mmuidx(oi);
TCGReg addrz, param;

@ -2714,7 +2714,7 @@ void tcg_gen_lookup_and_goto_ptr(void)
}
}
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
/* Trigger the asserts within as early as possible. */
(void)get_alignment_bits(op);
@ -2743,7 +2743,7 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
}
static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
TCGMemOp memop, TCGArg idx)
MemOp memop, TCGArg idx)
{
TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
@ -2758,7 +2758,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
}
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
TCGMemOp memop, TCGArg idx)
MemOp memop, TCGArg idx)
{
TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
@ -2788,9 +2788,9 @@ static void tcg_gen_req_mo(TCGBar type)
}
}
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGMemOp orig_memop;
MemOp orig_memop;
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
@ -2825,7 +2825,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
}
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGv_i32 swap = NULL;
@ -2858,9 +2858,9 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
}
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGMemOp orig_memop;
MemOp orig_memop;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
@ -2911,7 +2911,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
}
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
TCGv_i64 swap = NULL;
@ -2953,7 +2953,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
}
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
switch (opc & MO_SSIZE) {
case MO_SB:
@ -2974,7 +2974,7 @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
}
}
static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
switch (opc & MO_SSIZE) {
case MO_SB:
@ -3034,7 +3034,7 @@ static void * const table_cmpxchg[16] = {
};
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
TCGv_i32 newv, TCGArg idx, MemOp memop)
{
memop = tcg_canonicalize_memop(memop, 0, 0);
@ -3078,7 +3078,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
}
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
TCGv_i64 newv, TCGArg idx, MemOp memop)
{
memop = tcg_canonicalize_memop(memop, 1, 0);
@ -3142,7 +3142,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
}
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
TCGArg idx, TCGMemOp memop, bool new_val,
TCGArg idx, MemOp memop, bool new_val,
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
TCGv_i32 t1 = tcg_temp_new_i32();
@ -3160,7 +3160,7 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
}
static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
TCGArg idx, TCGMemOp memop, void * const table[])
TCGArg idx, MemOp memop, void * const table[])
{
gen_atomic_op_i32 gen;
@ -3185,7 +3185,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
}
static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
TCGArg idx, TCGMemOp memop, bool new_val,
TCGArg idx, MemOp memop, bool new_val,
void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
TCGv_i64 t1 = tcg_temp_new_i64();
@ -3203,7 +3203,7 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
}
static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
TCGArg idx, TCGMemOp memop, void * const table[])
TCGArg idx, MemOp memop, void * const table[])
{
memop = tcg_canonicalize_memop(memop, 1, 0);
@ -3257,7 +3257,7 @@ static void * const table_##NAME[16] = { \
WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \
}; \
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \
{ \
if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
@ -3267,7 +3267,7 @@ void tcg_gen_atomic_##NAME##_i32 \
} \
} \
void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \
{ \
if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \

@ -851,10 +851,10 @@ void tcg_gen_lookup_and_goto_ptr(void);
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
#endif
void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);
static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
@ -912,46 +912,46 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
}
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
TCGArg, TCGMemOp);
TCGArg, MemOp);
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
TCGArg, TCGMemOp);
TCGArg, MemOp);
void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);

@ -2055,7 +2055,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
case INDEX_op_qemu_st_i64:
{
TCGMemOpIdx oi = op->args[k++];
TCGMemOp op = get_memop(oi);
MemOp op = get_memop(oi);
unsigned ix = get_mmuidx(oi);
if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {

tcg/tcg.h
@ -26,6 +26,7 @@
#define TCG_H
#include "cpu.h"
#include "exec/memop.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/queue.h"
@ -309,103 +310,13 @@ typedef enum TCGType {
#endif
} TCGType;
/* Constants for qemu_ld and qemu_st for the Memory Operation field. */
typedef enum TCGMemOp {
MO_8 = 0,
MO_16 = 1,
MO_32 = 2,
MO_64 = 3,
MO_SIZE = 3, /* Mask for the above. */
MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
MO_BSWAP = 8, /* Host reverse endian. */
#ifdef HOST_WORDS_BIGENDIAN
MO_LE = MO_BSWAP,
MO_BE = 0,
#else
MO_LE = 0,
MO_BE = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
MO_TE = MO_BE,
#else
MO_TE = MO_LE,
#endif
/*
* MO_UNALN accesses are never checked for alignment.
* MO_ALIGN accesses will result in a call to the CPU's
* do_unaligned_access hook if the guest address is not aligned.
* The default depends on whether the target CPU defines
* TARGET_ALIGNED_ONLY.
*
* Some architectures (e.g. ARMv8) need the address which is aligned
* to a size more than the size of the memory access.
* Some architectures (e.g. SPARCv9) need an address which is aligned,
* but less strictly than the natural alignment.
*
* MO_ALIGN supposes the alignment size is the size of a memory access.
*
* There are three options:
* - unaligned access permitted (MO_UNALN).
* - an alignment to the size of an access (MO_ALIGN);
* - an alignment to a specified size, which may be more or less than
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
*/
MO_ASHIFT = 4,
MO_AMASK = 7 << MO_ASHIFT,
#ifdef TARGET_ALIGNED_ONLY
MO_ALIGN = 0,
MO_UNALN = MO_AMASK,
#else
MO_ALIGN = MO_AMASK,
MO_UNALN = 0,
#endif
MO_ALIGN_2 = 1 << MO_ASHIFT,
MO_ALIGN_4 = 2 << MO_ASHIFT,
MO_ALIGN_8 = 3 << MO_ASHIFT,
MO_ALIGN_16 = 4 << MO_ASHIFT,
MO_ALIGN_32 = 5 << MO_ASHIFT,
MO_ALIGN_64 = 6 << MO_ASHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
MO_Q = MO_64,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
MO_LEQ = MO_LE | MO_Q,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
MO_BEQ = MO_BE | MO_Q,
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
MO_TEQ = MO_TE | MO_Q,
MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
/**
* get_alignment_bits
* @memop: TCGMemOp value
* @memop: MemOp value
*
* Extract the alignment size from the memop.
*/
static inline unsigned get_alignment_bits(TCGMemOp memop)
static inline unsigned get_alignment_bits(MemOp memop)
{
unsigned a = memop & MO_AMASK;
@ -1186,7 +1097,7 @@ static inline size_t tcg_current_code_size(TCGContext *s)
return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}
/* Combine the TCGMemOp and mmu_idx parameters into a single value. */
/* Combine the MemOp and mmu_idx parameters into a single value. */
typedef uint32_t TCGMemOpIdx;
/**
@ -1196,7 +1107,7 @@ typedef uint32_t TCGMemOpIdx;
*
* Encode these values into a single parameter.
*/
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
tcg_debug_assert(idx <= 15);
return (op << 4) | idx;
@ -1208,7 +1119,7 @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
*
* Extract the memory operation from the combined value.
*/
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
static inline MemOp get_memop(TCGMemOpIdx oi)
{
return oi >> 4;
}
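
The tcg/tcg.h hunk above moves the memory-operation constants out to exec/memop.h and retypes the helpers that pack a MemOp together with an mmu index into a TCGMemOpIdx. A self-contained sketch of that packing (not part of the patch; the MO_* values and the <<4 / >>4 encoding are taken from the hunk, while get_mmuidx is an assumed counterpart that masks the low nibble):

#include <assert.h>
#include <stdint.h>

typedef uint32_t TCGMemOpIdx;

enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
    MO_SIZE = 3, MO_SIGN = 4, MO_BSWAP = 8,
    MO_ASHIFT = 4,
    MO_ALIGN_4 = 2 << MO_ASHIFT,              /* require 4-byte alignment */
};

static inline TCGMemOpIdx make_memop_idx(unsigned op, unsigned idx)
{
    assert(idx <= 15);
    return (op << 4) | idx;                   /* MemOp above, mmu_idx in the low nibble */
}

static inline unsigned get_memop(TCGMemOpIdx oi)  { return oi >> 4; }
static inline unsigned get_mmuidx(TCGMemOpIdx oi) { return oi & 15; }   /* assumption */

int main(void)
{
    TCGMemOpIdx oi = make_memop_idx(MO_32 | MO_BSWAP | MO_ALIGN_4, 3);

    assert(get_memop(oi) == (MO_32 | MO_BSWAP | MO_ALIGN_4));
    assert(get_mmuidx(oi) == 3);
    assert((get_memop(oi) & MO_SIZE) == MO_32);   /* a 4-byte access */
    return 0;
}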

@ -16,7 +16,7 @@
#define TRACE_MEM_ST (1ULL << 5) /* store (y/n) */
static inline uint8_t trace_mem_build_info(
int size_shift, bool sign_extend, TCGMemOp endianness, bool store)
int size_shift, bool sign_extend, MemOp endianness, bool store)
{
uint8_t res;
@ -33,7 +33,7 @@ static inline uint8_t trace_mem_build_info(
return res;
}
static inline uint8_t trace_mem_get_info(TCGMemOp op, bool store)
static inline uint8_t trace_mem_get_info(MemOp op, bool store)
{
return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
op & MO_BSWAP, store);


@ -18,7 +18,7 @@
*
* Return a value for the 'info' argument in guest memory access traces.
*/
static uint8_t trace_mem_get_info(TCGMemOp op, bool store);
static uint8_t trace_mem_get_info(MemOp op, bool store);
/**
* trace_mem_build_info:
@ -26,7 +26,7 @@ static uint8_t trace_mem_get_info(TCGMemOp op, bool store);
* Return a value for the 'info' argument in guest memory access traces.
*/
static uint8_t trace_mem_build_info(int size_shift, bool sign_extend,
TCGMemOp endianness, bool store);
MemOp endianness, bool store);
#include "trace/mem-internal.h"