From 14776ab5a12972ea439c7fb2203a4c15a09094b4 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:10:58 +1000 Subject: [PATCH 01/36] tcg: TCGMemOp is now accelerator independent MemOp Preparation for collapsing the two byte swaps, adjust_endianness and handle_bswap, along the I/O path. Target dependant attributes are conditionalized upon NEED_CPU_H. Signed-off-by: Tony Nguyen Acked-by: David Gibson Reviewed-by: Richard Henderson Acked-by: Cornelia Huck Message-Id: <81d9cd7d7f5aaadfa772d6c48ecee834e9cf7882.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- MAINTAINERS | 1 + accel/tcg/cputlb.c | 2 +- include/exec/memop.h | 110 ++++++++++++++++++++ target/alpha/translate.c | 2 +- target/arm/translate-a64.c | 48 ++++----- target/arm/translate-a64.h | 2 +- target/arm/translate-sve.c | 2 +- target/arm/translate.c | 32 +++--- target/arm/translate.h | 2 +- target/hppa/translate.c | 14 +-- target/i386/translate.c | 132 ++++++++++++------------ target/m68k/translate.c | 2 +- target/microblaze/translate.c | 4 +- target/mips/translate.c | 8 +- target/openrisc/translate.c | 4 +- target/ppc/translate.c | 12 +-- target/riscv/insn_trans/trans_rva.inc.c | 8 +- target/riscv/insn_trans/trans_rvi.inc.c | 4 +- target/s390x/translate.c | 6 +- target/s390x/translate_vx.inc.c | 10 +- target/sparc/translate.c | 14 +-- target/tilegx/translate.c | 10 +- target/tricore/translate.c | 8 +- tcg/README | 2 +- tcg/aarch64/tcg-target.inc.c | 26 ++--- tcg/arm/tcg-target.inc.c | 26 ++--- tcg/i386/tcg-target.inc.c | 24 ++--- tcg/mips/tcg-target.inc.c | 16 +-- tcg/optimize.c | 2 +- tcg/ppc/tcg-target.inc.c | 12 +-- tcg/riscv/tcg-target.inc.c | 20 ++-- tcg/s390/tcg-target.inc.c | 14 +-- tcg/sparc/tcg-target.inc.c | 6 +- tcg/tcg-op.c | 38 +++---- tcg/tcg-op.h | 80 +++++++------- tcg/tcg.c | 2 +- tcg/tcg.h | 101 ++---------------- trace/mem-internal.h | 4 +- trace/mem.h | 4 +- 39 files changed, 418 insertions(+), 396 deletions(-) create mode 100644 include/exec/memop.h diff --git a/MAINTAINERS b/MAINTAINERS index ef6c01084b..c4c90f732d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1890,6 +1890,7 @@ M: Paolo Bonzini S: Supported F: include/exec/ioport.h F: ioport.c +F: include/exec/memop.h F: include/exec/memory.h F: include/exec/ram_addr.h F: memory.c diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index bb9897b25a..523be4c848 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1133,7 +1133,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr = tlb_addr_write(tlbe); - TCGMemOp mop = get_memop(oi); + MemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); int s_bits = mop & MO_SIZE; void *hostaddr; diff --git a/include/exec/memop.h b/include/exec/memop.h new file mode 100644 index 0000000000..7262ca3dfd --- /dev/null +++ b/include/exec/memop.h @@ -0,0 +1,110 @@ +/* + * Constants for memory operations + * + * Authors: + * Richard Henderson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMOP_H +#define MEMOP_H + +typedef enum MemOp { + MO_8 = 0, + MO_16 = 1, + MO_32 = 2, + MO_64 = 3, + MO_SIZE = 3, /* Mask for the above. */ + + MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ + + MO_BSWAP = 8, /* Host reverse endian. 
*/ +#ifdef HOST_WORDS_BIGENDIAN + MO_LE = MO_BSWAP, + MO_BE = 0, +#else + MO_LE = 0, + MO_BE = MO_BSWAP, +#endif +#ifdef NEED_CPU_H +#ifdef TARGET_WORDS_BIGENDIAN + MO_TE = MO_BE, +#else + MO_TE = MO_LE, +#endif +#endif + + /* + * MO_UNALN accesses are never checked for alignment. + * MO_ALIGN accesses will result in a call to the CPU's + * do_unaligned_access hook if the guest address is not aligned. + * The default depends on whether the target CPU defines + * TARGET_ALIGNED_ONLY. + * + * Some architectures (e.g. ARMv8) need the address which is aligned + * to a size more than the size of the memory access. + * Some architectures (e.g. SPARCv9) need an address which is aligned, + * but less strictly than the natural alignment. + * + * MO_ALIGN supposes the alignment size is the size of a memory access. + * + * There are three options: + * - unaligned access permitted (MO_UNALN). + * - an alignment to the size of an access (MO_ALIGN); + * - an alignment to a specified size, which may be more or less than + * the access size (MO_ALIGN_x where 'x' is a size in bytes); + */ + MO_ASHIFT = 4, + MO_AMASK = 7 << MO_ASHIFT, +#ifdef NEED_CPU_H +#ifdef TARGET_ALIGNED_ONLY + MO_ALIGN = 0, + MO_UNALN = MO_AMASK, +#else + MO_ALIGN = MO_AMASK, + MO_UNALN = 0, +#endif +#endif + MO_ALIGN_2 = 1 << MO_ASHIFT, + MO_ALIGN_4 = 2 << MO_ASHIFT, + MO_ALIGN_8 = 3 << MO_ASHIFT, + MO_ALIGN_16 = 4 << MO_ASHIFT, + MO_ALIGN_32 = 5 << MO_ASHIFT, + MO_ALIGN_64 = 6 << MO_ASHIFT, + + /* Combinations of the above, for ease of use. */ + MO_UB = MO_8, + MO_UW = MO_16, + MO_UL = MO_32, + MO_SB = MO_SIGN | MO_8, + MO_SW = MO_SIGN | MO_16, + MO_SL = MO_SIGN | MO_32, + MO_Q = MO_64, + + MO_LEUW = MO_LE | MO_UW, + MO_LEUL = MO_LE | MO_UL, + MO_LESW = MO_LE | MO_SW, + MO_LESL = MO_LE | MO_SL, + MO_LEQ = MO_LE | MO_Q, + + MO_BEUW = MO_BE | MO_UW, + MO_BEUL = MO_BE | MO_UL, + MO_BESW = MO_BE | MO_SW, + MO_BESL = MO_BE | MO_SL, + MO_BEQ = MO_BE | MO_Q, + +#ifdef NEED_CPU_H + MO_TEUW = MO_TE | MO_UW, + MO_TEUL = MO_TE | MO_UL, + MO_TESW = MO_TE | MO_SW, + MO_TESL = MO_TE | MO_SL, + MO_TEQ = MO_TE | MO_Q, +#endif + + MO_SSIZE = MO_SIZE | MO_SIGN, +} MemOp; + +#endif diff --git a/target/alpha/translate.c b/target/alpha/translate.c index 1e29653aac..a69f58bf65 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -403,7 +403,7 @@ static inline void gen_store_mem(DisasContext *ctx, static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, int32_t disp16, int mem_idx, - TCGMemOp op) + MemOp op) { TCGLabel *lab_fail, *lab_done; TCGv addr, val; diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 6fd0b779d3..29c6742117 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -85,7 +85,7 @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64); typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr); typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); -typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp); +typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); /* initialize TCG globals. */ void a64_translate_init(void) @@ -433,7 +433,7 @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) * Dn, Sn, Hn or Bn). 
* (Note that this is not the same mapping as for A32; see cpu.h) */ -static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size) +static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size) { return vec_reg_offset(s, regno, 0, size); } @@ -849,7 +849,7 @@ static void do_gpr_ld_memidx(DisasContext *s, bool iss_valid, unsigned int iss_srt, bool iss_sf, bool iss_ar) { - TCGMemOp memop = s->be_data + size; + MemOp memop = s->be_data + size; g_assert(size <= 3); @@ -926,7 +926,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) TCGv_i64 tmphi; if (size < 4) { - TCGMemOp memop = s->be_data + size; + MemOp memop = s->be_data + size; tmphi = tcg_const_i64(0); tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop); } else { @@ -967,7 +967,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) /* Get value of an element within a vector register */ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); switch (memop) { @@ -999,7 +999,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, } static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); switch (memop) { @@ -1026,7 +1026,7 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, /* Set value of an element within a vector register */ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, - int element, TCGMemOp memop) + int element, MemOp memop) { int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); switch (memop) { @@ -1048,7 +1048,7 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, } static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, - int destidx, int element, TCGMemOp memop) + int destidx, int element, MemOp memop) { int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); switch (memop) { @@ -1068,7 +1068,7 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, /* Store from vector register to memory */ static void do_vec_st(DisasContext *s, int srcidx, int element, - TCGv_i64 tcg_addr, int size, TCGMemOp endian) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGv_i64 tcg_tmp = tcg_temp_new_i64(); @@ -1080,7 +1080,7 @@ static void do_vec_st(DisasContext *s, int srcidx, int element, /* Load from memory to vector register */ static void do_vec_ld(DisasContext *s, int destidx, int element, - TCGv_i64 tcg_addr, int size, TCGMemOp endian) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGv_i64 tcg_tmp = tcg_temp_new_i64(); @@ -2176,7 +2176,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i64 addr, int size, bool is_pair) { int idx = get_mem_index(s); - TCGMemOp memop = s->be_data; + MemOp memop = s->be_data; g_assert(size <= 3); if (is_pair) { @@ -3262,7 +3262,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) bool is_postidx = extract32(insn, 23, 1); bool is_q = extract32(insn, 30, 1); TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; - TCGMemOp endian = s->be_data; + MemOp endian = s->be_data; int ebytes; /* bytes per element */ int elements; /* elements per vector */ @@ -5431,7 +5431,7 @@ static void disas_fp_csel(DisasContext *s, 
uint32_t insn) unsigned int mos, type, rm, cond, rn, rd; TCGv_i64 t_true, t_false, t_zero; DisasCompare64 c; - TCGMemOp sz; + MemOp sz; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); @@ -6243,7 +6243,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) int mos = extract32(insn, 29, 3); uint64_t imm; TCGv_i64 tcg_res; - TCGMemOp sz; + MemOp sz; if (mos || imm5) { unallocated_encoding(s); @@ -7006,7 +7006,7 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, { if (esize == size) { int element; - TCGMemOp msize = esize == 16 ? MO_16 : MO_32; + MemOp msize = esize == 16 ? MO_16 : MO_32; TCGv_i32 tcg_elem; /* We should have one register left here */ @@ -7998,7 +7998,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, int shift = (2 * esize) - immhb; int elements = is_scalar ? 1 : (64 / esize); bool round = extract32(opcode, 0, 1); - TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); + MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); TCGv_i64 tcg_rn, tcg_rd, tcg_round; TCGv_i32 tcg_rd_narrowed; TCGv_i64 tcg_final; @@ -8157,7 +8157,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } }; NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; - TCGMemOp memop = scalar ? size : MO_32; + MemOp memop = scalar ? size : MO_32; int maxpass = scalar ? 1 : is_q ? 4 : 2; for (pass = 0; pass < maxpass; pass++) { @@ -8201,7 +8201,7 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16); TCGv_i32 tcg_shift = NULL; - TCGMemOp mop = size | (is_signed ? MO_SIGN : 0); + MemOp mop = size | (is_signed ? MO_SIGN : 0); int pass; if (fracbits || size == MO_64) { @@ -9980,7 +9980,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, int dsize = is_q ? 128 : 64; int esize = 8 << size; int elements = dsize/esize; - TCGMemOp memop = size | (is_u ? 0 : MO_SIGN); + MemOp memop = size | (is_u ? 0 : MO_SIGN); TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); TCGv_i64 tcg_round; @@ -10323,7 +10323,7 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, TCGv_i64 tcg_op1 = tcg_temp_new_i64(); TCGv_i64 tcg_op2 = tcg_temp_new_i64(); TCGv_i64 tcg_passres; - TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); + MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); int elt = pass + is_q * 2; @@ -11803,7 +11803,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, if (size == 2) { /* 32 + 32 -> 64 op */ - TCGMemOp memop = size + (u ? 0 : MO_SIGN); + MemOp memop = size + (u ? 0 : MO_SIGN); for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(); @@ -12825,7 +12825,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) switch (is_fp) { case 1: /* normal fp */ - /* convert insn encoded size to TCGMemOp size */ + /* convert insn encoded size to MemOp size */ switch (size) { case 0: /* half-precision */ size = MO_16; @@ -12873,7 +12873,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) return; } - /* Given TCGMemOp size, adjust register and indexing. */ + /* Given MemOp size, adjust register and indexing. 
*/ switch (size) { case MO_16: index = h << 2 | l << 1 | m; @@ -13170,7 +13170,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) TCGv_i64 tcg_res[2]; int pass; bool satop = extract32(opcode, 0, 1); - TCGMemOp memop = MO_32; + MemOp memop = MO_32; if (satop || !u) { memop |= MO_SIGN; diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h index 12ad8ac6ed..a0f4f535ba 100644 --- a/target/arm/translate-a64.h +++ b/target/arm/translate-a64.h @@ -62,7 +62,7 @@ static inline void assert_fp_access_checked(DisasContext *s) * the FP/vector register Qn. */ static inline int vec_reg_offset(DisasContext *s, int regno, - int element, TCGMemOp size) + int element, MemOp size) { int element_size = 1 << size; int offs = element * element_size; diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c index fa068b0e47..5d7edd0907 100644 --- a/target/arm/translate-sve.c +++ b/target/arm/translate-sve.c @@ -4567,7 +4567,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) */ /* The memory mode of the dtype. */ -static const TCGMemOp dtype_mop[16] = { +static const MemOp dtype_mop[16] = { MO_UB, MO_UB, MO_UB, MO_UB, MO_SL, MO_UW, MO_UW, MO_UW, MO_SW, MO_SW, MO_UL, MO_UL, diff --git a/target/arm/translate.c b/target/arm/translate.c index cbe19b7a62..defe74fc88 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -114,7 +114,7 @@ typedef enum ISSInfo { } ISSInfo; /* Save the syndrome information for a Data Abort */ -static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo) +static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo) { uint32_t syn; int sas = memop & MO_SIZE; @@ -1042,7 +1042,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) * that the address argument is TCGv_i32 rather than TCGv. */ -static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op) +static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) { TCGv addr = tcg_temp_new(); tcg_gen_extu_i32_tl(addr, a32); @@ -1055,7 +1055,7 @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op) } static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGv addr; @@ -1070,7 +1070,7 @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, } static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGv addr; @@ -1123,7 +1123,7 @@ static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) } static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_ld_i64(val, addr, index, opc); @@ -1138,7 +1138,7 @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, } static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, - int index, TCGMemOp opc) + int index, MemOp opc) { TCGv addr = gen_aa32_addr(s, a32, opc); @@ -1369,7 +1369,7 @@ neon_reg_offset (int reg, int n) * where 0 is the least significant end of the register. 
*/ static inline long -neon_element_offset(int reg, int element, TCGMemOp size) +neon_element_offset(int reg, int element, MemOp size) { int element_size = 1 << size; int ofs = element * element_size; @@ -1391,7 +1391,7 @@ static TCGv_i32 neon_load_reg(int reg, int pass) return tmp; } -static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop) +static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop) { long offset = neon_element_offset(reg, ele, mop & MO_SIZE); @@ -1410,7 +1410,7 @@ static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop) } } -static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop) +static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop) { long offset = neon_element_offset(reg, ele, mop & MO_SIZE); @@ -1438,7 +1438,7 @@ static void neon_store_reg(int reg, int pass, TCGv_i32 var) tcg_temp_free_i32(var); } -static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var) +static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var) { long offset = neon_element_offset(reg, ele, size); @@ -1457,7 +1457,7 @@ static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var) } } -static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var) +static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var) { long offset = neon_element_offset(reg, ele, size); @@ -3523,7 +3523,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) int n; int vec_size; int mmu_idx; - TCGMemOp endian; + MemOp endian; TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; @@ -6830,7 +6830,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) } else if ((insn & 0x380) == 0) { /* VDUP */ int element; - TCGMemOp size; + MemOp size; if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { return 1; @@ -7395,7 +7395,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i32 addr, int size) { TCGv_i32 tmp = tcg_temp_new_i32(); - TCGMemOp opc = size | MO_ALIGN | s->be_data; + MemOp opc = size | MO_ALIGN | s->be_data; s->is_ldex = true; @@ -7449,7 +7449,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv taddr; TCGLabel *done_label; TCGLabel *fail_label; - TCGMemOp opc = size | MO_ALIGN | s->be_data; + MemOp opc = size | MO_ALIGN | s->be_data; /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { [addr] = {Rt}; @@ -8557,7 +8557,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) */ TCGv taddr; - TCGMemOp opc = s->be_data; + MemOp opc = s->be_data; rm = (insn) & 0xf; diff --git a/target/arm/translate.h b/target/arm/translate.h index 92ef790be9..f26b1e731a 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -23,7 +23,7 @@ typedef struct DisasContext { int condexec_cond; int thumb; int sctlr_b; - TCGMemOp be_data; + MemOp be_data; #if !defined(CONFIG_USER_ONLY) int user; #endif diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 8c6189512c..53e17d8963 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -1500,7 +1500,7 @@ static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs, */ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, unsigned rx, int scale, target_sreg disp, - unsigned sp, int modify, TCGMemOp mop) + unsigned sp, int modify, MemOp mop) { TCGv_reg ofs; TCGv_tl addr; @@ -1518,7 +1518,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, static void 
do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, unsigned rx, int scale, target_sreg disp, - unsigned sp, int modify, TCGMemOp mop) + unsigned sp, int modify, MemOp mop) { TCGv_reg ofs; TCGv_tl addr; @@ -1536,7 +1536,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, unsigned rx, int scale, target_sreg disp, - unsigned sp, int modify, TCGMemOp mop) + unsigned sp, int modify, MemOp mop) { TCGv_reg ofs; TCGv_tl addr; @@ -1554,7 +1554,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, unsigned rx, int scale, target_sreg disp, - unsigned sp, int modify, TCGMemOp mop) + unsigned sp, int modify, MemOp mop) { TCGv_reg ofs; TCGv_tl addr; @@ -1580,7 +1580,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb, unsigned rx, int scale, target_sreg disp, - unsigned sp, int modify, TCGMemOp mop) + unsigned sp, int modify, MemOp mop) { TCGv_reg dest; @@ -1653,7 +1653,7 @@ static bool trans_fldd(DisasContext *ctx, arg_ldst *a) static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb, target_sreg disp, unsigned sp, - int modify, TCGMemOp mop) + int modify, MemOp mop) { nullify_over(ctx); do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop); @@ -2939,7 +2939,7 @@ static bool trans_st(DisasContext *ctx, arg_ldst *a) static bool trans_ldc(DisasContext *ctx, arg_ldst *a) { - TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size; + MemOp mop = MO_TEUL | MO_ALIGN_16 | a->size; TCGv_reg zero, dest, ofs; TCGv_tl addr; diff --git a/target/i386/translate.c b/target/i386/translate.c index 5cd74ad639..868b0acafe 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -87,8 +87,8 @@ typedef struct DisasContext { /* current insn context */ int override; /* -1 if no override */ int prefix; - TCGMemOp aflag; - TCGMemOp dflag; + MemOp aflag; + MemOp dflag; target_ulong pc_start; target_ulong pc; /* pc = eip + cs_base */ /* current block context */ @@ -149,7 +149,7 @@ static void gen_eob(DisasContext *s); static void gen_jr(DisasContext *s, TCGv dest); static void gen_jmp(DisasContext *s, target_ulong eip); static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); -static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d); +static void gen_op(DisasContext *s1, int op, MemOp ot, int d); /* i386 arith/logic operations */ enum { @@ -320,7 +320,7 @@ static inline bool byte_reg_is_xH(DisasContext *s, int reg) } /* Select the size of a push/pop operation. */ -static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) +static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) { if (CODE64(s)) { return ot == MO_16 ? MO_16 : MO_64; @@ -330,13 +330,13 @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) } /* Select the size of the stack pointer. */ -static inline TCGMemOp mo_stacksize(DisasContext *s) +static inline MemOp mo_stacksize(DisasContext *s) { return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; } /* Select only size 64 else 32. Used for SSE operand sizes. */ -static inline TCGMemOp mo_64_32(TCGMemOp ot) +static inline MemOp mo_64_32(MemOp ot) { #ifdef TARGET_X86_64 return ot == MO_64 ? MO_64 : MO_32; @@ -347,19 +347,19 @@ static inline TCGMemOp mo_64_32(TCGMemOp ot) /* Select size 8 if lsb of B is clear, else OT. Used for decoding byte vs word opcodes. 
*/ -static inline TCGMemOp mo_b_d(int b, TCGMemOp ot) +static inline MemOp mo_b_d(int b, MemOp ot) { return b & 1 ? ot : MO_8; } /* Select size 8 if lsb of B is clear, else OT capped at 32. Used for decoding operand size of port opcodes. */ -static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) +static inline MemOp mo_b_d32(int b, MemOp ot) { return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; } -static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) { switch(ot) { case MO_8: @@ -388,7 +388,7 @@ static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) } static inline -void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg) +void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) { if (ot == MO_8 && byte_reg_is_xH(s, reg)) { tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); @@ -411,13 +411,13 @@ static inline void gen_op_jmp_v(TCGv dest) } static inline -void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val) +void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) { tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); gen_op_mov_reg_v(s, size, reg, s->tmp0); } -static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg) +static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) { tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); gen_op_mov_reg_v(s, size, reg, s->tmp0); @@ -451,7 +451,7 @@ static inline void gen_jmp_im(DisasContext *s, target_ulong pc) /* Compute SEG:REG into A0. SEG is selected from the override segment (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to indicate no override. */ -static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, +static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, int def_seg, int ovr_seg) { switch (aflag) { @@ -514,13 +514,13 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); } -static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot) +static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) { tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df)); tcg_gen_shli_tl(s->T0, s->T0, ot); }; -static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign) +static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) { switch (size) { case MO_8: @@ -551,18 +551,18 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign) } } -static void gen_extu(TCGMemOp ot, TCGv reg) +static void gen_extu(MemOp ot, TCGv reg) { gen_ext_tl(reg, reg, ot, false); } -static void gen_exts(TCGMemOp ot, TCGv reg) +static void gen_exts(MemOp ot, TCGv reg) { gen_ext_tl(reg, reg, ot, true); } static inline -void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) +void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) { tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); gen_extu(size, s->tmp0); @@ -570,14 +570,14 @@ void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) } static inline -void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) +void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) { tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); gen_extu(size, s->tmp0); tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); } -static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n) +static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) { switch (ot) { case MO_8: @@ 
-594,7 +594,7 @@ static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n) } } -static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) +static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n) { switch (ot) { case MO_8: @@ -611,7 +611,7 @@ static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) } } -static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, +static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip, uint32_t svm_flags) { target_ulong next_eip; @@ -644,7 +644,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, } } -static inline void gen_movs(DisasContext *s, TCGMemOp ot) +static inline void gen_movs(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -840,7 +840,7 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; default: { - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true); return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; } @@ -885,7 +885,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) .mask = -1 }; default: { - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; } @@ -897,7 +897,7 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) { int inv, jcc_op, cond; - TCGMemOp size; + MemOp size; CCPrepare cc; TCGv t0; @@ -1075,7 +1075,7 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) return l2; } -static inline void gen_stos(DisasContext *s, TCGMemOp ot) +static inline void gen_stos(DisasContext *s, MemOp ot) { gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); @@ -1084,7 +1084,7 @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot) gen_op_add_reg_T0(s, s->aflag, R_EDI); } -static inline void gen_lods(DisasContext *s, TCGMemOp ot) +static inline void gen_lods(DisasContext *s, MemOp ot) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); @@ -1093,7 +1093,7 @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) gen_op_add_reg_T0(s, s->aflag, R_ESI); } -static inline void gen_scas(DisasContext *s, TCGMemOp ot) +static inline void gen_scas(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1102,7 +1102,7 @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) gen_op_add_reg_T0(s, s->aflag, R_EDI); } -static inline void gen_cmps(DisasContext *s, TCGMemOp ot) +static inline void gen_cmps(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1126,7 +1126,7 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) } -static inline void gen_ins(DisasContext *s, TCGMemOp ot) +static inline void gen_ins(DisasContext *s, MemOp ot) { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); @@ -1148,7 +1148,7 @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) } } -static inline void gen_outs(DisasContext *s, TCGMemOp ot) +static inline void gen_outs(DisasContext *s, MemOp ot) { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); @@ -1171,7 +1171,7 @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) /* same method as 
Valgrind : we generate jumps to current or next instruction */ #define GEN_REPZ(op) \ -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ target_ulong cur_eip, target_ulong next_eip) \ { \ TCGLabel *l2; \ @@ -1187,7 +1187,7 @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ } #define GEN_REPZ2(op) \ -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ target_ulong cur_eip, \ target_ulong next_eip, \ int nz) \ @@ -1284,7 +1284,7 @@ static void gen_illegal_opcode(DisasContext *s) } /* if d == OR_TMP0, it means memory operand (address in A0) */ -static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) +static void gen_op(DisasContext *s1, int op, MemOp ot, int d) { if (d != OR_TMP0) { if (s1->prefix & PREFIX_LOCK) { @@ -1395,7 +1395,7 @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) } /* if d == OR_TMP0, it means memory operand (address in A0) */ -static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) +static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) { if (s1->prefix & PREFIX_LOCK) { if (d != OR_TMP0) { @@ -1421,7 +1421,7 @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); } -static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, +static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, TCGv shm1, TCGv count, bool is_right) { TCGv_i32 z32, s32, oldop; @@ -1466,7 +1466,7 @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, set_cc_op(s, CC_OP_DYNAMIC); } -static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, +static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right, int is_arith) { target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); @@ -1502,7 +1502,7 @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); } -static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, +static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, int is_right, int is_arith) { int mask = (ot == MO_64 ? 0x3f : 0x1f); @@ -1542,7 +1542,7 @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, } } -static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) +static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) { target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); TCGv_i32 t0, t1; @@ -1627,7 +1627,7 @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) set_cc_op(s, CC_OP_DYNAMIC); } -static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, +static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, int is_right) { int mask = (ot == MO_64 ? 
0x3f : 0x1f); @@ -1705,7 +1705,7 @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, } /* XXX: add faster immediate = 1 case */ -static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, +static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) { gen_compute_eflags(s); @@ -1761,7 +1761,7 @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, } /* XXX: add faster immediate case */ -static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, +static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, bool is_right, TCGv count_in) { target_ulong mask = (ot == MO_64 ? 63 : 31); @@ -1842,7 +1842,7 @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, tcg_temp_free(count); } -static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) +static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) { if (s != OR_TMP1) gen_op_mov_v_reg(s1, ot, s1->T1, s); @@ -1872,7 +1872,7 @@ static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) } } -static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c) +static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) { switch(op) { case OP_ROL: @@ -2149,7 +2149,7 @@ static void gen_add_A0_ds_seg(DisasContext *s) /* generate modrm memory load or store of 'reg'. TMP0 is used if reg == OR_TMP0 */ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, - TCGMemOp ot, int reg, int is_store) + MemOp ot, int reg, int is_store) { int mod, rm; @@ -2179,7 +2179,7 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, } } -static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) +static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) { uint32_t ret; @@ -2202,7 +2202,7 @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) return ret; } -static inline int insn_const_size(TCGMemOp ot) +static inline int insn_const_size(MemOp ot) { if (ot <= MO_32) { return 1 << ot; @@ -2266,7 +2266,7 @@ static inline void gen_jcc(DisasContext *s, int b, } } -static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, +static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, int modrm, int reg) { CCPrepare cc; @@ -2363,8 +2363,8 @@ static inline void gen_stack_update(DisasContext *s, int addend) /* Generate a push. It depends on ss32, addseg and dflag. */ static void gen_push_v(DisasContext *s, TCGv val) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGMemOp a_ot = mo_stacksize(s); + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = mo_stacksize(s); int size = 1 << d_ot; TCGv new_esp = s->A0; @@ -2383,9 +2383,9 @@ static void gen_push_v(DisasContext *s, TCGv val) } /* two step pop is necessary for precise exceptions */ -static TCGMemOp gen_pop_T0(DisasContext *s) +static MemOp gen_pop_T0(DisasContext *s) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); + MemOp d_ot = mo_pushpop(s, s->dflag); gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); @@ -2393,7 +2393,7 @@ static TCGMemOp gen_pop_T0(DisasContext *s) return d_ot; } -static inline void gen_pop_update(DisasContext *s, TCGMemOp ot) +static inline void gen_pop_update(DisasContext *s, MemOp ot) { gen_stack_update(s, 1 << ot); } @@ -2405,8 +2405,8 @@ static inline void gen_stack_A0(DisasContext *s) static void gen_pusha(DisasContext *s) { - TCGMemOp s_ot = s->ss32 ? 
MO_32 : MO_16; - TCGMemOp d_ot = s->dflag; + MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; @@ -2421,8 +2421,8 @@ static void gen_pusha(DisasContext *s) static void gen_popa(DisasContext *s) { - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; - TCGMemOp d_ot = s->dflag; + MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp d_ot = s->dflag; int size = 1 << d_ot; int i; @@ -2442,8 +2442,8 @@ static void gen_popa(DisasContext *s) static void gen_enter(DisasContext *s, int esp_addend, int level) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; int size = 1 << d_ot; /* Push BP; compute FrameTemp into T1. */ @@ -2482,8 +2482,8 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) static void gen_leave(DisasContext *s) { - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGMemOp a_ot = mo_stacksize(s); + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = mo_stacksize(s); gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); gen_op_ld_v(s, d_ot, s->T0, s->A0); @@ -3045,7 +3045,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, SSEFunc_0_eppi sse_fn_eppi; SSEFunc_0_ppi sse_fn_ppi; SSEFunc_0_eppt sse_fn_eppt; - TCGMemOp ot; + MemOp ot; b &= 0xff; if (s->prefix & PREFIX_DATA) @@ -4488,7 +4488,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) CPUX86State *env = cpu->env_ptr; int b, prefixes; int shift; - TCGMemOp ot, aflag, dflag; + MemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; int rex_w, rex_r; @@ -5566,8 +5566,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { - TCGMemOp d_ot; - TCGMemOp s_ot; + MemOp d_ot; + MemOp s_ot; /* d_ot is the size of destination */ d_ot = dflag; diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 60bcfb7bd0..24c1dd3408 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -2414,7 +2414,7 @@ DISAS_INSN(cas) uint16_t ext; TCGv load; TCGv cmp; - TCGMemOp opc; + MemOp opc; switch ((insn >> 9) & 3) { case 1: diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 95ff663292..761f535357 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -919,7 +919,7 @@ static void dec_load(DisasContext *dc) unsigned int size; bool rev = false, ex = false, ea = false; int mem_index = cpu_mmu_index(&dc->cpu->env, false); - TCGMemOp mop; + MemOp mop; mop = dc->opcode & 3; size = 1 << mop; @@ -1035,7 +1035,7 @@ static void dec_store(DisasContext *dc) unsigned int size; bool rev = false, ex = false, ea = false; int mem_index = cpu_mmu_index(&dc->cpu->env, false); - TCGMemOp mop; + MemOp mop; mop = dc->opcode & 3; size = 1 << mop; diff --git a/target/mips/translate.c b/target/mips/translate.c index 8ebde6ffee..ed84e6e4e6 100644 --- a/target/mips/translate.c +++ b/target/mips/translate.c @@ -2526,7 +2526,7 @@ typedef struct DisasContext { int32_t CP0_Config5; /* Routine used to access memory */ int mem_idx; - TCGMemOp default_tcg_memop_mask; + MemOp default_tcg_memop_mask; uint32_t hflags, saved_hflags; target_ulong btarget; bool ulri; @@ -3706,7 +3706,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, /* Store conditional */ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, - TCGMemOp tcg_mo, bool eva) + MemOp tcg_mo, 
bool eva) { TCGv addr, t0, val; TCGLabel *l1 = gen_new_label(); @@ -4549,7 +4549,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) } static inline void gen_r6_ld(target_long addr, int reg, int memidx, - TCGMemOp memop) + MemOp memop) { TCGv t0 = tcg_const_tl(addr); tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); @@ -21827,7 +21827,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) extract32(ctx->opcode, 0, 8); TCGv va = tcg_temp_new(); TCGv t1 = tcg_temp_new(); - TCGMemOp memop = (extract32(ctx->opcode, 8, 3)) == + MemOp memop = (extract32(ctx->opcode, 8, 3)) == NM_P_LS_UAWM ? MO_UNALN : 0; count = (count == 0) ? 8 : count; diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index 4360ce4045..b189c506c5 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -681,7 +681,7 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a) return true; } -static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop) +static void do_load(DisasContext *dc, arg_load *a, MemOp mop) { TCGv ea; @@ -763,7 +763,7 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a) return true; } -static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop) +static void do_store(DisasContext *dc, arg_store *a, MemOp mop) { TCGv t0 = tcg_temp_new(); tcg_gen_addi_tl(t0, cpu_R[a->a], a->i); diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 0cf3f979e2..adb8fd516f 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -163,7 +163,7 @@ struct DisasContext { int mem_idx; int access_type; /* Translation flags */ - TCGMemOp default_tcg_memop_mask; + MemOp default_tcg_memop_mask; #if defined(TARGET_PPC64) bool sf_mode; bool has_cfar; @@ -3142,7 +3142,7 @@ static void gen_isync(DisasContext *ctx) #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE)) -static void gen_load_locked(DisasContext *ctx, TCGMemOp memop) +static void gen_load_locked(DisasContext *ctx, MemOp memop) { TCGv gpr = cpu_gpr[rD(ctx->opcode)]; TCGv t0 = tcg_temp_new(); @@ -3167,7 +3167,7 @@ LARX(lbarx, DEF_MEMOP(MO_UB)) LARX(lharx, DEF_MEMOP(MO_UW)) LARX(lwarx, DEF_MEMOP(MO_UL)) -static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop, +static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, TCGv EA, TCGCond cond, int addend) { TCGv t = tcg_temp_new(); @@ -3193,7 +3193,7 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop, tcg_temp_free(u); } -static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop) +static void gen_ld_atomic(DisasContext *ctx, MemOp memop) { uint32_t gpr_FC = FC(ctx->opcode); TCGv EA = tcg_temp_new(); @@ -3306,7 +3306,7 @@ static void gen_ldat(DisasContext *ctx) } #endif -static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop) +static void gen_st_atomic(DisasContext *ctx, MemOp memop) { uint32_t gpr_FC = FC(ctx->opcode); TCGv EA = tcg_temp_new(); @@ -3389,7 +3389,7 @@ static void gen_stdat(DisasContext *ctx) } #endif -static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop) +static void gen_conditional_store(DisasContext *ctx, MemOp memop) { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); diff --git a/target/riscv/insn_trans/trans_rva.inc.c b/target/riscv/insn_trans/trans_rva.inc.c index fadd88849e..be8a9f06dd 100644 --- a/target/riscv/insn_trans/trans_rva.inc.c +++ b/target/riscv/insn_trans/trans_rva.inc.c @@ -18,7 +18,7 @@ * this program. If not, see . 
*/ -static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) +static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGv src1 = tcg_temp_new(); /* Put addr in load_res, data in load_val. */ @@ -37,7 +37,7 @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) return true; } -static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) +static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGv src1 = tcg_temp_new(); TCGv src2 = tcg_temp_new(); @@ -82,8 +82,8 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) } static bool gen_amo(DisasContext *ctx, arg_atomic *a, - void(*func)(TCGv, TCGv, TCGv, TCGArg, TCGMemOp), - TCGMemOp mop) + void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), + MemOp mop) { TCGv src1 = tcg_temp_new(); TCGv src2 = tcg_temp_new(); diff --git a/target/riscv/insn_trans/trans_rvi.inc.c b/target/riscv/insn_trans/trans_rvi.inc.c index 1af795e05d..d04ca0394c 100644 --- a/target/riscv/insn_trans/trans_rvi.inc.c +++ b/target/riscv/insn_trans/trans_rvi.inc.c @@ -135,7 +135,7 @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) return gen_branch(ctx, a, TCG_COND_GEU); } -static bool gen_load(DisasContext *ctx, arg_lb *a, TCGMemOp memop) +static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); @@ -174,7 +174,7 @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a) return gen_load(ctx, a, MO_TEUW); } -static bool gen_store(DisasContext *ctx, arg_sb *a, TCGMemOp memop) +static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) { TCGv t0 = tcg_temp_new(); TCGv dat = tcg_temp_new(); diff --git a/target/s390x/translate.c b/target/s390x/translate.c index ac0d8b6410..2927247c82 100644 --- a/target/s390x/translate.c +++ b/target/s390x/translate.c @@ -152,7 +152,7 @@ static inline int vec_full_reg_offset(uint8_t reg) return offsetof(CPUS390XState, vregs[reg][0]); } -static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es) +static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) { /* Convert element size (es) - e.g. MO_8 - to bytes */ const uint8_t bytes = 1 << es; @@ -2262,7 +2262,7 @@ static DisasJumpType op_csst(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_csp(DisasContext *s, DisasOps *o) { - TCGMemOp mop = s->insn->data; + MemOp mop = s->insn->data; TCGv_i64 addr, old, cc; TCGLabel *lab = gen_new_label(); @@ -3228,7 +3228,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) { TCGv_i64 a1, a2; - TCGMemOp mop = s->insn->data; + MemOp mop = s->insn->data; /* In a parallel context, stop the world and single step. 
*/ if (tb_cflags(s->base.tb) & CF_PARALLEL) { diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c index 0caddb3958..5ce7bfb0af 100644 --- a/target/s390x/translate_vx.inc.c +++ b/target/s390x/translate_vx.inc.c @@ -57,13 +57,13 @@ #define FPF_LONG 3 #define FPF_EXT 4 -static inline bool valid_vec_element(uint8_t enr, TCGMemOp es) +static inline bool valid_vec_element(uint8_t enr, MemOp es) { return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); } static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, - TCGMemOp memop) + MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); @@ -96,7 +96,7 @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, } static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, - TCGMemOp memop) + MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); @@ -123,7 +123,7 @@ static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, } static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, - TCGMemOp memop) + MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); @@ -146,7 +146,7 @@ static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, } static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr, - TCGMemOp memop) + MemOp memop) { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 02c16128c8..c68bf4a2e4 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -2019,7 +2019,7 @@ static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, } static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, - TCGv addr, int mmu_idx, TCGMemOp memop) + TCGv addr, int mmu_idx, MemOp memop) { gen_address_mask(dc, addr); tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop); @@ -2050,10 +2050,10 @@ typedef struct { ASIType type; int asi; int mem_idx; - TCGMemOp memop; + MemOp memop; } DisasASI; -static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) +static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) { int asi = GET_FIELD(insn, 19, 26); ASIType type = GET_ASI_HELPER; @@ -2267,7 +2267,7 @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) } static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, - int insn, TCGMemOp memop) + int insn, MemOp memop) { DisasASI da = get_asi(dc, insn, memop); @@ -2305,7 +2305,7 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, } static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, - int insn, TCGMemOp memop) + int insn, MemOp memop) { DisasASI da = get_asi(dc, insn, memop); @@ -2511,7 +2511,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for lddfa on aligned registers only. */ if (size == 8 && (rd & 7) == 0) { - TCGMemOp memop; + MemOp memop; TCGv eight; int i; @@ -2625,7 +2625,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, case GET_ASI_BLOCK: /* Valid for stdfa on aligned registers only. 
*/ if (size == 8 && (rd & 7) == 0) { - TCGMemOp memop; + MemOp memop; TCGv eight; int i; diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c index c46a4ab151..68dd4aa2d8 100644 --- a/target/tilegx/translate.c +++ b/target/tilegx/translate.c @@ -290,7 +290,7 @@ static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd) } static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, - unsigned srcb, TCGMemOp memop, const char *name) + unsigned srcb, MemOp memop, const char *name) { if (dest) { return TILEGX_EXCP_OPCODE_UNKNOWN; @@ -305,7 +305,7 @@ static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, } static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb, - int imm, TCGMemOp memop, const char *name) + int imm, MemOp memop, const char *name) { TCGv tsrca = load_gr(dc, srca); TCGv tsrcb = load_gr(dc, srcb); @@ -496,7 +496,7 @@ static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext, { TCGv tdest, tsrca; const char *mnemonic; - TCGMemOp memop; + MemOp memop; TileExcp ret = TILEGX_EXCP_NONE; bool prefetch_nofault = false; @@ -1478,7 +1478,7 @@ static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext, TCGv tsrca = load_gr(dc, srca); bool prefetch_nofault = false; const char *mnemonic; - TCGMemOp memop; + MemOp memop; int i2, i3; TCGv t0; @@ -2106,7 +2106,7 @@ static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle) unsigned srca = get_SrcA_Y2(bundle); unsigned srcbdest = get_SrcBDest_Y2(bundle); const char *mnemonic; - TCGMemOp memop; + MemOp memop; bool prefetch_nofault = false; switch (OEY2(opc, mode)) { diff --git a/target/tricore/translate.c b/target/tricore/translate.c index 4f10407477..c574638c9f 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -219,7 +219,7 @@ static inline void generate_trap(DisasContext *ctx, int class, int tin); /* Functions for load/save to/from memory */ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, - int16_t con, TCGMemOp mop) + int16_t con, MemOp mop) { TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, r2, con); @@ -228,7 +228,7 @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, } static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, - int16_t con, TCGMemOp mop) + int16_t con, MemOp mop) { TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, r2, con); @@ -276,7 +276,7 @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, } static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, - TCGMemOp mop) + MemOp mop) { TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, r2, off); @@ -286,7 +286,7 @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, } static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, - TCGMemOp mop) + MemOp mop) { TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, r2, off); diff --git a/tcg/README b/tcg/README index 21fcdf737f..b4382fa8b5 100644 --- a/tcg/README +++ b/tcg/README @@ -512,7 +512,7 @@ Both t0 and t1 may be split into little-endian ordered pairs of registers if dealing with 64-bit quantities on a 32-bit host. The memidx selects the qemu tlb index to use (e.g. user or kernel access). -The flags are the TCGMemOp bits, selecting the sign, width, and endianness +The flags are the MemOp bits, selecting the sign, width, and endianness of the memory access. 
For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c index 0713448bf5..3f921015d3 100644 --- a/tcg/aarch64/tcg-target.inc.c +++ b/tcg/aarch64/tcg-target.inc.c @@ -1423,7 +1423,7 @@ static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); } -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ @@ -1431,7 +1431,7 @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, tcg_out_sbfm(s, ext, rd, rn, 0, bits); } -static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, +static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, TCGReg rd, TCGReg rn) { /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ @@ -1580,8 +1580,8 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp size = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1605,8 +1605,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp size = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1649,7 +1649,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); slow path for the failure case, which will be patched later when finalizing the slow path. Generated code returns the host addend in X1, clobbers X0,X2,X3,TMP. */ -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, tcg_insn_unit **label_ptr, int mem_index, bool is_read) { @@ -1709,11 +1709,11 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, #endif /* CONFIG_SOFTMMU */ -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - const TCGMemOp bswap = memop & MO_BSWAP; + const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SSIZE) { case MO_UB: @@ -1765,11 +1765,11 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, TCGReg data_r, TCGReg addr_r, TCGType otype, TCGReg off_r) { - const TCGMemOp bswap = memop & MO_BSWAP; + const MemOp bswap = memop & MO_BSWAP; switch (memop & MO_SIZE) { case MO_8: @@ -1804,7 +1804,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi, TCGType ext) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? 
TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); @@ -1829,7 +1829,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c index ece88dc2eb..94d80d79d1 100644 --- a/tcg/arm/tcg-target.inc.c +++ b/tcg/arm/tcg-target.inc.c @@ -1233,7 +1233,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - TCGMemOp opc, int mem_index, bool is_load) + MemOp opc, int mem_index, bool is_load) { int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); @@ -1348,7 +1348,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); void *func; if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { @@ -1412,7 +1412,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { return false; @@ -1453,11 +1453,11 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } #endif /* SOFTMMU */ -static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1514,11 +1514,11 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, } } -static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1577,7 +1577,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; @@ -1614,11 +1614,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) #endif } -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1659,11 +1659,11 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, } } -static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1708,7 +1708,7 @@ static void 
tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c index 6ddeebf4bc..9d8ed974e0 100644 --- a/tcg/i386/tcg-target.inc.c +++ b/tcg/i386/tcg-target.inc.c @@ -1697,7 +1697,7 @@ static void * const qemu_st_helpers[16] = { First argument register is clobbered. */ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - int mem_index, TCGMemOp opc, + int mem_index, MemOp opc, tcg_insn_unit **label_ptr, int which) { const TCGReg r0 = TCG_REG_L0; @@ -1810,7 +1810,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg data_reg; tcg_insn_unit **label_ptr = &l->label_ptr[0]; int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0); @@ -1895,8 +1895,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; tcg_insn_unit **label_ptr = &l->label_ptr[0]; TCGReg retaddr; @@ -1995,10 +1995,10 @@ static inline int setup_guest_base_seg(void) static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, - int seg, bool is64, TCGMemOp memop) + int seg, bool is64, MemOp memop) { - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; int rexw = is64 * P_REXW; int movop = OPC_MOVL_GvEv; @@ -2103,7 +2103,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; @@ -2137,15 +2137,15 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg base, int index, intptr_t ofs, - int seg, TCGMemOp memop) + int seg, MemOp memop) { /* ??? Ideally we wouldn't need a scratch register. For user-only, we could perform the bswap twice to restore the original value instead of moving to the scratch. But as it is, the L constraint means that TCG_REG_L0 is definitely free here. 
*/ const TCGReg scratch = TCG_REG_L0; - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; int movop = OPC_MOVL_EvGv; if (have_movbe && real_bswap) { @@ -2221,7 +2221,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; tcg_insn_unit *label_ptr[2]; diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c index 41bff32fb4..5442167045 100644 --- a/tcg/mips/tcg-target.inc.c +++ b/tcg/mips/tcg-target.inc.c @@ -1215,7 +1215,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit *label_ptr[2], bool is_load) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); int mem_index = get_mmuidx(oi); @@ -1313,7 +1313,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg v0; int i; @@ -1363,8 +1363,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; int i; /* resolve label address */ @@ -1413,7 +1413,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) #endif static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc, bool is_64) + TCGReg base, MemOp opc, bool is_64) { switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: @@ -1521,7 +1521,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif @@ -1558,7 +1558,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc) + TCGReg base, MemOp opc) { /* Don't clutter the code below with checks to avoid bswapping ZERO. 
*/ if ((lo | hi) == 0) { @@ -1624,7 +1624,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; #endif diff --git a/tcg/optimize.c b/tcg/optimize.c index cee2a36a60..f7f4e873c9 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1013,7 +1013,7 @@ void tcg_optimize(TCGContext *s) CASE_OP_32_64(qemu_ld): { TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; - TCGMemOp mop = get_memop(oi); + MemOp mop = get_memop(oi); if (!(mop & MO_SIGN)) { mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; } diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 852b8940fb..815edac077 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -1506,7 +1506,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); in CR7, loads the addend of the TLB into R3, and returns the register containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc, +static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, TCGReg addrlo, TCGReg addrhi, int mem_index, bool is_read) { @@ -1633,7 +1633,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { @@ -1680,8 +1680,8 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { @@ -1744,7 +1744,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc, s_bits; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; @@ -1819,7 +1819,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); TCGMemOpIdx oi; - TCGMemOp opc, s_bits; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c index 3e76bf5738..7018509693 100644 --- a/tcg/riscv/tcg-target.inc.c +++ b/tcg/riscv/tcg-target.inc.c @@ -970,7 +970,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, TCGReg addrh, TCGMemOpIdx oi, tcg_insn_unit **label_ptr, bool is_load) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); unsigned s_bits = opc & MO_SIZE; unsigned a_bits = get_alignment_bits(opc); tcg_target_long compare_mask; @@ -1044,7 +1044,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; TCGReg a2 = tcg_target_call_iarg_regs[2]; @@ -1077,8 +1077,8 @@ static bool 
tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOpIdx oi = l->oi; - TCGMemOp opc = get_memop(oi); - TCGMemOp s_bits = opc & MO_SIZE; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; TCGReg a2 = tcg_target_call_iarg_regs[2]; @@ -1121,9 +1121,9 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc, bool is_64) + TCGReg base, MemOp opc, bool is_64) { - const TCGMemOp bswap = opc & MO_BSWAP; + const MemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); @@ -1172,7 +1172,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; #endif @@ -1208,9 +1208,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, - TCGReg base, TCGMemOp opc) + TCGReg base, MemOp opc) { - const TCGMemOp bswap = opc & MO_BSWAP; + const MemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); @@ -1243,7 +1243,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; TCGMemOpIdx oi; - TCGMemOp opc; + MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; #endif diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c index fe42939d98..8aaa4cebe8 100644 --- a/tcg/s390/tcg-target.inc.c +++ b/tcg/s390/tcg-target.inc.c @@ -1430,7 +1430,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) } } -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SSIZE | MO_BSWAP)) { @@ -1489,7 +1489,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { switch (opc & (MO_SIZE | MO_BSWAP)) { @@ -1544,7 +1544,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); /* Load and compare a TLB entry, leaving the flags set. Loads the TLB addend into R2. Returns a register with the santitized guest address. 
*/ -static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, +static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, int mem_index, bool is_ld) { unsigned s_bits = opc & MO_SIZE; @@ -1614,7 +1614,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { @@ -1639,7 +1639,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; TCGMemOpIdx oi = lb->oi; - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, 2)) { @@ -1694,7 +1694,7 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; @@ -1721,7 +1721,7 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, TCGMemOpIdx oi) { - TCGMemOp opc = get_memop(oi); + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c index 10b1cea63b..d7986cda5c 100644 --- a/tcg/sparc/tcg-target.inc.c +++ b/tcg/sparc/tcg-target.inc.c @@ -1081,7 +1081,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); is in the returned register, maybe %o0. The TLB addend is in %o1. */ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, - TCGMemOp opc, int which) + MemOp opc, int which) { int fast_off = TLB_MASK_TABLE_OFS(mem_index); int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); @@ -1164,7 +1164,7 @@ static const int qemu_st_opc[16] = { static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi, bool is_64) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; @@ -1246,7 +1246,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi) { - TCGMemOp memop = get_memop(oi); + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 587d092238..e87c327fbf 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -2714,7 +2714,7 @@ void tcg_gen_lookup_and_goto_ptr(void) } } -static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. 
*/ (void)get_alignment_bits(op); @@ -2743,7 +2743,7 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) } static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, - TCGMemOp memop, TCGArg idx) + MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 @@ -2758,7 +2758,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, } static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, - TCGMemOp memop, TCGArg idx) + MemOp memop, TCGArg idx) { TCGMemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 @@ -2788,9 +2788,9 @@ static void tcg_gen_req_mo(TCGBar type) } } -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { - TCGMemOp orig_memop; + MemOp orig_memop; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); @@ -2825,7 +2825,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; @@ -2858,9 +2858,9 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { - TCGMemOp orig_memop; + MemOp orig_memop; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -2911,7 +2911,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; @@ -2953,7 +2953,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) } } -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) +static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: @@ -2974,7 +2974,7 @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) } } -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc) +static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: @@ -3034,7 +3034,7 @@ static void * const table_cmpxchg[16] = { }; void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, - TCGv_i32 newv, TCGArg idx, TCGMemOp memop) + TCGv_i32 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 0, 0); @@ -3078,7 +3078,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, } void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, - TCGv_i64 newv, TCGArg idx, TCGMemOp memop) + TCGv_i64 newv, TCGArg idx, MemOp memop) { memop = tcg_canonicalize_memop(memop, 1, 0); @@ -3142,7 +3142,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, } static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, TCGMemOp memop, bool new_val, + TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t1 = tcg_temp_new_i32(); @@ -3160,7 +3160,7 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, } static void 
do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, - TCGArg idx, TCGMemOp memop, void * const table[]) + TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; @@ -3185,7 +3185,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, } static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, TCGMemOp memop, bool new_val, + TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t1 = tcg_temp_new_i64(); @@ -3203,7 +3203,7 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, } static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, - TCGArg idx, TCGMemOp memop, void * const table[]) + TCGArg idx, MemOp memop, void * const table[]) { memop = tcg_canonicalize_memop(memop, 1, 0); @@ -3257,7 +3257,7 @@ static void * const table_##NAME[16] = { \ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ }; \ void tcg_gen_atomic_##NAME##_i32 \ - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \ + (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ @@ -3267,7 +3267,7 @@ void tcg_gen_atomic_##NAME##_i32 \ } \ } \ void tcg_gen_atomic_##NAME##_i64 \ - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \ + (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index 2d4dd5cd7d..e9cf172762 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -851,10 +851,10 @@ void tcg_gen_lookup_and_goto_ptr(void); #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 #endif -void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); +void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp); static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index) { @@ -912,46 +912,46 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) } void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, - TCGArg, TCGMemOp); + TCGArg, MemOp); void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, - TCGArg, TCGMemOp); + TCGArg, MemOp); -void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); +void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void 
tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); +void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); -void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); -void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); -void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); +void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); 
+void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32); diff --git a/tcg/tcg.c b/tcg/tcg.c index 0458eaec57..16b2d0e0ec 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -2055,7 +2055,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_qemu_st_i64: { TCGMemOpIdx oi = op->args[k++]; - TCGMemOp op = get_memop(oi); + MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { diff --git a/tcg/tcg.h b/tcg/tcg.h index 529acb2ed8..a37181c899 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -26,6 +26,7 @@ #define TCG_H #include "cpu.h" +#include "exec/memop.h" #include "exec/tb-context.h" #include "qemu/bitops.h" #include "qemu/queue.h" @@ -309,103 +310,13 @@ typedef enum TCGType { #endif } TCGType; -/* Constants for qemu_ld and qemu_st for the Memory Operation field. */ -typedef enum TCGMemOp { - MO_8 = 0, - MO_16 = 1, - MO_32 = 2, - MO_64 = 3, - MO_SIZE = 3, /* Mask for the above. */ - - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ - - MO_BSWAP = 8, /* Host reverse endian. */ -#ifdef HOST_WORDS_BIGENDIAN - MO_LE = MO_BSWAP, - MO_BE = 0, -#else - MO_LE = 0, - MO_BE = MO_BSWAP, -#endif -#ifdef TARGET_WORDS_BIGENDIAN - MO_TE = MO_BE, -#else - MO_TE = MO_LE, -#endif - - /* - * MO_UNALN accesses are never checked for alignment. - * MO_ALIGN accesses will result in a call to the CPU's - * do_unaligned_access hook if the guest address is not aligned. - * The default depends on whether the target CPU defines - * TARGET_ALIGNED_ONLY. - * - * Some architectures (e.g. ARMv8) need the address which is aligned - * to a size more than the size of the memory access. - * Some architectures (e.g. SPARCv9) need an address which is aligned, - * but less strictly than the natural alignment. - * - * MO_ALIGN supposes the alignment size is the size of a memory access. - * - * There are three options: - * - unaligned access permitted (MO_UNALN). - * - an alignment to the size of an access (MO_ALIGN); - * - an alignment to a specified size, which may be more or less than - * the access size (MO_ALIGN_x where 'x' is a size in bytes); - */ - MO_ASHIFT = 4, - MO_AMASK = 7 << MO_ASHIFT, -#ifdef TARGET_ALIGNED_ONLY - MO_ALIGN = 0, - MO_UNALN = MO_AMASK, -#else - MO_ALIGN = MO_AMASK, - MO_UNALN = 0, -#endif - MO_ALIGN_2 = 1 << MO_ASHIFT, - MO_ALIGN_4 = 2 << MO_ASHIFT, - MO_ALIGN_8 = 3 << MO_ASHIFT, - MO_ALIGN_16 = 4 << MO_ASHIFT, - MO_ALIGN_32 = 5 << MO_ASHIFT, - MO_ALIGN_64 = 6 << MO_ASHIFT, - - /* Combinations of the above, for ease of use. 
*/ - MO_UB = MO_8, - MO_UW = MO_16, - MO_UL = MO_32, - MO_SB = MO_SIGN | MO_8, - MO_SW = MO_SIGN | MO_16, - MO_SL = MO_SIGN | MO_32, - MO_Q = MO_64, - - MO_LEUW = MO_LE | MO_UW, - MO_LEUL = MO_LE | MO_UL, - MO_LESW = MO_LE | MO_SW, - MO_LESL = MO_LE | MO_SL, - MO_LEQ = MO_LE | MO_Q, - - MO_BEUW = MO_BE | MO_UW, - MO_BEUL = MO_BE | MO_UL, - MO_BESW = MO_BE | MO_SW, - MO_BESL = MO_BE | MO_SL, - MO_BEQ = MO_BE | MO_Q, - - MO_TEUW = MO_TE | MO_UW, - MO_TEUL = MO_TE | MO_UL, - MO_TESW = MO_TE | MO_SW, - MO_TESL = MO_TE | MO_SL, - MO_TEQ = MO_TE | MO_Q, - - MO_SSIZE = MO_SIZE | MO_SIGN, -} TCGMemOp; - /** * get_alignment_bits - * @memop: TCGMemOp value + * @memop: MemOp value * * Extract the alignment size from the memop. */ -static inline unsigned get_alignment_bits(TCGMemOp memop) +static inline unsigned get_alignment_bits(MemOp memop) { unsigned a = memop & MO_AMASK; @@ -1186,7 +1097,7 @@ static inline size_t tcg_current_code_size(TCGContext *s) return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); } -/* Combine the TCGMemOp and mmu_idx parameters into a single value. */ +/* Combine the MemOp and mmu_idx parameters into a single value. */ typedef uint32_t TCGMemOpIdx; /** @@ -1196,7 +1107,7 @@ typedef uint32_t TCGMemOpIdx; * * Encode these values into a single parameter. */ -static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) +static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) { tcg_debug_assert(idx <= 15); return (op << 4) | idx; @@ -1208,7 +1119,7 @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) * * Extract the memory operation from the combined value. */ -static inline TCGMemOp get_memop(TCGMemOpIdx oi) +static inline MemOp get_memop(TCGMemOpIdx oi) { return oi >> 4; } diff --git a/trace/mem-internal.h b/trace/mem-internal.h index f6efaf6d6b..3444fbc596 100644 --- a/trace/mem-internal.h +++ b/trace/mem-internal.h @@ -16,7 +16,7 @@ #define TRACE_MEM_ST (1ULL << 5) /* store (y/n) */ static inline uint8_t trace_mem_build_info( - int size_shift, bool sign_extend, TCGMemOp endianness, bool store) + int size_shift, bool sign_extend, MemOp endianness, bool store) { uint8_t res; @@ -33,7 +33,7 @@ static inline uint8_t trace_mem_build_info( return res; } -static inline uint8_t trace_mem_get_info(TCGMemOp op, bool store) +static inline uint8_t trace_mem_get_info(MemOp op, bool store) { return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN), op & MO_BSWAP, store); diff --git a/trace/mem.h b/trace/mem.h index 2b58196e53..8cf213d85b 100644 --- a/trace/mem.h +++ b/trace/mem.h @@ -18,7 +18,7 @@ * * Return a value for the 'info' argument in guest memory access traces. */ -static uint8_t trace_mem_get_info(TCGMemOp op, bool store); +static uint8_t trace_mem_get_info(MemOp op, bool store); /** * trace_mem_build_info: @@ -26,7 +26,7 @@ static uint8_t trace_mem_get_info(TCGMemOp op, bool store); * Return a value for the 'info' argument in guest memory access traces. */ static uint8_t trace_mem_build_info(int size_shift, bool sign_extend, - TCGMemOp endianness, bool store); + MemOp endianness, bool store); #include "trace/mem-internal.h" From 66b9b24375ac215cdcbdf9e14d665395360abff4 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:29:05 +1000 Subject: [PATCH 02/36] memory: Introduce size_memop The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Introduce no-op size_memop to aid preparatory conversion of interfaces. 
Once interfaces are converted, size_memop will be implemented to return a MemOp from size in bytes. Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <35b8ee74020f67cf40848fb7d5f127cf96c851d6.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memop.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/exec/memop.h b/include/exec/memop.h index 7262ca3dfd..dfd76a1604 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -107,4 +107,14 @@ typedef enum MemOp { MO_SSIZE = MO_SIZE | MO_SIGN, } MemOp; +/* Size in bytes to MemOp. */ +static inline unsigned size_memop(unsigned size) +{ + /* + * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} + * "unsigned size" operand into a "MemOp op". + */ + return size; +} + #endif From e501824b3f3b3650e7cb8a509064cac01bc27c82 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:41 +1000 Subject: [PATCH 03/36] target/mips: Access MemoryRegion with MemOp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. Signed-off-by: Tony Nguyen Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Reviewed-by: Aleksandar Markovic Message-Id: Signed-off-by: Richard Henderson --- target/mips/op_helper.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c index f88a3ab904..1f0e88364a 100644 --- a/target/mips/op_helper.c +++ b/target/mips/op_helper.c @@ -24,6 +24,7 @@ #include "exec/helper-proto.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "exec/memop.h" #include "sysemu/kvm.h" #include "fpu/softfloat.h" @@ -4741,11 +4742,11 @@ void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) if (op == 9) { /* Index Store Tag */ memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo, - 8, MEMTXATTRS_UNSPECIFIED); + size_memop(8), MEMTXATTRS_UNSPECIFIED); } else if (op == 5) { /* Index Load Tag */ memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo, - 8, MEMTXATTRS_UNSPECIFIED); + size_memop(8), MEMTXATTRS_UNSPECIFIED); } #endif } From bd8b5319be72785c59c418d03ed80c0825c57203 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:42 +1000 Subject: [PATCH 04/36] hw/s390x: Access MemoryRegion with MemOp The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. 
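For illustration (a sketch only, with operand names as in the hunks below), the per-call-site change is mechanical: the existing byte count is wrapped in size_memop and nothing else moves:

    /* before */
    return memory_region_dispatch_read(mr, offset, data, len,
                                       MEMTXATTRS_UNSPECIFIED);
    /* after: len is still a byte count; size_memop is the identity for now */
    return memory_region_dispatch_read(mr, offset, data, size_memop(len),
                                       MEMTXATTRS_UNSPECIFIED);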
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Reviewed-by: Cornelia Huck Message-Id: <2f41da26201fb9b0339c2b7fde34df864f7f9ea8.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- hw/s390x/s390-pci-inst.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 00235148be..0c958fc391 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -15,6 +15,7 @@ #include "cpu.h" #include "s390-pci-inst.h" #include "s390-pci-bus.h" +#include "exec/memop.h" #include "exec/memory-internal.h" #include "qemu/error-report.h" #include "sysemu/hw_accel.h" @@ -372,7 +373,7 @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, mr = pbdev->pdev->io_regions[pcias].memory; mr = s390_get_subregion(mr, offset, len); offset -= mr->addr; - return memory_region_dispatch_read(mr, offset, data, len, + return memory_region_dispatch_read(mr, offset, data, size_memop(len), MEMTXATTRS_UNSPECIFIED); } @@ -471,7 +472,7 @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, mr = pbdev->pdev->io_regions[pcias].memory; mr = s390_get_subregion(mr, offset, len); offset -= mr->addr; - return memory_region_dispatch_write(mr, offset, data, len, + return memory_region_dispatch_write(mr, offset, data, size_memop(len), MEMTXATTRS_UNSPECIFIED); } @@ -780,7 +781,8 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, for (i = 0; i < len / 8; i++) { result = memory_region_dispatch_write(mr, offset + i * 8, - ldq_p(buffer + i * 8), 8, + ldq_p(buffer + i * 8), + size_memop(8), MEMTXATTRS_UNSPECIFIED); if (result != MEMTX_OK) { s390_program_interrupt(env, PGM_OPERAND, 6, ra); From 40f74205da313a36ff4e8b0668be3a8aa104a896 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:43 +1000 Subject: [PATCH 05/36] hw/intc/armv7m_nic: Access MemoryRegion with MemOp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. 
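The conversion is behaviour-preserving by construction: while the placeholder from "memory: Introduce size_memop" is in effect, i.e.

    static inline unsigned size_memop(unsigned size)
    {
        return size;    /* no-op until every caller is converted */
    }

the value handed to memory_region_dispatch_{read|write} is bit-for-bit the same "unsigned size" as before.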
Signed-off-by: Tony Nguyen Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <21113bae2f54b45176701e0bf595937031368ae6.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- hw/intc/armv7m_nvic.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 62ab8b7273..7220940133 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -21,6 +21,7 @@ #include "hw/qdev-properties.h" #include "target/arm/cpu.h" #include "exec/exec-all.h" +#include "exec/memop.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" @@ -2348,7 +2349,8 @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, if (attrs.secure) { /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; - return memory_region_dispatch_write(mr, addr, value, size, attrs); + return memory_region_dispatch_write(mr, addr, value, size_memop(size), + attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -2367,7 +2369,8 @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, if (attrs.secure) { /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; - return memory_region_dispatch_read(mr, addr, data, size, attrs); + return memory_region_dispatch_read(mr, addr, data, size_memop(size), + attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -2393,7 +2396,8 @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, /* Direct the access to the correct systick */ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_write(mr, addr, value, size, attrs); + return memory_region_dispatch_write(mr, addr, value, size_memop(size), + attrs); } static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, @@ -2405,7 +2409,7 @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, /* Direct the access to the correct systick */ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_read(mr, addr, data, size, attrs); + return memory_region_dispatch_read(mr, addr, data, size_memop(size), attrs); } static const MemoryRegionOps nvic_systick_ops = { From 062c08d1fb52e7751f2afd8141de23d1a488d5fa Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:44 +1000 Subject: [PATCH 06/36] hw/virtio: Access MemoryRegion with MemOp The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. 
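One detail worth noting here: len is under guest control, and the context above these hunks already rejects illegal values before dispatching. That ordering matters later in this series, when size_memop stops being a no-op and (under CONFIG_DEBUG_TCG) asserts that it is given a power-of-two size between 1 and 8. A sketch of that later form, for reference:

    static inline MemOp size_memop(unsigned size)
    {
    #ifdef CONFIG_DEBUG_TCG
        /* Power of 2 up to 8. */
        assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
    #endif
        return ctz32(size);
    }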
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Reviewed-by: Cornelia Huck Message-Id: Signed-off-by: Richard Henderson --- hw/virtio/virtio-pci.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 8babd92e59..82c5e87a44 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -17,6 +17,7 @@ #include "qemu/osdep.h" +#include "exec/memop.h" #include "standard-headers/linux/virtio_pci.h" #include "hw/virtio/virtio.h" #include "migration/qemu-file-types.h" @@ -552,7 +553,8 @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, /* As length is under guest control, handle illegal values. */ return; } - memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED); + memory_region_dispatch_write(mr, addr, val, size_memop(len), + MEMTXATTRS_UNSPECIFIED); } static void @@ -575,7 +577,8 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, /* Make sure caller aligned buf properly */ assert(!(((uintptr_t)buf) & (len - 1))); - memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED); + memory_region_dispatch_read(mr, addr, &val, size_memop(len), + MEMTXATTRS_UNSPECIFIED); switch (len) { case 1: pci_set_byte(buf, val); From 475fbf0a3c006b4aa96f423666e285d295e51e38 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:45 +1000 Subject: [PATCH 07/36] hw/vfio: Access MemoryRegion with MemOp The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Reviewed-by: Cornelia Huck Message-Id: Signed-off-by: Richard Henderson --- hw/vfio/pci-quirks.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index f71aace156..a4e1d2abb5 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -11,6 +11,7 @@ */ #include "qemu/osdep.h" +#include "exec/memop.h" #include "qemu/units.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -1073,7 +1074,7 @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, /* Write to the proper guest MSI-X table instead */ memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, - offset, val, size, + offset, val, size_memop(size), MEMTXATTRS_UNSPECIFIED); } return; /* Do not write guest MSI-X data to hardware */ @@ -1104,7 +1105,8 @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { hwaddr offset = rtl->addr & 0xfff; memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, - &data, size, MEMTXATTRS_UNSPECIFIED); + &data, size_memop(size), + MEMTXATTRS_UNSPECIFIED); trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); } From 3d9e7c3e7bf11962e1100d077e46f93f780b7310 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:46 +1000 Subject: [PATCH 08/36] exec: Access MemoryRegion with MemOp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. 
After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. Signed-off-by: Tony Nguyen Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <3b042deef0a60dd49ae2320ece92120ba6027f2b.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- exec.c | 6 ++++-- memory_ldst.inc.c | 18 +++++++++--------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/exec.c b/exec.c index 1df966d17a..cc9697fe1b 100644 --- a/exec.c +++ b/exec.c @@ -3364,7 +3364,8 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, /* XXX: could force current_cpu to NULL to avoid potential bugs */ val = ldn_p(buf, l); - result |= memory_region_dispatch_write(mr, addr1, val, l, attrs); + result |= memory_region_dispatch_write(mr, addr1, val, + size_memop(l), attrs); } else { /* RAM case */ ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); @@ -3425,7 +3426,8 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); - result |= memory_region_dispatch_read(mr, addr1, &val, l, attrs); + result |= memory_region_dispatch_read(mr, addr1, &val, + size_memop(l), attrs); stn_p(buf, l, val); } else { /* RAM case */ diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c index acf865b900..1e8a2fc3ba 100644 --- a/memory_ldst.inc.c +++ b/memory_ldst.inc.c @@ -38,7 +38,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs); + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(4), attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap32(val); @@ -114,7 +114,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs); + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(8), attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap64(val); @@ -188,7 +188,7 @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs); + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(1), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -224,7 +224,7 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs); + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(2), attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); @@ -300,7 +300,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, if (l < 4 || !memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); + r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); } else { ptr = qemu_map_ram_ptr(mr->ram_block, addr1); stl_p(ptr, val); @@ -346,7 +346,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, val = bswap32(val); } 
#endif - r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); + r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -408,7 +408,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, mr = TRANSLATE(addr, &addr1, &l, true, attrs); if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - r = memory_region_dispatch_write(mr, addr1, val, 1, attrs); + r = memory_region_dispatch_write(mr, addr1, val, size_memop(1), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -451,7 +451,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, val = bswap16(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, 2, attrs); + r = memory_region_dispatch_write(mr, addr1, val, size_memop(2), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -524,7 +524,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, val = bswap64(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, 8, attrs); + r = memory_region_dispatch_write(mr, addr1, val, size_memop(8), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); From 4cbb198eefef41bbca703605c78875fd4fec6ef6 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:47 +1000 Subject: [PATCH 09/36] cputlb: Access MemoryRegion with MemOp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The memory_region_dispatch_{read|write} operand "unsigned size" is being converted into a "MemOp op". Convert interfaces by using no-op size_memop. After all interfaces are converted, size_memop will be implemented and the memory_region_dispatch_{read|write} operand "unsigned size" will be converted into a "MemOp op". As size_memop is a no-op, this patch does not change any behaviour. Signed-off-by: Tony Nguyen Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 523be4c848..6c83878f73 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -906,8 +906,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_read(mr, mr_offset, - &val, size, iotlbentry->attrs); + r = memory_region_dispatch_read(mr, mr_offset, &val, size_memop(size), + iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - @@ -947,8 +947,8 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_write(mr, mr_offset, - val, size, iotlbentry->attrs); + r = memory_region_dispatch_write(mr, mr_offset, val, size_memop(size), + iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - From e67c904668d82ca4416cd91d37d9f5abcceef747 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:48 +1000 Subject: [PATCH 10/36] memory: Access MemoryRegion with MemOp Convert memory_region_dispatch_{read|write} operand "unsigned size" into a "MemOp op". 
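Concretely, with MO_8..MO_64 occupying values 0..3 in the MO_SIZE bits, size_memop and memop_size are exact inverses for power-of-two sizes up to 8. A hypothetical self-check (not part of the patch):

    assert(size_memop(1) == MO_8  && memop_size(MO_8)  == 1);
    assert(size_memop(2) == MO_16 && memop_size(MO_16) == 2);
    assert(size_memop(4) == MO_32 && memop_size(MO_32) == 4);
    assert(size_memop(8) == MO_64 && memop_size(MO_64) == 8);
    /* memop_size masks with MO_SIZE, so flag bits leave the byte count alone: */
    assert(memop_size(MO_BEUL) == 4);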
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <1dd82df5801866743f838f1d046475115a1d32da.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- include/exec/memop.h | 22 +++++++++++++++------- include/exec/memory.h | 9 +++++---- memory.c | 7 +++++-- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/include/exec/memop.h b/include/exec/memop.h index dfd76a1604..0a610b75d9 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -12,6 +12,8 @@ #ifndef MEMOP_H #define MEMOP_H +#include "qemu/host-utils.h" + typedef enum MemOp { MO_8 = 0, MO_16 = 1, @@ -107,14 +109,20 @@ typedef enum MemOp { MO_SSIZE = MO_SIZE | MO_SIGN, } MemOp; -/* Size in bytes to MemOp. */ -static inline unsigned size_memop(unsigned size) +/* MemOp to size in bytes. */ +static inline unsigned memop_size(MemOp op) { - /* - * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} - * "unsigned size" operand into a "MemOp op". - */ - return size; + return 1 << (op & MO_SIZE); +} + +/* Size in bytes to MemOp. */ +static inline MemOp size_memop(unsigned size) +{ +#ifdef CONFIG_DEBUG_TCG + /* Power of 2 up to 8. */ + assert((size & (size - 1)) == 0 && size >= 1 && size <= 8); +#endif + return ctz32(size); } #endif diff --git a/include/exec/memory.h b/include/exec/memory.h index fddc2ff48a..192875b080 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -19,6 +19,7 @@ #include "exec/cpu-common.h" #include "exec/hwaddr.h" #include "exec/memattrs.h" +#include "exec/memop.h" #include "exec/ramlist.h" #include "qemu/bswap.h" #include "qemu/queue.h" @@ -1749,13 +1750,13 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner); * @mr: #MemoryRegion to access * @addr: address within that region * @pval: pointer to uint64_t which the data is written to - * @size: size of the access in bytes + * @op: size, sign, and endianness of the memory operation * @attrs: memory transaction attributes to use for the access */ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, - unsigned size, + MemOp op, MemTxAttrs attrs); /** * memory_region_dispatch_write: perform a write directly to the specified @@ -1764,13 +1765,13 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, * @mr: #MemoryRegion to access * @addr: address within that region * @data: data to write - * @size: size of the access in bytes + * @op: size, sign, and endianness of the memory operation * @attrs: memory transaction attributes to use for the access */ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, hwaddr addr, uint64_t data, - unsigned size, + MemOp op, MemTxAttrs attrs); /** diff --git a/memory.c b/memory.c index 7fd93b1d42..3d87908784 100644 --- a/memory.c +++ b/memory.c @@ -1446,9 +1446,10 @@ static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, MemTxResult memory_region_dispatch_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, - unsigned size, + MemOp op, MemTxAttrs attrs) { + unsigned size = memop_size(op); MemTxResult r; if (!memory_region_access_valid(mr, addr, size, false, attrs)) { @@ -1490,9 +1491,11 @@ static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr, MemTxResult memory_region_dispatch_write(MemoryRegion *mr, hwaddr addr, uint64_t data, - unsigned size, + MemOp op, MemTxAttrs attrs) { + unsigned size = memop_size(op); + if (!memory_region_access_valid(mr, addr, size, true, attrs)) { unassigned_mem_write(mr, addr, data, size); return MEMTX_DECODE_ERROR; From 
c1adc2273327fef986efd2aa26414981e3144309 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Sat, 24 Aug 2019 04:36:49 +1000
Subject: [PATCH 11/36] hw/s390x: Hard code size with MO_{8|16|32|64}

The temporarily no-op size_memop was introduced to aid the conversion
of the memory_region_dispatch_{read|write} operand "unsigned size"
into "MemOp op".

Now that size_memop is implemented, hard code the size again, but with
MO_{8|16|32|64}. This is more expressive and avoids size_memop calls.

Signed-off-by: Tony Nguyen
Reviewed-by: Richard Henderson
Reviewed-by: Cornelia Huck
Message-Id: <76dc97273a8eb5e10170ffc16526863df808f487.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson
---
 hw/s390x/s390-pci-inst.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 0c958fc391..0e92a372ca 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -782,8 +782,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
     for (i = 0; i < len / 8; i++) {
         result = memory_region_dispatch_write(mr, offset + i * 8,
                                               ldq_p(buffer + i * 8),
-                                              size_memop(8),
-                                              MEMTXATTRS_UNSPECIFIED);
+                                              MO_64, MEMTXATTRS_UNSPECIFIED);
         if (result != MEMTX_OK) {
             s390_program_interrupt(env, PGM_OPERAND, 6, ra);
             return 0;

From 4574664677116dedb29b12150137f3888374a857 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Sat, 24 Aug 2019 04:36:50 +1000
Subject: [PATCH 12/36] target/mips: Hard code size with MO_{8|16|32|64}

The temporarily no-op size_memop was introduced to aid the conversion
of the memory_region_dispatch_{read|write} operand "unsigned size"
into "MemOp op".

Now that size_memop is implemented, hard code the size again, but with
MO_{8|16|32|64}. This is more expressive and avoids size_memop calls.

Signed-off-by: Tony Nguyen
Reviewed-by: Richard Henderson
Reviewed-by: Aleksandar Markovic
Message-Id: <99c4459d5c1dc9013820be3dbda9798165c15b99.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson
---
 target/mips/op_helper.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
index 1f0e88364a..34bcc8d884 100644
--- a/target/mips/op_helper.c
+++ b/target/mips/op_helper.c
@@ -4742,11 +4742,11 @@ void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
     if (op == 9) {
         /* Index Store Tag */
         memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
-                                     size_memop(8), MEMTXATTRS_UNSPECIFIED);
+                                     MO_64, MEMTXATTRS_UNSPECIFIED);
     } else if (op == 5) {
         /* Index Load Tag */
         memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
-                                    size_memop(8), MEMTXATTRS_UNSPECIFIED);
+                                    MO_64, MEMTXATTRS_UNSPECIFIED);
     }
 #endif
 }

From 07f0834f264a79d6225202bd35ca37f74afb8df1 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Sat, 24 Aug 2019 04:36:51 +1000
Subject: [PATCH 13/36] exec: Hard code size with MO_{8|16|32|64}

The temporarily no-op size_memop was introduced to aid the conversion
of the memory_region_dispatch_{read|write} operand "unsigned size"
into "MemOp op".

Now that size_memop is implemented, hard code the size again, but with
MO_{8|16|32|64}. This is more expressive and avoids size_memop calls.
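
With size_memop defined as ctz32(size), the equivalence can be
spot-checked (illustrative only, not part of this patch):

    assert(size_memop(1) == MO_8);   /* ctz32(1) == 0 */
    assert(size_memop(2) == MO_16);  /* ctz32(2) == 1 */
    assert(size_memop(4) == MO_32);  /* ctz32(4) == 2 */
    assert(size_memop(8) == MO_64);  /* ctz32(8) == 3 */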
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <99f69701cad294db638f84abebc58115e1b9de9a.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- memory_ldst.inc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c index 1e8a2fc3ba..de658c40c4 100644 --- a/memory_ldst.inc.c +++ b/memory_ldst.inc.c @@ -38,7 +38,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(4), attrs); + r = memory_region_dispatch_read(mr, addr1, &val, MO_32, attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap32(val); @@ -114,7 +114,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(8), attrs); + r = memory_region_dispatch_read(mr, addr1, &val, MO_64, attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap64(val); @@ -188,7 +188,7 @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(1), attrs); + r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -224,7 +224,7 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(2), attrs); + r = memory_region_dispatch_read(mr, addr1, &val, MO_16, attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); @@ -300,7 +300,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, if (l < 4 || !memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); + r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); } else { ptr = qemu_map_ram_ptr(mr->ram_block, addr1); stl_p(ptr, val); @@ -346,7 +346,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, val = bswap32(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); + r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -408,7 +408,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, mr = TRANSLATE(addr, &addr1, &l, true, attrs); if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - r = memory_region_dispatch_write(mr, addr1, val, size_memop(1), attrs); + r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -451,7 +451,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, val = bswap16(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, size_memop(2), attrs); + r = memory_region_dispatch_write(mr, addr1, val, MO_16, attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -524,7 +524,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, val = bswap64(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, size_memop(8), attrs); + r = 
memory_region_dispatch_write(mr, addr1, val, MO_64, attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); From d5d680cacc66ef7e3c02c81dc8f3a34eabce6dfe Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:52 +1000 Subject: [PATCH 14/36] memory: Access MemoryRegion with endianness Preparation for collapsing the two byte swaps adjust_endianness and handle_bswap into the former. Call memory_region_dispatch_{read|write} with endianness encoded into the "MemOp op" operand. This patch does not change any behaviour as memory_region_dispatch_{read|write} is yet to handle the endianness. Once it does handle endianness, callers with byte swaps can collapse them into adjust_endianness. Reviewed-by: Richard Henderson Signed-off-by: Tony Nguyen Message-Id: <8066ab3eb037c0388dfadfe53c5118429dd1de3a.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 8 ++++++-- exec.c | 13 +++++++++++-- hw/intc/armv7m_nvic.c | 15 ++++++++------- hw/s390x/s390-pci-inst.c | 6 ++++-- hw/vfio/pci-quirks.c | 5 +++-- hw/virtio/virtio-pci.c | 6 ++++-- include/exec/memory.h | 3 +++ memory.c | 18 ++++++++++++++++++ memory_ldst.inc.c | 24 ++++++++++++++++++------ 9 files changed, 75 insertions(+), 23 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 6c83878f73..f64c6b1c75 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -906,7 +906,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_read(mr, mr_offset, &val, size_memop(size), + r = memory_region_dispatch_read(mr, mr_offset, &val, + size_memop(size) | MO_TE, iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + @@ -947,7 +948,8 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_write(mr, mr_offset, val, size_memop(size), + r = memory_region_dispatch_write(mr, mr_offset, val, + size_memop(size) | MO_TE, iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + @@ -1305,6 +1307,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, } } + /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, addr, retaddr, access_type, size); return handle_bswap(res, size, big_endian); @@ -1553,6 +1556,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, } } + /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, handle_bswap(val, size, big_endian), addr, retaddr, size); diff --git a/exec.c b/exec.c index cc9697fe1b..e4652c0e75 100644 --- a/exec.c +++ b/exec.c @@ -3364,8 +3364,13 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, /* XXX: could force current_cpu to NULL to avoid potential bugs */ val = ldn_p(buf, l); + /* + * TODO: Merge bswap from ldn_p into memory_region_dispatch_write + * by using ldn_he_p and dropping MO_TE to get a host-endian value. 
+ */ result |= memory_region_dispatch_write(mr, addr1, val, - size_memop(l), attrs); + size_memop(l) | MO_TE, + attrs); } else { /* RAM case */ ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); @@ -3426,8 +3431,12 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); + /* + * TODO: Merge bswap from stn_p into memory_region_dispatch_read + * by using stn_he_p and dropping MO_TE to get a host-endian value. + */ result |= memory_region_dispatch_read(mr, addr1, &val, - size_memop(l), attrs); + size_memop(l) | MO_TE, attrs); stn_p(buf, l, val); } else { /* RAM case */ diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 7220940133..8e93e51e81 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -2349,8 +2349,8 @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, if (attrs.secure) { /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; - return memory_region_dispatch_write(mr, addr, value, size_memop(size), - attrs); + return memory_region_dispatch_write(mr, addr, value, + size_memop(size) | MO_TE, attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -2369,8 +2369,8 @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, if (attrs.secure) { /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; - return memory_region_dispatch_read(mr, addr, data, size_memop(size), - attrs); + return memory_region_dispatch_read(mr, addr, data, + size_memop(size) | MO_TE, attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -2396,8 +2396,8 @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, /* Direct the access to the correct systick */ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_write(mr, addr, value, size_memop(size), - attrs); + return memory_region_dispatch_write(mr, addr, value, + size_memop(size) | MO_TE, attrs); } static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, @@ -2409,7 +2409,8 @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, /* Direct the access to the correct systick */ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_read(mr, addr, data, size_memop(size), attrs); + return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, + attrs); } static const MemoryRegionOps nvic_systick_ops = { diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 0e92a372ca..4b3bd4a804 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -373,7 +373,8 @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, mr = pbdev->pdev->io_regions[pcias].memory; mr = s390_get_subregion(mr, offset, len); offset -= mr->addr; - return memory_region_dispatch_read(mr, offset, data, size_memop(len), + return memory_region_dispatch_read(mr, offset, data, + size_memop(len) | MO_BE, MEMTXATTRS_UNSPECIFIED); } @@ -472,7 +473,8 @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, mr = pbdev->pdev->io_regions[pcias].memory; mr = s390_get_subregion(mr, offset, len); offset -= mr->addr; - return memory_region_dispatch_write(mr, offset, data, size_memop(len), + return memory_region_dispatch_write(mr, offset, data, + size_memop(len) | MO_BE, 
MEMTXATTRS_UNSPECIFIED); } diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index a4e1d2abb5..136f3a9ad6 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -1074,7 +1074,8 @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, /* Write to the proper guest MSI-X table instead */ memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, - offset, val, size_memop(size), + offset, val, + size_memop(size) | MO_LE, MEMTXATTRS_UNSPECIFIED); } return; /* Do not write guest MSI-X data to hardware */ @@ -1105,7 +1106,7 @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { hwaddr offset = rtl->addr & 0xfff; memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, - &data, size_memop(size), + &data, size_memop(size) | MO_LE, MEMTXATTRS_UNSPECIFIED); trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 82c5e87a44..d89a85bb33 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -553,7 +553,8 @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, /* As length is under guest control, handle illegal values. */ return; } - memory_region_dispatch_write(mr, addr, val, size_memop(len), + /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */ + memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, MEMTXATTRS_UNSPECIFIED); } @@ -577,7 +578,8 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, /* Make sure caller aligned buf properly */ assert(!(((uintptr_t)buf) & (len - 1))); - memory_region_dispatch_read(mr, addr, &val, size_memop(len), + /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */ + memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, MEMTXATTRS_UNSPECIFIED); switch (len) { case 1: diff --git a/include/exec/memory.h b/include/exec/memory.h index 192875b080..c4c86a6ff4 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -2211,6 +2211,9 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, } } +/* enum device_endian to MemOp. */ +MemOp devend_memop(enum device_endian end); + #endif #endif diff --git a/memory.c b/memory.c index 3d87908784..d72143a18f 100644 --- a/memory.c +++ b/memory.c @@ -3285,3 +3285,21 @@ static void memory_register_types(void) } type_init(memory_register_types) + +MemOp devend_memop(enum device_endian end) +{ + static MemOp conv[] = { + [DEVICE_LITTLE_ENDIAN] = MO_LE, + [DEVICE_BIG_ENDIAN] = MO_BE, + [DEVICE_NATIVE_ENDIAN] = MO_TE, + [DEVICE_HOST_ENDIAN] = 0, + }; + switch (end) { + case DEVICE_LITTLE_ENDIAN: + case DEVICE_BIG_ENDIAN: + case DEVICE_NATIVE_ENDIAN: + return conv[end]; + default: + g_assert_not_reached(); + } +} diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c index de658c40c4..809a7e8389 100644 --- a/memory_ldst.inc.c +++ b/memory_ldst.inc.c @@ -38,7 +38,9 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, MO_32, attrs); + /* TODO: Merge bswap32 into memory_region_dispatch_read. 
*/ + r = memory_region_dispatch_read(mr, addr1, &val, + MO_32 | devend_memop(endian), attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap32(val); @@ -114,7 +116,9 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, MO_64, attrs); + /* TODO: Merge bswap64 into memory_region_dispatch_read. */ + r = memory_region_dispatch_read(mr, addr1, &val, + MO_64 | devend_memop(endian), attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap64(val); @@ -224,7 +228,9 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - r = memory_region_dispatch_read(mr, addr1, &val, MO_16, attrs); + /* TODO: Merge bswap16 into memory_region_dispatch_read. */ + r = memory_region_dispatch_read(mr, addr1, &val, + MO_16 | devend_memop(endian), attrs); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); @@ -346,7 +352,9 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, val = bswap32(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); + /* TODO: Merge bswap32 into memory_region_dispatch_write. */ + r = memory_region_dispatch_write(mr, addr1, val, + MO_32 | devend_memop(endian), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -451,7 +459,9 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, val = bswap16(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, MO_16, attrs); + /* TODO: Merge bswap16 into memory_region_dispatch_write. */ + r = memory_region_dispatch_write(mr, addr1, val, + MO_16 | devend_memop(endian), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -524,7 +534,9 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, val = bswap64(val); } #endif - r = memory_region_dispatch_write(mr, addr1, val, MO_64, attrs); + /* TODO: Merge bswap64 into memory_region_dispatch_write. */ + r = memory_region_dispatch_write(mr, addr1, val, + MO_64 | devend_memop(endian), attrs); } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); From be5c4787e9a6eed12fd765d9e890f7cc6cd63220 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:53 +1000 Subject: [PATCH 15/36] cputlb: Replace size and endian operands for MemOp Preparation for collapsing the two byte swaps adjust_endianness and handle_bswap into the former. 
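
For example (a sketch of the call-site change), a 2-byte big-endian
store that used to pass size and endianness as separate operands now
passes a single MemOp encoding both:

    /* Before: size = 2, big_endian = true. */
    store_helper(env, addr, val, oi, retaddr, 2, true);
    /* After: one operand encodes both properties. */
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);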
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: <755b7104410956b743e1f1e9c34ab87db113360f.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 170 +++++++++++++++++++++---------------------- include/exec/memop.h | 6 ++ 2 files changed, 87 insertions(+), 89 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f64c6b1c75..5c12eef292 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -881,7 +881,7 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size, static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, target_ulong addr, uintptr_t retaddr, - MMUAccessType access_type, int size) + MMUAccessType access_type, MemOp op) { CPUState *cpu = env_cpu(env); hwaddr mr_offset; @@ -906,15 +906,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_read(mr, mr_offset, &val, - size_memop(size) | MO_TE, - iotlbentry->attrs); + r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; - cpu_transaction_failed(cpu, physaddr, addr, size, access_type, + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, mmu_idx, iotlbentry->attrs, r, retaddr); } if (locked) { @@ -926,7 +924,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, uint64_t val, target_ulong addr, - uintptr_t retaddr, int size) + uintptr_t retaddr, MemOp op) { CPUState *cpu = env_cpu(env); hwaddr mr_offset; @@ -948,16 +946,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, qemu_mutex_lock_iothread(); locked = true; } - r = memory_region_dispatch_write(mr, mr_offset, val, - size_memop(size) | MO_TE, - iotlbentry->attrs); + r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; - cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE, - mmu_idx, iotlbentry->attrs, r, retaddr); + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), + MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, + retaddr); } if (locked) { qemu_mutex_unlock_iothread(); @@ -1218,14 +1215,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, * access type. 
*/ -static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian) +static inline uint64_t handle_bswap(uint64_t val, MemOp op) { - if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) { - switch (size) { - case 1: return val; - case 2: return bswap16(val); - case 4: return bswap32(val); - case 8: return bswap64(val); + if ((memop_big_endian(op) && NEED_BE_BSWAP) || + (!memop_big_endian(op) && NEED_LE_BSWAP)) { + switch (op & MO_SIZE) { + case MO_8: return val; + case MO_16: return bswap16(val); + case MO_32: return bswap32(val); + case MO_64: return bswap64(val); default: g_assert_not_reached(); } @@ -1248,7 +1246,7 @@ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, static inline uint64_t __attribute__((always_inline)) load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, - uintptr_t retaddr, size_t size, bool big_endian, bool code_read, + uintptr_t retaddr, MemOp op, bool code_read, FullLoadHelper *full_load) { uintptr_t mmu_idx = get_mmuidx(oi); @@ -1262,6 +1260,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, unsigned a_bits = get_alignment_bits(get_memop(oi)); void *haddr; uint64_t res; + size_t size = memop_size(op); /* Handle CPU specific unaligned behaviour */ if (addr & ((1 << a_bits) - 1)) { @@ -1309,8 +1308,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], - mmu_idx, addr, retaddr, access_type, size); - return handle_bswap(res, size, big_endian); + mmu_idx, addr, retaddr, access_type, op); + return handle_bswap(res, op); } /* Handle slow unaligned access (it spans two pages or IO). */ @@ -1327,7 +1326,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, r2 = full_load(env, addr2, oi, retaddr); shift = (addr & (size - 1)) * 8; - if (big_endian) { + if (memop_big_endian(op)) { /* Big-endian combine. 
*/ res = (r1 << shift) | (r2 >> ((size * 8) - shift)); } else { @@ -1339,30 +1338,27 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, do_aligned_access: haddr = (void *)((uintptr_t)addr + entry->addend); - switch (size) { - case 1: + switch (op) { + case MO_UB: res = ldub_p(haddr); break; - case 2: - if (big_endian) { - res = lduw_be_p(haddr); - } else { - res = lduw_le_p(haddr); - } + case MO_BEUW: + res = lduw_be_p(haddr); break; - case 4: - if (big_endian) { - res = (uint32_t)ldl_be_p(haddr); - } else { - res = (uint32_t)ldl_le_p(haddr); - } + case MO_LEUW: + res = lduw_le_p(haddr); break; - case 8: - if (big_endian) { - res = ldq_be_p(haddr); - } else { - res = ldq_le_p(haddr); - } + case MO_BEUL: + res = (uint32_t)ldl_be_p(haddr); + break; + case MO_LEUL: + res = (uint32_t)ldl_le_p(haddr); + break; + case MO_BEQ: + res = ldq_be_p(haddr); + break; + case MO_LEQ: + res = ldq_le_p(haddr); break; default: g_assert_not_reached(); @@ -1384,8 +1380,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 1, false, false, - full_ldub_mmu); + return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); } tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, @@ -1397,7 +1392,7 @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 2, false, false, + return load_helper(env, addr, oi, retaddr, MO_LEUW, false, full_le_lduw_mmu); } @@ -1410,7 +1405,7 @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 2, true, false, + return load_helper(env, addr, oi, retaddr, MO_BEUW, false, full_be_lduw_mmu); } @@ -1423,7 +1418,7 @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 4, false, false, + return load_helper(env, addr, oi, retaddr, MO_LEUL, false, full_le_ldul_mmu); } @@ -1436,7 +1431,7 @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 4, true, false, + return load_helper(env, addr, oi, retaddr, MO_BEUL, false, full_be_ldul_mmu); } @@ -1449,14 +1444,14 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 8, false, false, + return load_helper(env, addr, oi, retaddr, MO_LEQ, false, helper_le_ldq_mmu); } uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 8, true, false, + return load_helper(env, addr, oi, retaddr, MO_BEQ, false, helper_be_ldq_mmu); } @@ -1502,7 +1497,7 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, static inline void __attribute__((always_inline)) store_helper(CPUArchState *env, 
target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian) + TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) { uintptr_t mmu_idx = get_mmuidx(oi); uintptr_t index = tlb_index(env, mmu_idx, addr); @@ -1511,6 +1506,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); unsigned a_bits = get_alignment_bits(get_memop(oi)); void *haddr; + size_t size = memop_size(op); /* Handle CPU specific unaligned behaviour */ if (addr & ((1 << a_bits) - 1)) { @@ -1558,8 +1554,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, - handle_bswap(val, size, big_endian), - addr, retaddr, size); + handle_bswap(val, op), + addr, retaddr, op); return; } @@ -1595,7 +1591,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, */ for (i = 0; i < size; ++i) { uint8_t val8; - if (big_endian) { + if (memop_big_endian(op)) { /* Big-endian extract. */ val8 = val >> (((size - 1) * 8) - (i * 8)); } else { @@ -1609,30 +1605,27 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, do_aligned_access: haddr = (void *)((uintptr_t)addr + entry->addend); - switch (size) { - case 1: + switch (op) { + case MO_UB: stb_p(haddr, val); break; - case 2: - if (big_endian) { - stw_be_p(haddr, val); - } else { - stw_le_p(haddr, val); - } + case MO_BEUW: + stw_be_p(haddr, val); break; - case 4: - if (big_endian) { - stl_be_p(haddr, val); - } else { - stl_le_p(haddr, val); - } + case MO_LEUW: + stw_le_p(haddr, val); break; - case 8: - if (big_endian) { - stq_be_p(haddr, val); - } else { - stq_le_p(haddr, val); - } + case MO_BEUL: + stl_be_p(haddr, val); + break; + case MO_LEUL: + stl_le_p(haddr, val); + break; + case MO_BEQ: + stq_be_p(haddr, val); + break; + case MO_LEQ: + stq_le_p(haddr, val); break; default: g_assert_not_reached(); @@ -1643,43 +1636,43 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, 1, false); + store_helper(env, addr, val, oi, retaddr, MO_UB); } void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, 2, false); + store_helper(env, addr, val, oi, retaddr, MO_LEUW); } void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, 2, true); + store_helper(env, addr, val, oi, retaddr, MO_BEUW); } void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, 4, false); + store_helper(env, addr, val, oi, retaddr, MO_LEUL); } void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, 4, true); + store_helper(env, addr, val, oi, retaddr, MO_BEUL); } void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - store_helper(env, addr, val, oi, retaddr, 8, false); + store_helper(env, addr, val, oi, retaddr, MO_LEQ); } void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr) { - 
store_helper(env, addr, val, oi, retaddr, 8, true); + store_helper(env, addr, val, oi, retaddr, MO_BEQ); } /* First set of helpers allows passing in of OI and RETADDR. This makes @@ -1744,8 +1737,7 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 1, false, true, - full_ldub_cmmu); + return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu); } uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, @@ -1757,7 +1749,7 @@ uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 2, false, true, + return load_helper(env, addr, oi, retaddr, MO_LEUW, true, full_le_lduw_cmmu); } @@ -1770,7 +1762,7 @@ uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 2, true, true, + return load_helper(env, addr, oi, retaddr, MO_BEUW, true, full_be_lduw_cmmu); } @@ -1783,7 +1775,7 @@ uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 4, false, true, + return load_helper(env, addr, oi, retaddr, MO_LEUL, true, full_le_ldul_cmmu); } @@ -1796,7 +1788,7 @@ uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 4, true, true, + return load_helper(env, addr, oi, retaddr, MO_BEUL, true, full_be_ldul_cmmu); } @@ -1809,13 +1801,13 @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 8, false, true, + return load_helper(env, addr, oi, retaddr, MO_LEQ, true, helper_le_ldq_cmmu); } uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - return load_helper(env, addr, oi, retaddr, 8, true, true, + return load_helper(env, addr, oi, retaddr, MO_BEQ, true, helper_be_ldq_cmmu); } diff --git a/include/exec/memop.h b/include/exec/memop.h index 0a610b75d9..529d07b02d 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -125,4 +125,10 @@ static inline MemOp size_memop(unsigned size) return ctz32(size); } +/* Big endianness from MemOp. */ +static inline bool memop_big_endian(MemOp op) +{ + return (op & MO_BSWAP) == MO_BE; +} + #endif From 9bf825bf3df4ebae3af51566c8088e3f1249a910 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Sat, 24 Aug 2019 04:36:54 +1000 Subject: [PATCH 16/36] memory: Single byte swap along the I/O path Now that MemOp has been pushed down into the memory API, and callers are encoding endianness, we can collapse byte swaps along the I/O path into the accelerator and target independent adjust_endianness. Collapsing byte swaps along the I/O path enables additional endian inversion logic, e.g. SPARC64 Invert Endian TTE bit, with redundant byte swaps cancelling out. 
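
The collapsed swap reduces to one decision inside adjust_endianness,
shown here for a 32-bit access (a sketch, not the full size switch):

    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        *data = bswap32(*data);
    }

A layer that inverts endianness, such as the SPARC64 Invert Endian TTE
bit, only has to flip MO_BSWAP in op, so two inversions cancel here
instead of producing two back-to-back byte swaps.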
Reviewed-by: Richard Henderson Suggested-by: Richard Henderson Signed-off-by: Tony Nguyen Message-Id: <911ff31af11922a9afba9b7ce128af8b8b80f316.1566466906.git.tony.nguyen@bt.com> Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 42 ++-------------------------- exec.c | 17 +++--------- hw/virtio/virtio-pci.c | 10 +++---- memory.c | 33 ++++++++-------------- memory_ldst.inc.c | 63 ------------------------------------------ 5 files changed, 23 insertions(+), 142 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 5c12eef292..3c9e634d99 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1200,38 +1200,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, cpu_loop_exit_atomic(env_cpu(env), retaddr); } -#ifdef TARGET_WORDS_BIGENDIAN -#define NEED_BE_BSWAP 0 -#define NEED_LE_BSWAP 1 -#else -#define NEED_BE_BSWAP 1 -#define NEED_LE_BSWAP 0 -#endif - -/* - * Byte Swap Helper - * - * This should all dead code away depending on the build host and - * access type. - */ - -static inline uint64_t handle_bswap(uint64_t val, MemOp op) -{ - if ((memop_big_endian(op) && NEED_BE_BSWAP) || - (!memop_big_endian(op) && NEED_LE_BSWAP)) { - switch (op & MO_SIZE) { - case MO_8: return val; - case MO_16: return bswap16(val); - case MO_32: return bswap32(val); - case MO_64: return bswap64(val); - default: - g_assert_not_reached(); - } - } else { - return val; - } -} - /* * Load Helpers * @@ -1306,10 +1274,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, } } - /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ - res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], - mmu_idx, addr, retaddr, access_type, op); - return handle_bswap(res, op); + return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], + mmu_idx, addr, retaddr, access_type, op); } /* Handle slow unaligned access (it spans two pages or IO). */ @@ -1552,10 +1518,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, } } - /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, - handle_bswap(val, op), - addr, retaddr, op); + val, addr, retaddr, op); return; } diff --git a/exec.c b/exec.c index e4652c0e75..53a15b7ad7 100644 --- a/exec.c +++ b/exec.c @@ -3363,14 +3363,9 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid potential bugs */ - val = ldn_p(buf, l); - /* - * TODO: Merge bswap from ldn_p into memory_region_dispatch_write - * by using ldn_he_p and dropping MO_TE to get a host-endian value. - */ + val = ldn_he_p(buf, l); result |= memory_region_dispatch_write(mr, addr1, val, - size_memop(l) | MO_TE, - attrs); + size_memop(l), attrs); } else { /* RAM case */ ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); @@ -3431,13 +3426,9 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); - /* - * TODO: Merge bswap from stn_p into memory_region_dispatch_read - * by using stn_he_p and dropping MO_TE to get a host-endian value. 
- */ result |= memory_region_dispatch_read(mr, addr1, &val, - size_memop(l) | MO_TE, attrs); - stn_p(buf, l, val); + size_memop(l), attrs); + stn_he_p(buf, l, val); } else { /* RAM case */ ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index d89a85bb33..ffb03728f9 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -544,16 +544,15 @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, val = pci_get_byte(buf); break; case 2: - val = cpu_to_le16(pci_get_word(buf)); + val = pci_get_word(buf); break; case 4: - val = cpu_to_le32(pci_get_long(buf)); + val = pci_get_long(buf); break; default: /* As length is under guest control, handle illegal values. */ return; } - /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */ memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, MEMTXATTRS_UNSPECIFIED); } @@ -578,7 +577,6 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, /* Make sure caller aligned buf properly */ assert(!(((uintptr_t)buf) & (len - 1))); - /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */ memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, MEMTXATTRS_UNSPECIFIED); switch (len) { @@ -586,10 +584,10 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, pci_set_byte(buf, val); break; case 2: - pci_set_word(buf, le16_to_cpu(val)); + pci_set_word(buf, val); break; case 4: - pci_set_long(buf, le32_to_cpu(val)); + pci_set_long(buf, val); break; default: /* As length is under guest control, handle illegal values. */ diff --git a/memory.c b/memory.c index d72143a18f..7cdf4fe556 100644 --- a/memory.c +++ b/memory.c @@ -351,32 +351,23 @@ static bool memory_region_big_endian(MemoryRegion *mr) #endif } -static bool memory_region_wrong_endianness(MemoryRegion *mr) +static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) { -#ifdef TARGET_WORDS_BIGENDIAN - return mr->ops->endianness == DEVICE_LITTLE_ENDIAN; -#else - return mr->ops->endianness == DEVICE_BIG_ENDIAN; -#endif -} - -static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) -{ - if (memory_region_wrong_endianness(mr)) { - switch (size) { - case 1: + if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { + switch (op & MO_SIZE) { + case MO_8: break; - case 2: + case MO_16: *data = bswap16(*data); break; - case 4: + case MO_32: *data = bswap32(*data); break; - case 8: + case MO_64: *data = bswap64(*data); break; default: - abort(); + g_assert_not_reached(); } } } @@ -1458,7 +1449,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, } r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); - adjust_endianness(mr, pval, size); + adjust_endianness(mr, pval, op); return r; } @@ -1501,7 +1492,7 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, return MEMTX_DECODE_ERROR; } - adjust_endianness(mr, &data, size); + adjust_endianness(mr, &data, op); if ((!kvm_eventfds_enabled()) && memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { @@ -2350,7 +2341,7 @@ void memory_region_add_eventfd(MemoryRegion *mr, } if (size) { - adjust_endianness(mr, &mrfd.data, size); + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { @@ -2385,7 +2376,7 @@ void memory_region_del_eventfd(MemoryRegion *mr, unsigned i; if (size) { - adjust_endianness(mr, &mrfd.data, size); + adjust_endianness(mr, 
&mrfd.data, size_memop(size) | MO_TE); } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c index 809a7e8389..c54aee4a95 100644 --- a/memory_ldst.inc.c +++ b/memory_ldst.inc.c @@ -38,18 +38,8 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - /* TODO: Merge bswap32 into memory_region_dispatch_read. */ r = memory_region_dispatch_read(mr, addr1, &val, MO_32 | devend_memop(endian), attrs); -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap32(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap32(val); - } -#endif } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -116,18 +106,8 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - /* TODO: Merge bswap64 into memory_region_dispatch_read. */ r = memory_region_dispatch_read(mr, addr1, &val, MO_64 | devend_memop(endian), attrs); -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap64(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap64(val); - } -#endif } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -228,18 +208,8 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, release_lock |= prepare_mmio_access(mr); /* I/O case */ - /* TODO: Merge bswap16 into memory_region_dispatch_read. */ r = memory_region_dispatch_read(mr, addr1, &val, MO_16 | devend_memop(endian), attrs); -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap16(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap16(val); - } -#endif } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); @@ -342,17 +312,6 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, mr = TRANSLATE(addr, &addr1, &l, true, attrs); if (l < 4 || !memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap32(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap32(val); - } -#endif - /* TODO: Merge bswap32 into memory_region_dispatch_write. */ r = memory_region_dispatch_write(mr, addr1, val, MO_32 | devend_memop(endian), attrs); } else { @@ -449,17 +408,6 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, mr = TRANSLATE(addr, &addr1, &l, true, attrs); if (l < 2 || !memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap16(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap16(val); - } -#endif - /* TODO: Merge bswap16 into memory_region_dispatch_write. */ r = memory_region_dispatch_write(mr, addr1, val, MO_16 | devend_memop(endian), attrs); } else { @@ -524,17 +472,6 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, mr = TRANSLATE(addr, &addr1, &l, true, attrs); if (l < 8 || !memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); - -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap64(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap64(val); - } -#endif - /* TODO: Merge bswap64 into memory_region_dispatch_write. 
*/
     r = memory_region_dispatch_write(mr, addr1, val,
                                      MO_64 | devend_memop(endian), attrs);
 } else {

From a26fc6f5152b47f1d7ed928f9c9d462d01ff1624 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Sat, 24 Aug 2019 04:36:56 +1000
Subject: [PATCH 17/36] cputlb: Byte swap memory transaction attribute

Notice the new byte swap attribute and force the transaction through
the memory slow path.

Required by architectures that can invert the endianness of a memory
transaction, e.g. SPARC64 has the Invert Endian TTE bit.

Suggested-by: Richard Henderson
Signed-off-by: Tony Nguyen
Reviewed-by: Richard Henderson
Message-Id: <2a10a1f1c00a894af1212c8f68ef09c2966023c1.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson
---
 accel/tcg/cputlb.c      | 12 ++++++++++++
 include/exec/memattrs.h |  2 ++
 2 files changed, 14 insertions(+)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 3c9e634d99..d9787cc893 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -738,6 +738,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
          */
         address |= TLB_RECHECK;
     }
+    if (attrs.byte_swap) {
+        /* Force the access through the I/O slow path. */
+        address |= TLB_MMIO;
+    }
     if (!memory_region_is_ram(section->mr) &&
         !memory_region_is_romd(section->mr)) {
         /* IO memory case */
@@ -891,6 +895,10 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
+    if (iotlbentry->attrs.byte_swap) {
+        op ^= MO_BSWAP;
+    }
+
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -933,6 +941,10 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
+    if (iotlbentry->attrs.byte_swap) {
+        op ^= MO_BSWAP;
+    }
+
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index d4a3477d71..95f2d20d55 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -37,6 +37,8 @@ typedef struct MemTxAttrs {
     unsigned int user:1;
     /* Requester ID (for MSI for example) */
     unsigned int requester_id:16;
+    /* Invert endianness for this page */
+    unsigned int byte_swap:1;
     /*
      * The following are target-specific page-table bits.  These are not
      * related to actual memory transactions at all.  However, this structure

From 9bed46e67e2ee54bc596ba58063ee71a5ca40923 Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Sat, 24 Aug 2019 04:36:57 +1000
Subject: [PATCH 18/36] target/sparc: Add TLB entry with attributes

Append MemTxAttrs to interfaces so we can pass along the upcoming
Invert Endian TTE bit on SPARC64.
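
A sketch of what the extra operand enables (the actual check lands in
the next patch):

    if (TTE_IS_IE(env->dtlb[i].tte)) {
        attrs->byte_swap = true;
    }

tlb_set_page_with_attrs() then carries the attribute to io_readx() and
io_writex(), which invert MO_BSWAP on the access.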
Signed-off-by: Tony Nguyen Reviewed-by: Richard Henderson Message-Id: Signed-off-by: Richard Henderson --- target/sparc/mmu_helper.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index cbd1e91179..826e14b6f0 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -88,7 +88,7 @@ static const int perm_table[2][8] = { }; static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, + int *prot, int *access_index, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx, target_ulong *page_size) { @@ -219,6 +219,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, target_ulong vaddr; target_ulong page_size; int error_code = 0, prot, access_index; + MemTxAttrs attrs = {}; /* * TODO: If we ever need tlb_vaddr_to_host for this target, @@ -229,7 +230,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, assert(!probe); address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, address, access_type, mmu_idx, &page_size); vaddr = address; @@ -490,8 +491,8 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, return 0; } -static int get_physical_address_data(CPUSPARCState *env, - hwaddr *physical, int *prot, +static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, + int *prot, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); @@ -608,8 +609,8 @@ static int get_physical_address_data(CPUSPARCState *env, return 1; } -static int get_physical_address_code(CPUSPARCState *env, - hwaddr *physical, int *prot, +static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, + int *prot, MemTxAttrs *attrs, target_ulong address, int mmu_idx) { CPUState *cs = env_cpu(env); @@ -686,7 +687,7 @@ static int get_physical_address_code(CPUSPARCState *env, } static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, + int *prot, int *access_index, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx, target_ulong *page_size) { @@ -716,11 +717,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, } if (rw == 2) { - return get_physical_address_code(env, physical, prot, address, + return get_physical_address_code(env, physical, prot, attrs, address, mmu_idx); } else { - return get_physical_address_data(env, physical, prot, address, rw, - mmu_idx); + return get_physical_address_data(env, physical, prot, attrs, address, + rw, mmu_idx); } } @@ -734,10 +735,11 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, target_ulong vaddr; hwaddr paddr; target_ulong page_size; + MemTxAttrs attrs = {}; int error_code = 0, prot, access_index; address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, address, access_type, mmu_idx, &page_size); if (likely(error_code == 0)) { @@ -747,7 +749,8 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, env->dmmu.mmu_primary_context, env->dmmu.mmu_secondary_context); - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); + tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, + page_size); return true; } if (probe) { @@ -849,9 +852,10 @@ static int 
cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
 {
     target_ulong page_size;
     int prot, access_index;
+    MemTxAttrs attrs = {};
 
-    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
-                                mmu_idx, &page_size);
+    return get_physical_address(env, phys, &prot, &access_index, &attrs, addr,
+                                rw, mmu_idx, &page_size);
 }
 
 #if defined(TARGET_SPARC64)

From ccdb4c5535f41ee4da2ef158f58fca0327e50dab Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Sat, 24 Aug 2019 04:36:58 +1000
Subject: [PATCH 19/36] target/sparc: sun4u Invert Endian TTE bit

This bit configures the endianness of PCI MMIO devices. It is used by
the Solaris and OpenBSD sunhme drivers.

Tested working on OpenBSD.

Unfortunately Solaris 10 had an unrelated keyboard issue blocking
testing... another inch towards Solaris 10 on SPARC64 =)

Signed-off-by: Tony Nguyen
Reviewed-by: Richard Henderson
Tested-by: Mark Cave-Ayland
Message-Id: <3c8d5181a584f1b3712d3d8d66801b13cecb4b88.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson
---
 target/sparc/cpu.h        | 2 ++
 target/sparc/mmu_helper.c | 8 +++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 694d7139cf..490e14dfcf 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -275,6 +275,7 @@ enum {
 
 #define TTE_VALID_BIT       (1ULL << 63)
 #define TTE_NFO_BIT         (1ULL << 60)
+#define TTE_IE_BIT          (1ULL << 59)
 #define TTE_USED_BIT        (1ULL << 41)
 #define TTE_LOCKED_BIT      (1ULL <<  6)
 #define TTE_SIDEEFFECT_BIT  (1ULL <<  3)
@@ -291,6 +292,7 @@ enum {
 
 #define TTE_IS_VALID(tte)   ((tte) & TTE_VALID_BIT)
 #define TTE_IS_NFO(tte)     ((tte) & TTE_NFO_BIT)
+#define TTE_IS_IE(tte)      ((tte) & TTE_IE_BIT)
 #define TTE_IS_USED(tte)    ((tte) & TTE_USED_BIT)
 #define TTE_IS_LOCKED(tte)  ((tte) & TTE_LOCKED_BIT)
 #define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 826e14b6f0..77dc86ac5c 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -537,6 +537,10 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
         if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
             int do_fault = 0;
 
+            if (TTE_IS_IE(env->dtlb[i].tte)) {
+                attrs->byte_swap = true;
+            }
+
             /* access ok? */
             /* multiple bits in SFSR.FT may be set on TT_DFAULT */
             if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
@@ -792,7 +796,7 @@ void dump_mmu(CPUSPARCState *env)
             }
             if (TTE_IS_VALID(env->dtlb[i].tte)) {
                 qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
-                            ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
+                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                             i,
                             env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                             TTE_PA(env->dtlb[i].tte),
@@ -801,6 +805,8 @@ void dump_mmu(CPUSPARCState *env)
                             TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                             TTE_IS_LOCKED(env->dtlb[i].tte) ?
                             "locked" : "unlocked",
+                            TTE_IS_IE(env->dtlb[i].tte) ?
+                            "yes" : "no",
                             env->dtlb[i].tag & (uint64_t)0x1fffULL,
                             TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                             "global" : "local");

From 74841f044e96644d8ed8a388fe505df5ab843d0a Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sat, 24 Aug 2019 13:31:58 -0700
Subject: [PATCH 20/36] exec: Move user-only watchpoint stubs inline
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let the user-only watchpoint stubs resolve to empty inline functions.
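
For example (sketch), a user-only build now compiles watchpoint
removal down to nothing:

    #ifdef CONFIG_USER_ONLY
    static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
    {
    }
    #endif

so the out-of-line stubs in exec.c can be dropped.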
Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- exec.c | 26 ++------------------------ include/hw/core/cpu.h | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/exec.c b/exec.c index 53a15b7ad7..31fb75901f 100644 --- a/exec.c +++ b/exec.c @@ -1062,28 +1062,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) } #endif -#if defined(CONFIG_USER_ONLY) -void cpu_watchpoint_remove_all(CPUState *cpu, int mask) - -{ -} - -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, - int flags) -{ - return -ENOSYS; -} - -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) -{ -} - -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint) -{ - return -ENOSYS; -} -#else +#ifndef CONFIG_USER_ONLY /* Add a watchpoint. */ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint) @@ -1173,8 +1152,7 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, return !(addr > wpend || wp->vaddr > addrend); } - -#endif +#endif /* !CONFIG_USER_ONLY */ /* Add a breakpoint. */ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 77fca95a40..6de688059d 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -1070,12 +1070,35 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) return false; } +#ifdef CONFIG_USER_ONLY +static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint) +{ + return -ENOSYS; +} + +static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, + vaddr len, int flags) +{ + return -ENOSYS; +} + +static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, + CPUWatchpoint *wp) +{ +} + +static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) +{ +} +#else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); +#endif /** * cpu_get_address_space: From 0026348b48fe532279e8c12b100c16c1aa991373 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 Aug 2019 12:07:40 +0200 Subject: [PATCH 21/36] exec: Factor out core logic of check_watchpoint() We want to perform the same checks in probe_write() to trigger a cpu exit before doing any modifications. We'll have to pass a PC. Signed-off-by: David Hildenbrand Reviewed-by: Richard Henderson Message-Id: <20190823100741.9621-9-david@redhat.com> [rth: Use vaddr for len, like other watchpoint functions; Move user-only stub to static inline.] Signed-off-by: Richard Henderson --- exec.c | 26 ++++++++++++++++++-------- include/hw/core/cpu.h | 7 +++++++ 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/exec.c b/exec.c index 31fb75901f..cb6f5763dc 100644 --- a/exec.c +++ b/exec.c @@ -2789,11 +2789,10 @@ static const MemoryRegionOps notdirty_mem_ops = { }; /* Generate a debug exception if a watchpoint has been hit. 
*/ -static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra) { - CPUState *cpu = current_cpu; CPUClass *cc = CPU_GET_CLASS(cpu); - target_ulong vaddr; CPUWatchpoint *wp; assert(tcg_enabled()); @@ -2804,17 +2803,17 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); return; } - vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; - vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len); + + addr = cc->adjust_watchpoint_address(cpu, addr, len); QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (cpu_watchpoint_address_matches(wp, vaddr, len) + if (cpu_watchpoint_address_matches(wp, addr, len) && (wp->flags & flags)) { if (flags == BP_MEM_READ) { wp->flags |= BP_WATCHPOINT_HIT_READ; } else { wp->flags |= BP_WATCHPOINT_HIT_WRITE; } - wp->hitaddr = vaddr; + wp->hitaddr = MAX(addr, wp->vaddr); wp->hitattrs = attrs; if (!cpu->watchpoint_hit) { if (wp->flags & BP_CPU && @@ -2829,11 +2828,14 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) if (wp->flags & BP_STOP_BEFORE_ACCESS) { cpu->exception_index = EXCP_DEBUG; mmap_unlock(); - cpu_loop_exit(cpu); + cpu_loop_exit_restore(cpu, ra); } else { /* Force execution of one insn next time. */ cpu->cflags_next_tb = 1 | curr_cflags(); mmap_unlock(); + if (ra) { + cpu_restore_state(cpu, ra, true); + } cpu_loop_exit_noexc(cpu); } } @@ -2843,6 +2845,14 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) } } +static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) +{ + CPUState *cpu = current_cpu; + vaddr addr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; + + cpu_check_watchpoint(cpu, addr, len, attrs, flags, 0); +} + /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, so these check for a hit then pass through to the normal out-of-line phys routines. */ diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 6de688059d..7bd8bed5b2 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -1091,6 +1091,11 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { } + +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs atr, int fl, uintptr_t ra) +{ +} #else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); @@ -1098,6 +1103,8 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags); void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra); #endif /** From 30d7e098d5c38644359820317fcf72e3e129ec53 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Fri, 23 Aug 2019 15:12:32 -0700 Subject: [PATCH 22/36] cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK We had two different mechanisms to force a recheck of the tlb. Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit that would immediate set TLB_INVALID_MASK, which automatically means that a second check of the tlb entry fails. We can use the same mechanism to handle small pages. Conserve TLB_* bits by removing TLB_RECHECK. 
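As a standalone illustration of why this works (constants simplified; QEMU derives the real flag bits from TARGET_PAGE_BITS): the flag lives inside the page-offset bits of the stored address, so setting it makes the fast-path comparison against any page-aligned address fail, forcing a TLB refill on every access just as TLB_RECHECK did.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_SIM    (~0xfffULL)   /* 4 KiB pages assumed */
#define TLB_INVALID_SIM  (1ULL << 0)   /* any bit below the page size */

/* Simplified model of the fast-path hit test. */
static bool tlb_hit_sim(uint64_t tlb_addr, uint64_t addr)
{
    return tlb_addr == (addr & PAGE_MASK_SIM);
}

int main(void)
{
    uint64_t entry = 0x7f000ULL;   /* page-aligned comparator */

    printf("%d\n", tlb_hit_sim(entry, 0x7f123));                   /* 1: hit */
    printf("%d\n", tlb_hit_sim(entry | TLB_INVALID_SIM, 0x7f123)); /* 0: miss */
    return 0;
}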
Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 86 +++++++++++------------------------------- include/exec/cpu-all.h | 5 +-- 2 files changed, 24 insertions(+), 67 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index d9787cc893..c9576bebcf 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -732,11 +732,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, address = vaddr_page; if (size < TARGET_PAGE_SIZE) { - /* - * Slow-path the TLB entries; we will repeat the MMU check and TLB - * fill on every access. - */ - address |= TLB_RECHECK; + /* Repeat the MMU check and TLB fill on every access. */ + address |= TLB_INVALID_MASK; } if (attrs.byte_swap) { /* Force the access through the I/O slow path. */ @@ -1026,10 +1023,15 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ (ADDR) & TARGET_PAGE_MASK) -/* NOTE: this function can trigger an exception */ -/* NOTE2: the returned address is not exactly the physical address: it - * is actually a ram_addr_t (in system mode; the user mode emulation - * version of this function returns a guest virtual address). +/* + * Return a ram_addr_t for the virtual address for execution. + * + * Return -1 if we can't translate and execute from an entire page + * of RAM. This will force us to execute by loading and translating + * one insn at a time, without caching. + * + * NOTE: This function will trigger an exception if the page is + * not executable. */ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) { @@ -1043,19 +1045,20 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); + + if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { + /* + * The MMU protection covers a smaller range than a target + * page, so we must redo the MMU check for every insn. + */ + return -1; + } } assert(tlb_hit(entry->addr_code, addr)); } - if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) { - /* - * Return -1 if we can't translate and execute from an entire - * page of RAM here, which will cause us to execute by loading - * and translating one insn at a time, without caching: - * - TLB_RECHECK: means the MMU protection covers a smaller range - * than a target page, so we must redo the MMU check every insn - * - TLB_MMIO: region is not backed by RAM - */ + if (unlikely(entry->addr_code & TLB_MMIO)) { + /* The region is not backed by RAM. */ return -1; } @@ -1180,7 +1183,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, } /* Notice an IO access or a needs-MMU-lookup access */ - if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) { + if (unlikely(tlb_addr & TLB_MMIO)) { /* There's really nothing that can be done to support this apart from stop-the-world. */ goto stop_the_world; @@ -1258,6 +1261,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, entry = tlb_entry(env, mmu_idx, addr); } tlb_addr = code_read ? entry->addr_code : entry->addr_read; + tlb_addr &= ~TLB_INVALID_MASK; } /* Handle an IO access. 
*/ @@ -1265,27 +1269,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, if ((addr & (size - 1)) != 0) { goto do_unaligned_access; } - - if (tlb_addr & TLB_RECHECK) { - /* - * This is a TLB_RECHECK access, where the MMU protection - * covers a smaller range than a target page, and we must - * repeat the MMU check here. This tlb_fill() call might - * longjump out if this access should cause a guest exception. - */ - tlb_fill(env_cpu(env), addr, size, - access_type, mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - - tlb_addr = code_read ? entry->addr_code : entry->addr_read; - tlb_addr &= ~TLB_RECHECK; - if (!(tlb_addr & ~TARGET_PAGE_MASK)) { - /* RAM access */ - goto do_aligned_access; - } - } - return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, addr, retaddr, access_type, op); } @@ -1314,7 +1297,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, return res & MAKE_64BIT_MASK(0, size * 8); } - do_aligned_access: haddr = (void *)((uintptr_t)addr + entry->addend); switch (op) { case MO_UB: @@ -1509,27 +1491,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, if ((addr & (size - 1)) != 0) { goto do_unaligned_access; } - - if (tlb_addr & TLB_RECHECK) { - /* - * This is a TLB_RECHECK access, where the MMU protection - * covers a smaller range than a target page, and we must - * repeat the MMU check here. This tlb_fill() call might - * longjump out if this access should cause a guest exception. - */ - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, - mmu_idx, retaddr); - index = tlb_index(env, mmu_idx, addr); - entry = tlb_entry(env, mmu_idx, addr); - - tlb_addr = tlb_addr_write(entry); - tlb_addr &= ~TLB_RECHECK; - if (!(tlb_addr & ~TARGET_PAGE_MASK)) { - /* RAM access */ - goto do_aligned_access; - } - } - io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, val, addr, retaddr, op); return; @@ -1579,7 +1540,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, return; } - do_aligned_access: haddr = (void *)((uintptr_t)addr + entry->addend); switch (op) { case MO_UB: diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 8323094648..8d07ae23a5 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -329,14 +329,11 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) -/* Set if TLB entry must have MMU lookup repeated for every access */ -#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4)) /* Use this mask to check interception with an alignment mask * in a TCG backend. */ -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ - | TLB_RECHECK) +#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) /** * tlb_hit_page: return true if page aligned @addr is a hit against the From 56ad8b007dde7a61e02582e1f2d5c57fc0165a6b Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 24 Aug 2019 08:21:34 -0700 Subject: [PATCH 23/36] exec: Factor out cpu_watchpoint_address_matches We want to move the check for watchpoints from memory_region_section_get_iotlb to tlb_set_page_with_attrs. Isolate the loop over watchpoints to an exported function. Rename the existing cpu_watchpoint_address_matches to watchpoint_address_matches, since it doesn't actually have a cpu argument. 
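A hedged sketch of how the exported variant gets used (it mirrors the memory_region_section_get_iotlb hunk below; the BP_MEM_*/PAGE_* values match QEMU's headers, the wrapper itself is illustrative):

#define PAGE_READ     0x0001
#define PAGE_WRITE    0x0002
#define BP_MEM_READ   0x01
#define BP_MEM_WRITE  0x02

/*
 * Trap only the access directions that the mapping permits *and* that
 * some watchpoint on the page watches; a zero result means the page
 * can stay on the fast path.
 */
static int trapped_accesses(int prot, int wp_flags)
{
    int match = (prot & PAGE_READ ? BP_MEM_READ : 0)
              | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0);
    return wp_flags & match;
}

Returning the OR of the flags rather than a bool is what lets the caller make that per-direction decision for each page.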
Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- exec.c | 45 ++++++++++++++++++++++++++++--------------- include/hw/core/cpu.h | 7 +++++++ 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/exec.c b/exec.c index cb6f5763dc..8575ce51ad 100644 --- a/exec.c +++ b/exec.c @@ -1138,9 +1138,8 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) * partially or completely with the address range covered by the * access). */ -static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, - vaddr addr, - vaddr len) +static inline bool watchpoint_address_matches(CPUWatchpoint *wp, + vaddr addr, vaddr len) { /* We know the lengths are non-zero, but a little caution is * required to avoid errors in the case where the range ends @@ -1152,6 +1151,20 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, return !(addr > wpend || wp->vaddr > addrend); } + +/* Return flags for watchpoints that match addr + prot. */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) +{ + CPUWatchpoint *wp; + int ret = 0; + + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) { + ret |= wp->flags; + } + } + return ret; +} #endif /* !CONFIG_USER_ONLY */ /* Add a breakpoint. */ @@ -1459,7 +1472,7 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, target_ulong *address) { hwaddr iotlb; - CPUWatchpoint *wp; + int flags, match; if (memory_region_is_ram(section->mr)) { /* Normal RAM. */ @@ -1477,17 +1490,17 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, iotlb += xlat; } - /* Make accesses to pages with watchpoints go via the - watchpoint trap routines. */ - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) { - /* Avoid trapping reads of pages with a write breakpoint. */ - if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { - iotlb = PHYS_SECTION_WATCH + paddr; - *address |= TLB_MMIO; - break; - } - } + /* Avoid trapping reads of pages with a write breakpoint. */ + match = (prot & PAGE_READ ? BP_MEM_READ : 0) + | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0); + flags = cpu_watchpoint_address_matches(cpu, vaddr, TARGET_PAGE_SIZE); + if (flags & match) { + /* + * Make accesses to pages with watchpoints go via the + * watchpoint trap routines. 
+ */ + iotlb = PHYS_SECTION_WATCH + paddr; + *address |= TLB_MMIO; } return iotlb; @@ -2806,7 +2819,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, addr = cc->adjust_watchpoint_address(cpu, addr, len); QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (cpu_watchpoint_address_matches(wp, addr, len) + if (watchpoint_address_matches(wp, addr, len) && (wp->flags & flags)) { if (flags == BP_MEM_READ) { wp->flags |= BP_WATCHPOINT_HIT_READ; diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 7bd8bed5b2..c7cda65c66 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -1096,6 +1096,12 @@ static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs atr, int fl, uintptr_t ra) { } + +static inline int cpu_watchpoint_address_matches(CPUState *cpu, + vaddr addr, vaddr len) +{ + return 0; +} #else int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags, CPUWatchpoint **watchpoint); @@ -1105,6 +1111,7 @@ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); void cpu_watchpoint_remove_all(CPUState *cpu, int mask); void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs attrs, int flags, uintptr_t ra); +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); #endif /** From 8f7cd2ad4acd01242d00807e231097b3de9f0930 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 28 Aug 2019 15:25:28 -0700 Subject: [PATCH 24/36] cputlb: Fix size operand for tlb_fill on unaligned store MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are currently passing the size of the full write to the tlb_fill for the second page. Instead pass the real size of the write to that page. This argument is unused within all tlb_fill, except to be logged via tracing, so in practice this makes no difference. But in a moment we'll need the value of size2 for watchpoints, and if we've computed the value we might as well use it. Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index c9576bebcf..7fb67d2f05 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1504,6 +1504,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, uintptr_t index2; CPUTLBEntry *entry2; target_ulong page2, tlb_addr2; + size_t size2; + do_unaligned_access: /* * Ensure the second page is in the TLB. Note that the first page @@ -1511,13 +1513,14 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, * cannot evict the first. */ page2 = (addr + size) & TARGET_PAGE_MASK; + size2 = (addr + size) & ~TARGET_PAGE_MASK; index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); tlb_addr2 = tlb_addr_write(entry2); if (!tlb_hit_page(tlb_addr2, page2) && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2 & TARGET_PAGE_MASK)) { - tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE, + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, mmu_idx, retaddr); } From 5787585d0406cfd54dda0c71ea1a603347ce6e71 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 28 Aug 2019 15:32:55 -0700 Subject: [PATCH 25/36] cputlb: Remove double-alignment in store_helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have already aligned page2 to the start of the next page. 
There is no reason to do that a second time. Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 7fb67d2f05..d0f8db33a2 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1518,8 +1518,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, entry2 = tlb_entry(env, mmu_idx, page2); tlb_addr2 = tlb_addr_write(entry2); if (!tlb_hit_page(tlb_addr2, page2) - && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, - page2 & TARGET_PAGE_MASK)) { + && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, mmu_idx, retaddr); } From 50b107c5d617eaf93301cef20221312e7a986701 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 24 Aug 2019 09:51:09 -0700 Subject: [PATCH 26/36] cputlb: Handle watchpoints via TLB_WATCHPOINT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The raising of exceptions from check_watchpoint, buried inside of the I/O subsystem, is fundamentally broken. We do not have the helper return address with which we can unwind guest state. Replace PHYS_SECTION_WATCH and io_mem_watch with TLB_WATCHPOINT. Move the call to cpu_check_watchpoint into the cputlb helpers where we do have the helper return address. This allows watchpoints on RAM to bypass the full i/o access path. Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: David Hildenbrand Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 89 ++++++++++++++++++++++++++++---- exec.c | 114 +++-------------------------------------- include/exec/cpu-all.h | 5 +- 3 files changed, 90 insertions(+), 118 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index d0f8db33a2..9a9a626938 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -710,6 +710,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, hwaddr iotlb, xlat, sz, paddr_page; target_ulong vaddr_page; int asidx = cpu_asidx_from_attrs(cpu, attrs); + int wp_flags; assert_cpu_is_self(cpu); @@ -752,6 +753,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, code_address = address; iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, paddr_page, xlat, prot, &address); + wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, + TARGET_PAGE_SIZE); index = tlb_index(env, mmu_idx, vaddr_page); te = tlb_entry(env, mmu_idx, vaddr_page); @@ -805,6 +808,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, tn.addend = addend - vaddr_page; if (prot & PAGE_READ) { tn.addr_read = address; + if (wp_flags & BP_MEM_READ) { + tn.addr_read |= TLB_WATCHPOINT; + } } else { tn.addr_read = -1; } @@ -831,6 +837,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, if (prot & PAGE_WRITE_INV) { tn.addr_write |= TLB_INVALID_MASK; } + if (wp_flags & BP_MEM_WRITE) { + tn.addr_write |= TLB_WATCHPOINT; + } } copy_tlb_helper_locked(te, &tn); @@ -1264,13 +1273,33 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, tlb_addr &= ~TLB_INVALID_MASK; } - /* Handle an IO access. */ + /* Handle anything that isn't just a straight memory access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + CPUIOTLBEntry *iotlbentry; + + /* For anything that is unaligned, recurse through full_load. 
*/ if ((addr & (size - 1)) != 0) { goto do_unaligned_access; } - return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], - mmu_idx, addr, retaddr, access_type, op); + + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + /* On watchpoint hit, this will longjmp out. */ + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, BP_MEM_READ, retaddr); + + /* The backing page may or may not require I/O. */ + tlb_addr &= ~TLB_WATCHPOINT; + if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) { + goto do_aligned_access; + } + } + + /* Handle I/O access. */ + return io_readx(env, iotlbentry, mmu_idx, addr, + retaddr, access_type, op); } /* Handle slow unaligned access (it spans two pages or IO). */ @@ -1297,6 +1326,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, return res & MAKE_64BIT_MASK(0, size * 8); } + do_aligned_access: haddr = (void *)((uintptr_t)addr + entry->addend); switch (op) { case MO_UB: @@ -1486,13 +1516,32 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; } - /* Handle an IO access. */ + /* Handle anything that isn't just a straight memory access. */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + CPUIOTLBEntry *iotlbentry; + + /* For anything that is unaligned, recurse through byte stores. */ if ((addr & (size - 1)) != 0) { goto do_unaligned_access; } - io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, - val, addr, retaddr, op); + + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Handle watchpoints. */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + /* On watchpoint hit, this will longjmp out. */ + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, BP_MEM_WRITE, retaddr); + + /* The backing page may or may not require I/O. */ + tlb_addr &= ~TLB_WATCHPOINT; + if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) { + goto do_aligned_access; + } + } + + /* Handle I/O access. */ + io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op); return; } @@ -1517,10 +1566,29 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); tlb_addr2 = tlb_addr_write(entry2); - if (!tlb_hit_page(tlb_addr2, page2) - && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { - tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, - mmu_idx, retaddr); + if (!tlb_hit_page(tlb_addr2, page2)) { + if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, + mmu_idx, retaddr); + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + } + tlb_addr2 = tlb_addr_write(entry2); + } + + /* + * Handle watchpoints. Since this may trap, all checks + * must happen before any store. 
+ */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), addr, size - size2, + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, + BP_MEM_WRITE, retaddr); + } + if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), page2, size2, + env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, + BP_MEM_WRITE, retaddr); } /* @@ -1542,6 +1610,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, return; } + do_aligned_access: haddr = (void *)((uintptr_t)addr + entry->addend); switch (op) { case MO_UB: diff --git a/exec.c b/exec.c index 8575ce51ad..ad0f4a598f 100644 --- a/exec.c +++ b/exec.c @@ -193,15 +193,12 @@ typedef struct subpage_t { #define PHYS_SECTION_UNASSIGNED 0 #define PHYS_SECTION_NOTDIRTY 1 #define PHYS_SECTION_ROM 2 -#define PHYS_SECTION_WATCH 3 static void io_mem_init(void); static void memory_map_init(void); static void tcg_log_global_after_sync(MemoryListener *listener); static void tcg_commit(MemoryListener *listener); -static MemoryRegion io_mem_watch; - /** * CPUAddressSpace: all the information a CPU needs about an AddressSpace * @cpu: the CPU whose AddressSpace this is @@ -1472,7 +1469,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, target_ulong *address) { hwaddr iotlb; - int flags, match; if (memory_region_is_ram(section->mr)) { /* Normal RAM. */ @@ -1490,19 +1486,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, iotlb += xlat; } - /* Avoid trapping reads of pages with a write breakpoint. */ - match = (prot & PAGE_READ ? BP_MEM_READ : 0) - | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0); - flags = cpu_watchpoint_address_matches(cpu, vaddr, TARGET_PAGE_SIZE); - if (flags & match) { - /* - * Make accesses to pages with watchpoints go via the - * watchpoint trap routines. - */ - iotlb = PHYS_SECTION_WATCH + paddr; - *address |= TLB_MMIO; - } - return iotlb; } #endif /* defined(CONFIG_USER_ONLY) */ @@ -2810,10 +2793,14 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, assert(tcg_enabled()); if (cpu->watchpoint_hit) { - /* We re-entered the check after replacing the TB. Now raise - * the debug interrupt so that is will trigger after the - * current instruction. */ + /* + * We re-entered the check after replacing the TB. + * Now raise the debug interrupt so that it will + * trigger after the current instruction. + */ + qemu_mutex_lock_iothread(); cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); + qemu_mutex_unlock_iothread(); return; } @@ -2858,88 +2845,6 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, } } -static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) -{ - CPUState *cpu = current_cpu; - vaddr addr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; - - cpu_check_watchpoint(cpu, addr, len, attrs, flags, 0); -} - -/* Watchpoint access routines. Watchpoints are inserted using TLB tricks, - so these check for a hit then pass through to the normal out-of-line - phys routines. 
*/ -static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata, - unsigned size, MemTxAttrs attrs) -{ - MemTxResult res; - uint64_t data; - int asidx = cpu_asidx_from_attrs(current_cpu, attrs); - AddressSpace *as = current_cpu->cpu_ases[asidx].as; - - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ); - switch (size) { - case 1: - data = address_space_ldub(as, addr, attrs, &res); - break; - case 2: - data = address_space_lduw(as, addr, attrs, &res); - break; - case 4: - data = address_space_ldl(as, addr, attrs, &res); - break; - case 8: - data = address_space_ldq(as, addr, attrs, &res); - break; - default: abort(); - } - *pdata = data; - return res; -} - -static MemTxResult watch_mem_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size, - MemTxAttrs attrs) -{ - MemTxResult res; - int asidx = cpu_asidx_from_attrs(current_cpu, attrs); - AddressSpace *as = current_cpu->cpu_ases[asidx].as; - - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE); - switch (size) { - case 1: - address_space_stb(as, addr, val, attrs, &res); - break; - case 2: - address_space_stw(as, addr, val, attrs, &res); - break; - case 4: - address_space_stl(as, addr, val, attrs, &res); - break; - case 8: - address_space_stq(as, addr, val, attrs, &res); - break; - default: abort(); - } - return res; -} - -static const MemoryRegionOps watch_mem_ops = { - .read_with_attrs = watch_mem_read, - .write_with_attrs = watch_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 8, - .unaligned = false, - }, - .impl = { - .min_access_size = 1, - .max_access_size = 8, - .unaligned = false, - }, -}; - static MemTxResult flatview_read(FlatView *fv, hwaddr addr, MemTxAttrs attrs, uint8_t *buf, hwaddr len); static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, @@ -3115,9 +3020,6 @@ static void io_mem_init(void) memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, NULL, UINT64_MAX); memory_region_clear_global_locking(&io_mem_notdirty); - - memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, - NULL, UINT64_MAX); } AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) @@ -3131,8 +3033,6 @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) assert(n == PHYS_SECTION_NOTDIRTY); n = dummy_section(&d->map, fv, &io_mem_rom); assert(n == PHYS_SECTION_ROM); - n = dummy_section(&d->map, fv, &io_mem_watch); - assert(n == PHYS_SECTION_WATCH); d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 8d07ae23a5..d2d443c4f9 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -329,11 +329,14 @@ CPUArchState *cpu_copy(CPUArchState *env); #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) +/* Set if TLB entry contains a watchpoint. */ +#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4)) /* Use this mask to check interception with an alignment mask * in a TCG backend. 
*/ -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) +#define TLB_FLAGS_MASK \ + (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT) /** * tlb_hit_page: return true if page aligned @addr is a hit against the From 03a981893c99faba84bb373976796ad7dce0aecc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 23 Aug 2019 12:07:41 +0200 Subject: [PATCH 27/36] tcg: Check for watchpoints in probe_write() Let size > 0 indicate a promise to write to those bytes. Check for write watchpoints in the probed range. Suggested-by: Richard Henderson Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190823100741.9621-10-david@redhat.com> [rth: Recompute index after tlb_fill; check TLB_WATCHPOINT.] Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 9a9a626938..010c4c6e3c 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1086,13 +1086,24 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, { uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(entry); - if (!tlb_hit(tlb_addr_write(entry), addr)) { - /* TLB entry is for a different page */ + if (unlikely(!tlb_hit(tlb_addr, addr))) { if (!VICTIM_TLB_HIT(addr_write, addr)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, mmu_idx, retaddr); + /* TLB resize via tlb_fill may have moved the entry. */ + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); } + tlb_addr = tlb_addr_write(entry); + } + + /* Handle watchpoints. */ + if ((tlb_addr & TLB_WATCHPOINT) && size > 0) { + cpu_check_watchpoint(env_cpu(env), addr, size, + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, + BP_MEM_WRITE, retaddr); } } From 9e5bef4920b85f30e6f1588b742abb10bc840c93 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:06 +0200 Subject: [PATCH 28/36] s390x/tcg: Use guest_addr_valid() instead of h2g_valid() in probe_write_access() If I'm not completely wrong, we are dealing with guest addresses here and not with host addresses. Use the right check. Fixes: c5a7392cfb96 ("s390x/tcg: Provide probe_write_access helper") Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Reviewed-by: Cornelia Huck Message-Id: <20190826075112.25637-2-david@redhat.com> Signed-off-by: Richard Henderson --- target/s390x/mem_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index 91ba2e03d9..7819aca15d 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -2616,7 +2616,7 @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, uintptr_t ra) { #ifdef CONFIG_USER_ONLY - if (!h2g_valid(addr) || !h2g_valid(addr + len - 1) || + if (!guest_addr_valid(addr) || !guest_addr_valid(addr + len - 1) || page_check_range(addr, len, PAGE_WRITE) < 0) { s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra); } From 46750128631eaace54b69ddd8b63683edd4606cc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:07 +0200 Subject: [PATCH 29/36] s390x/tcg: Fix length calculation in probe_write_access() Hm... how did that "-" slip in (-TAGRET_PAGE_SIZE would be correct). This currently makes us exceed one page in a single probe_write() call, essentially leaving some memory unchecked. 
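A standalone check of the corrected expression, modelling a 4 KiB page: -(addr | TARGET_PAGE_MASK) is exactly the number of bytes from addr to the end of its page, so each probe_write() call stays within one page. With the stray '-', an unaligned addr could yield an unsigned length far larger than a page.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 0x1000ULL
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

int main(void)
{
    uint64_t addrs[] = { 0x1000, 0x1ff0, 0x1fff };

    for (int i = 0; i < 3; i++) {
        uint64_t pagelen = -(addrs[i] | TARGET_PAGE_MASK);
        /* Prints 4096, 16 and 1: never more than one page. */
        printf("0x%llx -> %llu\n", (unsigned long long)addrs[i],
               (unsigned long long)pagelen);
    }
    return 0;
}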
Fixes: c5a7392cfb96 ("s390x/tcg: Provide probe_write_access helper") Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Reviewed-by: Cornelia Huck Message-Id: <20190826075112.25637-3-david@redhat.com> Signed-off-by: Richard Henderson --- target/s390x/mem_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index 7819aca15d..4b43440e89 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -2623,7 +2623,7 @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, #else /* test the actual access, not just any access to the page due to LAP */ while (len) { - const uint64_t pagelen = -(addr | -TARGET_PAGE_MASK); + const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); const uint64_t curlen = MIN(pagelen, len); probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); From 59e96ac6cb13951dd09afc70622858089abf3384 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:08 +0200 Subject: [PATCH 30/36] tcg: Factor out CONFIG_USER_ONLY probe_write() from s390x code Factor it out into common code. Similar to the !CONFIG_USER_ONLY variant, let's not allow to cross page boundaries. Signed-off-by: David Hildenbrand Reviewed-by: Richard Henderson Message-Id: <20190826075112.25637-4-david@redhat.com> [rth: Move cpu & cc variables inside if block.] Signed-off-by: Richard Henderson --- accel/tcg/user-exec.c | 14 ++++++++++++++ include/exec/exec-all.h | 4 ++-- target/s390x/mem_helper.c | 7 ------- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 897d1571c4..86e6827201 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -188,6 +188,20 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, g_assert_not_reached(); } +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr) +{ + if (!guest_addr_valid(addr) || + page_check_range(addr, size, PAGE_WRITE) < 0) { + CPUState *cpu = env_cpu(env); + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->tlb_fill(cpu, addr, size, MMU_DATA_STORE, MMU_USER_IDX, false, + retaddr); + g_assert_not_reached(); + } +} + #if defined(__i386__) #if defined(__NetBSD__) diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 135aeaab0d..cbcc85add3 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -260,8 +260,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size); -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr); #else static inline void tlb_init(CPUState *cpu) { @@ -312,6 +310,8 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, { } #endif +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr); #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index 4b43440e89..fdff60ce5d 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -2615,12 +2615,6 @@ uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, uintptr_t ra) { -#ifdef CONFIG_USER_ONLY - if (!guest_addr_valid(addr) || !guest_addr_valid(addr + len - 1) || - page_check_range(addr, len, 
PAGE_WRITE) < 0) { - s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra); - } -#else /* test the actual access, not just any access to the page due to LAP */ while (len) { const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); @@ -2630,7 +2624,6 @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, addr = wrap_address(env, addr + curlen); len -= curlen; } -#endif } void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) From ca86cf328ce216bb304bbf09a43614613f945d86 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:09 +0200 Subject: [PATCH 31/36] tcg: Enforce single page access in probe_write() Let's enforce the interface restriction. Signed-off-by: David Hildenbrand Reviewed-by: Richard Henderson Message-Id: <20190826075112.25637-5-david@redhat.com> Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 2 ++ accel/tcg/user-exec.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 010c4c6e3c..707adf7631 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1088,6 +1088,8 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); target_ulong tlb_addr = tlb_addr_write(entry); + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + if (unlikely(!tlb_hit(tlb_addr, addr))) { if (!VICTIM_TLB_HIT(addr_write, addr)) { tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 86e6827201..625c33f893 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -191,6 +191,8 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, uintptr_t retaddr) { + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + if (!guest_addr_valid(addr) || page_check_range(addr, size, PAGE_WRITE) < 0) { CPUState *cpu = env_cpu(env); From 3a9576ec98cf87ac29e6e3aa885d6c782f918f73 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:10 +0200 Subject: [PATCH 32/36] mips/tcg: Call probe_write() for CONFIG_USER_ONLY as well Let's call it also for CONFIG_USER_ONLY. While at it, add a FIXME and get rid of one local variable. MIPS code probably needs a bigger refactoring in regards of ensure_writable_pages(), similar to s390x, so for example, watchpoints can be handled reliably later. The actually accessed addresses should be probed only, not full pages. 
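For reference, a standalone model of the page-span condition this helper keys off (the real MSA_PAGESPAN macro uses the vector register width; a 4 KiB page and an explicit length are assumed here): only when the in-page offset plus the access length crosses the boundary does the second probe_write() fire.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 0x1000ULL
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

/* True when a len-byte access at addr touches a second page. */
static bool page_span(uint64_t addr, uint64_t len)
{
    return (addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE;
}

int main(void)
{
    printf("%d\n", page_span(0x1ff0, 16)); /* 0: ends at 0x1fff */
    printf("%d\n", page_span(0x1ff8, 16)); /* 1: spills into next page */
    return 0;
}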
Signed-off-by: David Hildenbrand Reviewed-by: Aleksandar Markovic Message-Id: <20190826075112.25637-6-david@redhat.com> Signed-off-by: Richard Henderson --- target/mips/op_helper.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c index 34bcc8d884..08d9a4f9f1 100644 --- a/target/mips/op_helper.c +++ b/target/mips/op_helper.c @@ -4537,16 +4537,14 @@ static inline void ensure_writable_pages(CPUMIPSState *env, int mmu_idx, uintptr_t retaddr) { -#if !defined(CONFIG_USER_ONLY) - target_ulong page_addr; + /* FIXME: Probe the actual accesses (pass and use a size) */ if (unlikely(MSA_PAGESPAN(addr))) { /* first page */ probe_write(env, addr, 0, mmu_idx, retaddr); /* second page */ - page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; - probe_write(env, page_addr, 0, mmu_idx, retaddr); + addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + probe_write(env, addr, 0, mmu_idx, retaddr); } -#endif } void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, From 200aa7a527dd2af04a0cf524a44596e89db6d4d6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:11 +0200 Subject: [PATCH 33/36] hppa/tcg: Call probe_write() also for CONFIG_USER_ONLY We now have a variant for CONFIG_USER_ONLY as well. Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190826075112.25637-7-david@redhat.com> Signed-off-by: Richard Henderson --- target/hppa/op_helper.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index df0f1361ef..f0516e81f1 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -137,9 +137,7 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val, default: /* Nothing is stored, but protection is checked and the cacheline is marked dirty. */ -#ifndef CONFIG_USER_ONLY probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra); -#endif break; } } From 9cd9cdaefc2be7a7e26684734bd334b717c50e5a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Aug 2019 09:51:12 +0200 Subject: [PATCH 34/36] s390x/tcg: Pass a size to probe_write() in do_csst() ... and also call it for CONFIG_USER_ONLY. This function probably will also need some refactoring in regards to probing, however, we'll have to come back to that later, once cleaning up the other mem helpers. The alignment check always makes sure that the write access falls into a single page. Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190826075112.25637-8-david@redhat.com> Signed-off-by: Richard Henderson --- target/s390x/mem_helper.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index fdff60ce5d..29fcce426e 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -1443,9 +1443,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, } /* Sanity check writability of the store address. */ -#ifndef CONFIG_USER_ONLY - probe_write(env, a2, 0, mem_idx, ra); -#endif + probe_write(env, a2, 1 << sc, mem_idx, ra); /* * Note that the compare-and-swap is atomic, and the store is atomic, From fef39ccd567032d3ad520ed80f3576068e6eb2e3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 30 Aug 2019 12:09:58 +0200 Subject: [PATCH 35/36] tcg: Make probe_write() return a pointer to the host page ... similar to tlb_vaddr_to_host(); however, allow access to the host page except when TLB_NOTDIRTY or TLB_MMIO is set. 
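A hedged usage sketch, not taken from this series: with the new return value a target helper can fast-path a fill, falling back to byte stores for I/O or not-dirty pages. It assumes QEMU's headers, probe_write() as changed here, and the existing cpu_stb_data_ra() store; the helper itself is invented.

/*
 * Invented helper.  The caller must keep [addr, addr + len) within one
 * guest page, per the probe_write() contract.
 */
static void fill_bytes(CPUArchState *env, target_ulong addr, uint8_t c,
                       int len, int mmu_idx, uintptr_t ra)
{
    void *host = probe_write(env, addr, len, mmu_idx, ra);

    if (host) {
        /* Plain RAM, safe to touch directly. */
        memset(host, c, len);
    } else {
        /* len == 0, or an I/O / not-dirty page: use full stores. */
        for (int i = 0; i < len; i++) {
            cpu_stb_data_ra(env, addr + i, c, ra);
        }
    }
}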
Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190830100959.26615-2-david@redhat.com> Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 21 ++++++++++++++++----- accel/tcg/user-exec.c | 6 ++++-- include/exec/exec-all.h | 4 ++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 707adf7631..cb969d8372 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1078,11 +1078,11 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) /* Probe for whether the specified guest write access is permitted. * If it is not permitted then an exception will be taken in the same * way as if this were a real write access (and we will not return). - * Otherwise the function will return, and there will be a valid - * entry in the TLB for this access. + * If the size is 0 or the page requires I/O access, returns NULL; otherwise, + * returns the address of the host page similar to tlb_vaddr_to_host(). */ -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr) +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr) { uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); @@ -1101,12 +1101,23 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, tlb_addr = tlb_addr_write(entry); } + if (!size) { + return NULL; + } + /* Handle watchpoints. */ - if ((tlb_addr & TLB_WATCHPOINT) && size > 0) { + if (tlb_addr & TLB_WATCHPOINT) { cpu_check_watchpoint(env_cpu(env), addr, size, env_tlb(env)->d[mmu_idx].iotlb[index].attrs, BP_MEM_WRITE, retaddr); } + + if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) { + /* I/O access */ + return NULL; + } + + return (void *)((uintptr_t)addr + entry->addend); } void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 625c33f893..5720bf8056 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -188,8 +188,8 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, g_assert_not_reached(); } -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr) +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr) { g_assert(-(addr | TARGET_PAGE_MASK) >= size); @@ -202,6 +202,8 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, retaddr); g_assert_not_reached(); } + + return size ? g2h(addr) : NULL; } #if defined(__i386__) diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index cbcc85add3..a7893ed16b 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -310,8 +310,8 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, { } #endif -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr); +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, + uintptr_t retaddr); #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ From c25c283df0f08582df29f1d5d7be1516b851532d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 30 Aug 2019 12:09:59 +0200 Subject: [PATCH 36/36] tcg: Factor out probe_write() logic into probe_access() Let's also allow to probe other access types. 
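For example (hedged: the wrapper name is invented, only probe_access() and MMU_DATA_LOAD are real), probing a load becomes a one-liner, symmetric with the probe_write() wrapper kept below:

/* Invented convenience wrapper. */
static inline void *probe_read(CPUArchState *env, target_ulong addr,
                               int size, int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}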
Reviewed-by: Richard Henderson Signed-off-by: David Hildenbrand Message-Id: <20190830100959.26615-3-david@redhat.com> Signed-off-by: Richard Henderson --- accel/tcg/cputlb.c | 43 ++++++++++++++++++++++++++++++----------- accel/tcg/user-exec.c | 26 +++++++++++++++++++------ include/exec/exec-all.h | 10 ++++++++-- 3 files changed, 60 insertions(+), 19 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index cb969d8372..abae79650c 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1075,30 +1075,51 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) return qemu_ram_addr_from_host_nofail(p); } -/* Probe for whether the specified guest write access is permitted. - * If it is not permitted then an exception will be taken in the same - * way as if this were a real write access (and we will not return). +/* + * Probe for whether the specified guest access is permitted. If it is not + * permitted then an exception will be taken in the same way as if this + * were a real access (and we will not return). * If the size is 0 or the page requires I/O access, returns NULL; otherwise, * returns the address of the host page similar to tlb_vaddr_to_host(). */ -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr) +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - target_ulong tlb_addr = tlb_addr_write(entry); + target_ulong tlb_addr; + size_t elt_ofs; + int wp_access; g_assert(-(addr | TARGET_PAGE_MASK) >= size); + switch (access_type) { + case MMU_DATA_LOAD: + elt_ofs = offsetof(CPUTLBEntry, addr_read); + wp_access = BP_MEM_READ; + break; + case MMU_DATA_STORE: + elt_ofs = offsetof(CPUTLBEntry, addr_write); + wp_access = BP_MEM_WRITE; + break; + case MMU_INST_FETCH: + elt_ofs = offsetof(CPUTLBEntry, addr_code); + wp_access = BP_MEM_READ; + break; + default: + g_assert_not_reached(); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + if (unlikely(!tlb_hit(tlb_addr, addr))) { - if (!VICTIM_TLB_HIT(addr_write, addr)) { - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, - mmu_idx, retaddr); + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); /* TLB resize via tlb_fill may have moved the entry. 
*/ index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } - tlb_addr = tlb_addr_write(entry); + tlb_addr = tlb_read_ofs(entry, elt_ofs); } if (!size) { @@ -1109,7 +1130,7 @@ void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, if (tlb_addr & TLB_WATCHPOINT) { cpu_check_watchpoint(env_cpu(env), addr, size, env_tlb(env)->d[mmu_idx].iotlb[index].attrs, - BP_MEM_WRITE, retaddr); + wp_access, retaddr); } if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) { diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 5720bf8056..71c4bf6477 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -188,17 +188,31 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, g_assert_not_reached(); } -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr) +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { + int flags; + g_assert(-(addr | TARGET_PAGE_MASK) >= size); - if (!guest_addr_valid(addr) || - page_check_range(addr, size, PAGE_WRITE) < 0) { + switch (access_type) { + case MMU_DATA_STORE: + flags = PAGE_WRITE; + break; + case MMU_DATA_LOAD: + flags = PAGE_READ; + break; + case MMU_INST_FETCH: + flags = PAGE_EXEC; + break; + default: + g_assert_not_reached(); + } + + if (!guest_addr_valid(addr) || page_check_range(addr, size, flags) < 0) { CPUState *cpu = env_cpu(env); CPUClass *cc = CPU_GET_CLASS(cpu); - - cc->tlb_fill(cpu, addr, size, MMU_DATA_STORE, MMU_USER_IDX, false, + cc->tlb_fill(cpu, addr, size, access_type, MMU_USER_IDX, false, retaddr); g_assert_not_reached(); } diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index a7893ed16b..81b02eb2fe 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -310,8 +310,14 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, { } #endif -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, - uintptr_t retaddr); +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); + +static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, + int mmu_idx, uintptr_t retaddr) +{ + return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); +} #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
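As a closing hedged sketch of what the generalised probe buys us (every name except probe_access() is invented, and the slow path plus read watchpoints are omitted for brevity): validating both operands of a read-modify-write up front means the watchpoint exception, if any, is raised before the first byte is stored.

/*
 * Invented helper built on the real probe_access().  Both probes may
 * longjmp out, so nothing is written unless both succeed.
 */
static void swap_guest_bytes(CPUArchState *env, target_ulong a,
                             target_ulong b, int size, int mmu_idx,
                             uintptr_t ra)
{
    uint8_t *pa = probe_access(env, a, size, MMU_DATA_STORE, mmu_idx, ra);
    uint8_t *pb = probe_access(env, b, size, MMU_DATA_STORE, mmu_idx, ra);

    if (pa && pb) {
        for (int i = 0; i < size; i++) {
            uint8_t t = pa[i];
            pa[i] = pb[i];
            pb[i] = t;
        }
    }
    /* else: zero size or an I/O-backed page; a per-byte slow path
     * would go here in real code. */
}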