diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 19cd4507fb..415e6c6e15 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -910,12 +910,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
 
 #ifdef CONFIG_SOFTMMU
     unsigned s_bits = opc & MO_SIZE;
+    unsigned s_mask = (1u << s_bits) - 1;
     int mem_index = get_mmuidx(oi);
     int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
     int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
     int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
-    tcg_target_long compare_mask;
+    int compare_mask;
+    TCGReg addr_adj;
 
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
@@ -924,14 +925,33 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
 
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
 
     tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
     tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
 
+    /*
+     * For aligned accesses, we check the first byte and include the alignment
+     * bits within the address. For unaligned access, we check that we don't
+     * cross pages using the address of the last byte of the access.
+     */
+    addr_adj = addr_reg;
+    if (a_bits < s_bits) {
+        addr_adj = TCG_REG_TMP0;
+        tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
+                        addr_adj, addr_reg, s_mask - a_mask);
+    }
+    compare_mask = TARGET_PAGE_MASK | a_mask;
+    if (compare_mask == sextreg(compare_mask, 0, 12)) {
+        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
+        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
+    }
+
     /* Load the tlb comparator and the addend. */
     tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
                is_ld ? offsetof(CPUTLBEntry, addr_read)
@@ -939,29 +959,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                offsetof(CPUTLBEntry, addend));
 
-    /* We don't support unaligned accesses. */
-    if (a_bits < s_bits) {
-        a_bits = s_bits;
-    }
-    /* Clear the non-page, non-alignment bits from the address.  */
-    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | a_mask;
-    if (compare_mask == sextreg(compare_mask, 0, 12)) {
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, compare_mask);
-    } else {
-        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
-        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg);
-    }
-
     /* Compare masked address with the TLB entry. */
     ldst->label_ptr[0] = s->code_ptr;
     tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
 
     /* TLB Hit - translate address using addend. */
+    addr_adj = addr_reg;
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
-        addr_reg = TCG_REG_TMP0;
+        addr_adj = TCG_REG_TMP0;
+        tcg_out_ext32u(s, addr_adj, addr_reg);
     }
-    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_reg);
+    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_adj);
     *pbase = TCG_REG_TMP0;
 #else
     if (a_mask) {
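
For reference, here is a minimal standalone C sketch (not QEMU code) of the check the patch emits: when the required alignment is weaker than the access size, the address fed to the TLB compare is bumped by s_mask - a_mask so it points at the last byte of the access, and an access that would cross a page therefore fails the fast-path compare and takes the slow path. The 4 KiB page size and the masked_addr() helper below are assumptions for illustration only.

/* Sketch of the page-crossing / alignment check, assuming 4 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12
#define PAGE_MASK (~((uint64_t)(1 << PAGE_BITS) - 1))

/* Value compared against the TLB entry's page tag for the first byte. */
static uint64_t masked_addr(uint64_t addr, unsigned s_bits, unsigned a_bits)
{
    uint64_t s_mask = (1u << s_bits) - 1;
    uint64_t a_mask = (1u << a_bits) - 1;
    uint64_t addr_adj = addr;

    if (a_bits < s_bits) {
        /* Unaligned access allowed: check the address of the last byte. */
        addr_adj = addr + (s_mask - a_mask);
    }
    /*
     * Keep the page bits plus any required alignment bits; a misaligned
     * address (when a_bits >= s_bits) also fails the compare this way.
     */
    return addr_adj & (PAGE_MASK | a_mask);
}

int main(void)
{
    /* 4-byte load, byte alignment, 2 bytes before a page boundary. */
    uint64_t addr = 0x1ffe;
    uint64_t tag  = addr & PAGE_MASK;   /* page tag of the first byte */

    /* Last byte lands in the next page, so the fast-path compare fails. */
    printf("crosses page: %s\n",
           masked_addr(addr, 2, 0) != tag ? "yes" : "no");

    /* A fully contained access at 0x1ff0 still matches the tag. */
    printf("fits in page: %s\n",
           masked_addr(0x1ff0, 2, 0) == tag ? "yes" : "no");
    return 0;
}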