tcg/s390x: Simplify constraints on qemu_ld/st

Adjust the softmmu tlb to use R0+R1, not any of the normally available
registers.  Since we handle overlap between inputs and helper arguments,
we can allow any allocatable reg.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson <richard.henderson@linaro.org>
2023-04-07 14:16:12 -07:00
commit 9490142284 (parent 8b1b45971f)
3 changed files with 12 additions and 27 deletions
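
Per the commit message, the old 'L' constraint existed only to keep the
tlb lookup's scratch registers away from the register allocator.  As a
minimal standalone sketch of the mask arithmetic (MAKE_64BIT_MASK and
TCG_REG_R2 below are local stand-ins reproduced from the diff, not QEMU
headers):

    #include <stdio.h>

    /* Local stand-in for QEMU's MAKE_64BIT_MASK(shift, length). */
    #define MAKE_64BIT_MASK(shift, length) \
        ((~0ULL >> (64 - (length))) << (shift))

    #define TCG_REG_R2           2    /* first helper-argument register */
    #define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 16)
    #define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3)  /* R2-R4 */

    int main(void)
    {
        /* Old 'L' constraint: general regs minus the reserved R2-R4. */
        printf("L = 0x%04llx\n",
               (unsigned long long)(ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS));
        /* New 'r' constraint: any allocatable general reg. */
        printf("r = 0x%04llx\n", (unsigned long long)ALL_GENERAL_REGS);
        return 0;
    }

This prints L = 0xffe3 and r = 0xffff: R2-R4 go back to the allocator,
while the lookup itself moves onto TCG_REG_R0 and TCG_TMP0 (R1 in this
backend), neither of which is normally available for allocation.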

tcg/s390x/tcg-target-con-set.h

@@ -10,12 +10,10 @@
  * tcg-target-con-str.h; the constraint combination is inclusive or.
  */
 C_O0_I1(r)
-C_O0_I2(L, L)
 C_O0_I2(r, r)
 C_O0_I2(r, ri)
 C_O0_I2(r, rA)
 C_O0_I2(v, r)
-C_O1_I1(r, L)
 C_O1_I1(r, r)
 C_O1_I1(v, r)
 C_O1_I1(v, v)

tcg/s390x/tcg-target-con-str.h

@@ -9,7 +9,6 @@
  * REGS(letter, register_mask)
  */
 REGS('r', ALL_GENERAL_REGS)
-REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
 REGS('v', ALL_VECTOR_REGS)
 REGS('o', 0xaaaa) /* odd numbered general regs */

tcg/s390x/tcg-target.c.inc

@@ -44,18 +44,6 @@
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
 
-/*
- * For softmmu, we need to avoid conflicts with the first 3
- * argument registers to perform the tlb lookup, and to call
- * the helper function.
- */
-#ifdef CONFIG_SOFTMMU
-#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3)
-#else
-#define SOFTMMU_RESERVE_REGS 0
-#endif
-
 /* Several places within the instruction set 0 means "no register"
    rather than TCG_REG_R0. */
 #define TCG_REG_NONE 0
@@ -1814,13 +1802,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst->oi = oi;
     ldst->addrlo_reg = addr_reg;
 
-    tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
+    tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
                  TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
-    tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off);
-    tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off);
+    tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
+    tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
 
     /*
      * For aligned accesses, we check the first byte and include the alignment
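
For readers following the retargeting above: the SRLG/NG/AG sequence
computes the address of the CPUTLBEntry for this access, now entirely in
TCG_TMP0.  A plain-C paraphrase of that address arithmetic (the constants
and the two-field descriptor are illustrative stand-ins, not QEMU's
definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS   12  /* illustrative, not QEMU's config */
    #define CPU_TLB_ENTRY_BITS 5   /* log2 of per-entry stride, illustrative */

    /* Stand-in for the {mask, table} pair loaded via mask_off/table_off. */
    typedef struct {
        uint64_t mask;
        uint64_t table;
    } TLBFast;

    /* What SRLG, NG, AG compute into TCG_TMP0: the CPUTLBEntry address. */
    static uint64_t tlb_entry_addr(const TLBFast *f, uint64_t addr)
    {
        uint64_t tmp0 = addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); /* SRLG */
        tmp0 &= f->mask;                                                 /* NG */
        return tmp0 + f->table;                                          /* AG */
    }

    int main(void)
    {
        TLBFast f = { .mask = 0x7e0, .table = 0x10000 };  /* made-up values */
        printf("entry at 0x%llx\n",
               (unsigned long long)tlb_entry_addr(&f, 0x123456));
        return 0;
    }

The comparand against the entry's addr_read/addr_write is then built in
TCG_REG_R0 (next hunk), so the lookup no longer ties up any allocatable
register, which is what lets the qemu_ld/st constraints relax to plain 'r'.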
@@ -1830,10 +1818,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
     tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
     if (a_off == 0) {
-        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
     } else {
-        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
-        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
+        tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
+        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R0, tlb_mask);
     }
 
     if (is_ld) {
@@ -1842,16 +1830,16 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ofs = offsetof(CPUTLBEntry, addr_write);
     }
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
+        tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
     } else {
-        tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
+        tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
     }
 
     tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
     ldst->label_ptr[0] = s->code_ptr++;
 
-    h->index = TCG_REG_R2;
-    tcg_out_insn(s, RXY, LG, h->index, TCG_REG_R2, TCG_REG_NONE,
+    h->index = TCG_TMP0;
+    tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
                  offsetof(CPUTLBEntry, addend));
 
     if (TARGET_LONG_BITS == 32) {
@@ -3155,10 +3143,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
-        return C_O1_I1(r, L);
+        return C_O1_I1(r, r);
     case INDEX_op_qemu_st_i64:
     case INDEX_op_qemu_st_i32:
-        return C_O0_I2(L, L);
+        return C_O0_I2(r, r);
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64: