tcg/ppc: Reorg tcg_out_tlb_read

Allocate TCG_REG_TMP2.  Use R0, TMP1, TMP2 instead of any of
the normally allocated registers for the tlb load.
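
As the diff below shows, R0 ends up holding the shifted page index and
then the masked address used for the comparison; TMP1 holds
tlb_mask[mmu_idx], then the CPUTLBEntry pointer, then the addend; and
TMP2 holds tlb_table[mmu_idx], then the TLB comparator.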

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson, 2023-04-04 01:25:06 +03:00
Parent: f48cc9020b
Commit: 01a112e2e9
1 changed file with 47 additions and 31 deletions


@@ -68,6 +68,7 @@
 #else
 # define TCG_REG_TMP1 TCG_REG_R12
 #endif
+#define TCG_REG_TMP2 TCG_REG_R11
 #define TCG_VEC_TMP1 TCG_REG_V0
 #define TCG_VEC_TMP2 TCG_REG_V1
@@ -2015,13 +2016,11 @@ static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
 /*
  * For the purposes of ppc32 sorting 4 input registers into 4 argument
  * registers, there is an outside chance we would require 3 temps.
- * Because of constraints, no inputs are in r3, and env will not be
- * placed into r3 until after the sorting is done, and is thus free.
  */
 static const TCGLdstHelperParam ldst_helper_param = {
     .ra_gen = ldst_ra_gen,
     .ntmp = 3,
-    .tmp = { TCG_REG_TMP1, TCG_REG_R0, TCG_REG_R3 }
+    .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
 };

 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -2135,31 +2134,31 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
     QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);

     /* Extract the page index, shifted into place for tlb index. */
     if (TCG_TARGET_REG_BITS == 32) {
-        tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
+        tcg_out_shri32(s, TCG_REG_R0, addrlo,
                        TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     } else {
-        tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
+        tcg_out_shri64(s, TCG_REG_R0, addrlo,
                        TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
     }
-    tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
+    tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));

-    /* Load the TLB comparator. */
+    /* Load the (low part) TLB comparator into TMP2. */
     if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
         uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
                         ? LWZUX : LDUX);
-        tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
+        tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
     } else {
-        tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
+        tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
         if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
-            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
+            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
+                       TCG_REG_TMP1, cmp_off + 4 * HOST_BIG_ENDIAN);
         } else {
-            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
+            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
         }
     }
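
In C terms, the shift/AND/ADD sequence above computes the address of the
TLB entry. A minimal sketch, assuming the usual softmmu layout in which
tlb_mask is pre-scaled to CPUTLBEntry-sized units (the helper name and
parameters below are illustrative, not from the patch):

#include <stdint.h>

static uintptr_t tlb_entry_addr(uintptr_t table, uintptr_t mask,
                                uint64_t addr, int page_bits, int entry_bits)
{
    /* R0 <- addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS) */
    uintptr_t index = (uintptr_t)(addr >> (page_bits - entry_bits));
    /* TMP1 <- TMP1 & R0: the mask selects the in-bounds entry offset;
     * TMP1 <- TMP1 + TMP2: add the table base, yielding the entry pointer */
    return table + (index & mask);
}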
@@ -2167,11 +2166,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * Load the TLB addend for use on the fast path.
      * Do this asap to minimize any load use delay.
      */
-    h->base = TCG_REG_R3;
-    tcg_out_ld(s, TCG_TYPE_PTR, h->base, TCG_REG_R3,
-               offsetof(CPUTLBEntry, addend));
+    if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+                   offsetof(CPUTLBEntry, addend));
+    }

-    /* Clear the non-page, non-alignment bits from the address */
+    /* Clear the non-page, non-alignment bits from the address in R0. */
     if (TCG_TARGET_REG_BITS == 32) {
         /*
          * We don't support unaligned accesses on 32-bits.
@@ -2204,9 +2204,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         if (TARGET_LONG_BITS == 32) {
             tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
                         (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
-            /* Zero-extend the address for use in the final address. */
-            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
-            addrlo = TCG_REG_R4;
         } else if (a_bits == 0) {
             tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
         } else {
@@ -2215,21 +2212,36 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
             tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
         }
     }
-    h->index = addrlo;

     if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
+        /* Low part comparison into cr7. */
+        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
                     0, 7, TCG_TYPE_I32);
-        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
+
+        /* Load the high part TLB comparator into TMP2. */
+        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
+                   cmp_off + 4 * !HOST_BIG_ENDIAN);
+
+        /* Load addend, deferred for this case. */
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+                   offsetof(CPUTLBEntry, addend));
+
+        /* High part comparison into cr6. */
+        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
+
+        /* Combine comparisons into cr7. */
         tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
     } else {
-        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
+        /* Full comparison into cr7. */
+        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
                     0, 7, TCG_TYPE_TL);
     }

     /* Load a pointer into the current opcode w/conditional branch-link. */
     ldst->label_ptr[0] = s->code_ptr;
     tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
+
+    h->base = TCG_REG_TMP1;
 #else
     if (a_bits) {
         ldst = new_ldst_label(s);
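
When the guest address is wider than the host registers, the hit test in
the hunk above is split across two condition registers: cr7 holds the
low-part compare, cr6 the high-part compare, and CRAND folds cr6 into cr7
so a single branch covers both. Logically (a sketch with illustrative
names, not code from the patch):

#include <stdbool.h>
#include <stdint.h>

/* cr7.eq <- (masked low address == low comparator)
 * cr6.eq <- (high address == high comparator)
 * CRAND  <- cr7.eq &= cr6.eq, so one conditional branch tests the hit */
static bool tlb_hit_32bit_host(uint32_t masked_lo, uint32_t addr_hi,
                               uint32_t cmp_lo, uint32_t cmp_hi)
{
    return masked_lo == cmp_lo && addr_hi == cmp_hi;
}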
@@ -2247,13 +2259,16 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     }

     h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
-    h->index = addrlo;
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
-        h->index = TCG_REG_TMP1;
-    }
 #endif

+    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+        /* Zero-extend the guest address for use in the host address. */
+        tcg_out_ext32u(s, TCG_REG_R0, addrlo);
+        h->index = TCG_REG_R0;
+    } else {
+        h->index = addrlo;
+    }
+
     return ldst;
 }
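
The h->index selection above, now shared by both the softmmu and user-only
paths, matters when host registers are wider than the guest address: the
high half of a 32-bit guest address held in a 64-bit register is not
guaranteed to be zero, so it is zero-extended into R0 before use. A sketch
of the resulting host-address arithmetic (names are illustrative):

#include <stdint.h>

static uint64_t host_address(uint64_t base, uint64_t addrlo, int guest_bits)
{
    /* h->base + h->index, with the index zero-extended for 32-bit guests,
     * since the upper 32 bits of addrlo are otherwise undefined. */
    uint64_t index = (guest_bits == 32) ? (uint32_t)addrlo : addrlo;
    return base + index;
}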
@@ -3905,7 +3920,8 @@ static void tcg_target_init(TCGContext *s)
 #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
 #endif
-    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
     tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
     tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
     if (USE_REG_TB) {