Merge remote-tracking branch 'remotes/aurel/tags/pull-tcg-mips-20150921' into staging

TCG MIPS queue

- Fixes for 64-bit guests
- Small cleanups

# gpg: Signature made Sun 20 Sep 2015 23:33:15 BST using RSA key ID 1DDD8C9B
# gpg: Good signature from "Aurelien Jarno <aurelien@aurel32.net>"
# gpg:                 aka "Aurelien Jarno <aurelien@jarno.fr>"
# gpg:                 aka "Aurelien Jarno <aurel32@debian.org>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 7746 2642 A9EF 94FD 0F77  196D BA9C 7806 1DDD 8C9B

* remotes/aurel/tags/pull-tcg-mips-20150921:
  tcg/mips: pass oi to tcg_out_tlb_load
  tcg/mips: move tcg_out_addsub2
  tcg/mips: Fix clobbering of qemu_ld inputs

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 75ebcd7f08
Peter Maydell, 2015-09-21 19:42:33 +01:00

diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
@@ -567,6 +567,55 @@ static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
     }
 }
 
+static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
+                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
+                            bool cbh, bool is_sub)
+{
+    TCGReg th = TCG_TMP1;
+
+    /* If we have a negative constant such that negating it would
+       make the high part zero, we can (usually) eliminate one insn.  */
+    if (cbl && cbh && bh == -1 && bl != 0) {
+        bl = -bl;
+        bh = 0;
+        is_sub = !is_sub;
+    }
+
+    /* By operating on the high part first, we get to use the final
+       carry operation to move back from the temporary.  */
+    if (!cbh) {
+        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
+    } else if (bh != 0 || ah == rl) {
+        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
+    } else {
+        th = ah;
+    }
+
+    /* Note that tcg optimization should eliminate the bl == 0 case.  */
+    if (is_sub) {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
+            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
+            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
+        }
+        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
+    } else {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
+        } else if (rl == al && rl == bl) {
+            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
+            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
+        }
+        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
+    }
+}
+
 /* Bit 0 set if inversion required; bit 1 set if swapping required.  */
 #define MIPS_CMP_INV 1
 #define MIPS_CMP_SWAP 2
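
The SLTU/SLTIU pairs in the function above are how the backend materializes the carry between the two 32-bit halves: after an unsigned add, the low result is unsigned-less-than either addend exactly when the add wrapped, and before a subtract, al < bl (unsigned) is exactly the borrow. A stand-alone C model of the trick (illustrative only, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* 64-bit add/sub performed in 32-bit halves, mirroring the emitted
       ADDU/SLTU and SUBU/SLTU sequences. */
    static void addsub2(uint32_t *rl, uint32_t *rh,
                        uint32_t al, uint32_t ah,
                        uint32_t bl, uint32_t bh, int is_sub)
    {
        if (is_sub) {
            uint32_t borrow = al < bl;   /* SLTU TMP0, al, bl */
            *rl = al - bl;               /* SUBU rl, al, bl */
            *rh = ah - bh - borrow;      /* SUBU th, ah, bh; SUBU rh, th, TMP0 */
        } else {
            uint32_t lo = al + bl;       /* ADDU rl, al, bl */
            uint32_t carry = lo < bl;    /* SLTU TMP0, rl, bl */
            *rl = lo;
            *rh = ah + bh + carry;       /* ADDU th, ah, bh; ADDU rh, th, TMP0 */
        }
    }

    int main(void)
    {
        uint32_t rl, rh;
        addsub2(&rl, &rh, 0xffffffffu, 0, 1, 0, 0);
        assert(rh == 1 && rl == 0);      /* 0xffffffff + 1 carries into rh */
        return 0;
    }

The rl == al && rl == bl special case needs the SRL because that ADDU destroys both addends at once; computing the carry up front works there since al + al carries exactly when bit 31 of al is set.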
@@ -934,9 +983,11 @@ static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
 /* Perform the tlb comparison operation.  The complete host address is
    placed in BASE.  Clobbers AT, T0, A0.  */
 static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
-                             TCGReg addrh, int mem_index, TCGMemOp s_bits,
+                             TCGReg addrh, TCGMemOpIdx oi,
                              tcg_insn_unit *label_ptr[2], bool is_load)
 {
+    TCGMemOp s_bits = get_memop(oi) & MO_SIZE;
+    int mem_index = get_mmuidx(oi);
     int cmp_off
         = (is_load
            ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
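
The oi parameter is a TCGMemOpIdx, the combined memop-and-mmu-index operand that qemu_ld/st ops carry. A self-contained model of the packing that get_memop() and get_mmuidx() undo (an assumption based on the tcg/tcg.h helpers of this period; the real TCGMemOp encoding is richer than shown):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t TCGMemOpIdx;  /* memop in the high bits, mmu_idx in the low 4 */

    static inline TCGMemOpIdx make_memop_idx(unsigned op, unsigned idx)
    {
        assert(idx <= 15);
        return (op << 4) | idx;
    }

    static inline unsigned get_memop(TCGMemOpIdx oi)  { return oi >> 4; }
    static inline unsigned get_mmuidx(TCGMemOpIdx oi) { return oi & 15; }

    int main(void)
    {
        TCGMemOpIdx oi = make_memop_idx(0x07 /* stand-in for a MO_* value */, 1);
        assert(get_memop(oi) == 0x07 && get_mmuidx(oi) == 1);
        return 0;
    }

Passing the packed value through and unpacking it here drops one parameter and two locals at every call site, as the qemu_ld/st hunks below show.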
@@ -962,30 +1013,34 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
         add_off -= 0x7ff0;
     }
 
-    /* Load the tlb comparator.  */
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
-        tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF);
-    } else {
-        tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off);
-    }
+    /* Load the (low half) tlb comparator.  */
+    tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0,
+                    cmp_off + (TARGET_LONG_BITS == 64 ? LO_OFF : 0));
 
     /* Mask the page bits, keeping the alignment bits to compare against.
-       In between, load the tlb addend for the fast path.  */
+       In between on 32-bit targets, load the tlb addend for the fast path.  */
     tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
                  TARGET_PAGE_MASK | ((1 << s_bits) - 1));
-    tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
+    if (TARGET_LONG_BITS == 32) {
+        tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
+    }
     tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
 
     label_ptr[0] = s->code_ptr;
     tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
 
+    /* Load and test the high half tlb comparator.  */
     if (TARGET_LONG_BITS == 64) {
         /* delay slot */
-        tcg_out_nop(s);
+        tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);
+
+        /* Load the tlb addend for the fast path.  We can't do it earlier with
+           64-bit targets or we'll clobber a0 before reading the high half tlb
+           comparator.  */
+        tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
 
         label_ptr[1] = s->code_ptr;
-        tcg_out_opc_br(s, OPC_BNE, addrh, base);
+        tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
     }
 
     /* delay slot */
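
In C terms, the fast path emitted here computes the following (a hedged stand-alone model with illustrative names, not the QEMU API): mask the guest address down to its page number while keeping the low bits that must be zero for an aligned access, compare against the stored comparator, and only on a match use the per-page addend to form the host address:

    #include <stdint.h>

    #define PAGE_BITS 12                    /* stand-in for TARGET_PAGE_BITS */
    #define PAGE_MASK (~((1u << PAGE_BITS) - 1))

    struct tlb_entry_model {
        uint32_t addr_read;  /* page bits of a readable guest address */
        uintptr_t addend;    /* host base address minus guest page address */
    };

    /* Returns the host address, or 0 to signal "take the slow path". */
    static uintptr_t tlb_fast_path(const struct tlb_entry_model *ent,
                                   uint32_t addr, unsigned s_bits)
    {
        uint32_t mask = PAGE_MASK | ((1u << s_bits) - 1);  /* TCG_TMP1 */
        if ((addr & mask) != ent->addr_read) {
            return 0;                       /* BNE taken: miss or misaligned */
        }
        return (uintptr_t)addr + ent->addend;  /* ADDU base, addend, addrl */
    }

    int main(void)
    {
        struct tlb_entry_model ent = { 0x1000, 0 };
        /* Aligned 2-byte access inside page 0x1000 hits; 0x1001 would miss. */
        return tlb_fast_path(&ent, 0x1004, 1) == 0x1004 ? 0 : 1;
    }

The fix itself is an ordering constraint spelled out in the new comments: both comparator halves and the addend are read through A0, and the addend load overwrites A0, so on 64-bit guests it must be issued after the high-half comparator read rather than up front.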
@@ -1156,8 +1211,6 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[2];
-    int mem_index;
-    TCGMemOp s_bits;
 #endif
 
     /* Note that we've eliminated V0 from the output registers,
        so we won't overwrite the base register during loading.  */
@@ -1171,11 +1224,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     opc = get_memop(oi);
 
 #if defined(CONFIG_SOFTMMU)
-    mem_index = get_mmuidx(oi);
-    s_bits = opc & MO_SIZE;
-
-    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
-                     s_bits, label_ptr, 1);
+    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
     tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
     add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
                         s->code_ptr, label_ptr);
@@ -1233,55 +1282,6 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     }
 }
 
-static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
-                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
-                            bool cbh, bool is_sub)
-{
-    TCGReg th = TCG_TMP1;
-
-    /* If we have a negative constant such that negating it would
-       make the high part zero, we can (usually) eliminate one insn.  */
-    if (cbl && cbh && bh == -1 && bl != 0) {
-        bl = -bl;
-        bh = 0;
-        is_sub = !is_sub;
-    }
-
-    /* By operating on the high part first, we get to use the final
-       carry operation to move back from the temporary.  */
-    if (!cbh) {
-        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
-    } else if (bh != 0 || ah == rl) {
-        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
-    } else {
-        th = ah;
-    }
-
-    /* Note that tcg optimization should eliminate the bl == 0 case.  */
-    if (is_sub) {
-        if (cbl) {
-            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
-            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
-        } else {
-            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
-            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
-        }
-        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
-    } else {
-        if (cbl) {
-            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
-            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
-        } else if (rl == al && rl == bl) {
-            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
-            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
-        } else {
-            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
-            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
-        }
-        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
-    }
-}
-
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh __attribute__((unused));
@@ -1290,8 +1290,6 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[2];
-    int mem_index;
-    TCGMemOp s_bits;
 #endif
 
     data_regl = *args++;
@@ -1302,14 +1300,10 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     opc = get_memop(oi);
 
 #if defined(CONFIG_SOFTMMU)
-    mem_index = get_mmuidx(oi);
-    s_bits = opc & 3;
-
     /* Note that we eliminated the helper's address argument,
        so we can reuse that for the base.  */
     base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
-    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
-                     s_bits, label_ptr, 0);
+    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
     tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
     add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
                         s->code_ptr, label_ptr);