/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
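
/*
 * Illustrative note: these macros build bits [11:4] of the data-processing
 * shifter operand.  For example, SHIFT_IMM_ASR(3) = (3 << 7) | 0x40 is the
 * field for "rm, asr #3", while the SHIFT_REG_* forms set bit 4 to select
 * a shift amount held in a register, e.g. "rm, lsl rs".
 */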

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k. */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
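
/*
 * Illustrative note: B/BL hold a signed 24-bit *word* offset relative to
 * PC+8 (a read of PC observes the current instruction address plus 8),
 * hence the "- 8" bias and ">> 2" above, and a reach of roughly +/-32MB.
 */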

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (softmmu only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#else
#define ALL_QLDST_REGS   (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
#endif

/*
 * ARM immediates for ALU instructions are an unsigned 8-bit value
 * right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift. */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
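
/*
 * Worked example (illustrative): imm = 0x00ab0000.  ctz32 gives 16, so
 * imm8 = 0xab with a right-rotation of 32 - 16 = 16.  The result is
 * (16 << 7) | 0xab = 0x8ab, i.e. rotate field 8, and the CPU decodes
 * 0xab ror (2 * 8) = 0x00ab0000.
 */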

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate. */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate. */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate. */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR. */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
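
/*
 * Illustrative example: v32 = 0x12003400.  The first loop iteration
 * (i = 6) masks out the top byte, leaving 0x00003400, which is_shimm32
 * accepts as (cmode 0x2, imm8 0x34) for the MOVI; the ORR then supplies
 * byte 0x12 with cmode 6, i.e. shifted left by 24.
 */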

/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn. */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                 (((offset - 8) >> 2) & 0x00ffffff));
}
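
/*
 * Illustrative note: the unconditional BLX (immediate) above always
 * switches to Thumb, so bit 1 of the byte offset is preserved in the
 * H bit (bit 24) in order to reach halfword-aligned Thumb targets.
 */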

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                 (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                 (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
                 | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}
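
/*
 * Illustrative note: the halfword/doubleword forms (addressing mode 3)
 * split the 8-bit offset magnitude into two nibbles, imm4H in bits
 * [11:8] and imm4L in bits [3:0], hence ((imm8 & 0xf0) << 4) above.
 */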

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void __attribute__((unused))
tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback. */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else. */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block. */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt. */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor. */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool. */
    tcg_out_movi_pool(s, cond, rd, arg);
}
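
/*
 * Illustrative example of the two-insn path: arg = 0xfffeefa0 has more
 * than 16 bits set, so it is built as "mvn rd, #0x5f" (giving 0xffffffa0)
 * followed by "eor rd, rd, #0x11000", rather than a mov plus three orrs.
 */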

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIN" constraint.
 */
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
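
/*
 * Illustrative use: tcg_out_dat_rIN(s, cond, ARITH_ADD, ARITH_SUB, rd, rn,
 * -1, true) cannot encode -1 as an ARM immediate, so it flips to the
 * negated opcode and emits "sub rd, rn, #1" instead.
 */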

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}
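
/*
 * The 64-bit extension hooks below are required by the common tcg
 * interface; on this 32-bit-only host they should be unreachable,
 * hence the asserts.
 */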

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15. */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}
|
|
|
|
|
2022-10-18 09:51:41 +02:00
|
|
|
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
|
|
|
|
const TCGHelperInfo *info)
|
|
|
|
{
|
|
|
|
tcg_out_call_int(s, addr);
|
|
|
|
}
|
|
|
|
|
2021-08-10 01:16:59 +02:00
|
|
|
static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2013-04-23 22:07:40 +02:00
|
|
|
if (l->has_value) {
|
2014-04-24 23:23:40 +02:00
|
|
|
tcg_out_goto(s, cond, l->u.value_ptr);
|
2008-05-20 01:59:38 +02:00
|
|
|
} else {
|
2015-02-13 22:39:54 +01:00
|
|
|
tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
|
2021-08-13 01:00:10 +02:00
|
|
|
tcg_out_b_imm(s, cond, 0);
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-10 00:57:07 +02:00
|
|
|
static void tcg_out_mb(TCGContext *s, TCGArg a0)
|
2016-07-14 22:20:16 +02:00
|
|
|
{
|
|
|
|
if (use_armv7_instructions) {
|
|
|
|
tcg_out32(s, INSN_DMB_ISH);
|
2022-01-03 05:54:57 +01:00
|
|
|
} else {
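/* Pre-v7 fallback: the CP15 barrier, mcr p15, 0, r0, c7, c10, 5. */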
|
2016-07-14 22:20:16 +02:00
|
|
|
tcg_out32(s, INSN_DMB_MCR);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-15 22:01:37 +01:00
|
|
|
static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
|
|
|
|
const int *const_args)
|
|
|
|
{
|
|
|
|
TCGReg al = args[0];
|
|
|
|
TCGReg ah = args[1];
|
|
|
|
TCGArg bl = args[2];
|
|
|
|
TCGArg bh = args[3];
|
|
|
|
TCGCond cond = args[4];
|
|
|
|
int const_bl = const_args[2];
|
|
|
|
int const_bh = const_args[3];
|
|
|
|
|
|
|
|
switch (cond) {
|
|
|
|
case TCG_COND_EQ:
|
|
|
|
case TCG_COND_NE:
|
|
|
|
case TCG_COND_LTU:
|
|
|
|
case TCG_COND_LEU:
|
|
|
|
case TCG_COND_GTU:
|
|
|
|
case TCG_COND_GEU:
|
|
|
|
/* We perform a conditional comparison.  If the high half is
|
|
|
|
equal, then overwrite the flags with the comparison of the
|
|
|
|
low half. The resulting flags cover the whole. */
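/* Illustrative example: for LTU on (ah:al) vs (bh:bl) this emits
   CMP ah, bh; CMPEQ al, bl.  If the high halves differ, the first
   CMP already decides LTU; if they are equal, the second compare
   replaces the flags with the low-half result.  */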
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
|
|
|
|
tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
|
|
|
|
return cond;
|
|
|
|
|
|
|
|
case TCG_COND_LT:
|
|
|
|
case TCG_COND_GE:
|
|
|
|
/* We perform a double-word subtraction and examine the result.
|
|
|
|
We do not actually need the result of the subtract, so the
|
|
|
|
low part "subtract" is a compare. For the high half we have
|
|
|
|
no choice but to compute into a temporary. */
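/* E.g. for LT: CMP al, bl then SBCS tmp, ah, bh computes
   ah:al - bh:bl, leaving N and V describing the signed
   64-bit result.  */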
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
|
|
|
|
TCG_REG_TMP, ah, bh, const_bh);
|
|
|
|
return cond;
|
|
|
|
|
|
|
|
case TCG_COND_LE:
|
|
|
|
case TCG_COND_GT:
|
|
|
|
/* Similar, but with swapped arguments, via reversed subtract. */
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
|
|
|
|
TCG_REG_TMP, al, bl, const_bl);
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
|
|
|
|
TCG_REG_TMP, ah, bh, const_bh);
|
|
|
|
return tcg_swap_cond(cond);
|
|
|
|
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:03 +02:00
|
|
|
/*
|
|
|
|
* Note that TCGReg references Q-registers.
|
|
|
|
* D-regno = 2 * Q-regno, so shift left by 1 while inserting.
|
|
|
|
*/
|
|
|
|
static uint32_t encode_vd(TCGReg rd)
|
|
|
|
{
|
|
|
|
tcg_debug_assert(rd >= TCG_REG_Q0);
|
|
|
|
return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
|
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:07 +02:00
|
|
|
static uint32_t encode_vn(TCGReg rn)
|
|
|
|
{
|
|
|
|
tcg_debug_assert(rn >= TCG_REG_Q0);
|
|
|
|
return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t encode_vm(TCGReg rm)
|
|
|
|
{
|
|
|
|
tcg_debug_assert(rm >= TCG_REG_Q0);
|
|
|
|
return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
|
|
|
|
}
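/* Worked example (illustrative): TCG_REG_Q5 is D10 (0b01010).
   encode_vd places the low three Q bits at insn bits 15:13 -- i.e.
   regno * 2 in the standard Vd field (bits 15:12) -- and Q bit 3
   in the D bit (bit 22), so the field reads D10 as expected.  */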
|
|
|
|
|
2020-09-06 00:54:33 +02:00
|
|
|
static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
|
|
|
|
TCGReg d, TCGReg m)
|
|
|
|
{
|
|
|
|
tcg_out32(s, insn | (vece << 18) | (q << 6) |
|
|
|
|
encode_vd(d) | encode_vm(m));
|
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:07 +02:00
|
|
|
static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
|
|
|
|
TCGReg d, TCGReg n, TCGReg m)
|
|
|
|
{
|
|
|
|
tcg_out32(s, insn | (vece << 20) | (q << 6) |
|
|
|
|
encode_vd(d) | encode_vn(n) | encode_vm(m));
|
|
|
|
}
|
|
|
|
|
2020-09-05 09:03:27 +02:00
|
|
|
static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
|
|
|
|
int q, int op, int cmode, uint8_t imm8)
|
|
|
|
{
|
|
|
|
tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
|
|
|
|
| (cmode << 8) | extract32(imm8, 0, 4)
|
|
|
|
| (extract32(imm8, 4, 3) << 16)
|
|
|
|
| (extract32(imm8, 7, 1) << 24));
|
|
|
|
}
|
|
|
|
|
2020-09-05 21:24:28 +02:00
|
|
|
static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
|
|
|
|
TCGReg rd, TCGReg rm, int l_imm6)
|
|
|
|
{
|
|
|
|
tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
|
|
|
|
(extract32(l_imm6, 6, 1) << 7) |
|
|
|
|
(extract32(l_imm6, 0, 6) << 16));
|
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:03 +02:00
|
|
|
static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
|
|
|
|
TCGReg rd, TCGReg rn, int offset)
|
|
|
|
{
|
|
|
|
if (offset != 0) {
|
|
|
|
if (check_fit_imm(offset) || check_fit_imm(-offset)) {
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
|
|
|
|
TCG_REG_TMP, rn, offset, true);
|
|
|
|
} else {
|
|
|
|
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
|
|
|
|
TCG_REG_TMP, TCG_REG_TMP, rn, 0);
|
|
|
|
}
|
|
|
|
rn = TCG_REG_TMP;
|
|
|
|
}
|
|
|
|
tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
|
|
|
|
}
|
|
|
|
|
2023-04-22 06:32:22 +02:00
|
|
|
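/*
 * Host addressing mode collected for a guest memory access.
 * index < 0 means base-only addressing; index_scratch means the
 * index register may be clobbered (e.g. via write-back) by the
 * access sequence.
 */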
typedef struct {
|
|
|
|
ARMCond cond;
|
|
|
|
TCGReg base;
|
|
|
|
int index;
|
|
|
|
bool index_scratch;
|
2023-04-22 07:48:58 +02:00
|
|
|
TCGAtomAlign aa;
|
2023-04-22 06:32:22 +02:00
|
|
|
} HostAddress;
|
|
|
|
|
2023-04-19 12:43:17 +02:00
|
|
|
bool tcg_target_has_memory_bswap(MemOp memop)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2023-04-10 09:15:24 +02:00
|
|
|
static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
|
|
|
|
{
|
|
|
|
/* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
|
|
|
|
return TCG_REG_R14;
|
2012-08-26 15:40:02 +02:00
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2023-04-10 09:15:24 +02:00
|
|
|
static const TCGLdstHelperParam ldst_helper_param = {
|
|
|
|
.ra_gen = ldst_ra_gen,
|
|
|
|
.ntmp = 1,
|
|
|
|
.tmp = { TCG_REG_TMP },
|
|
|
|
};
|
|
|
|
|
2019-04-21 23:51:00 +02:00
|
|
|
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
2013-03-13 23:24:33 +01:00
|
|
|
{
|
2023-04-10 09:15:24 +02:00
|
|
|
MemOp opc = get_memop(lb->oi);
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2020-11-06 00:27:27 +01:00
|
|
|
if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
|
2019-04-21 23:51:00 +02:00
|
|
|
return false;
|
|
|
|
}
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2023-04-10 09:15:24 +02:00
|
|
|
tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
|
2022-10-18 09:51:41 +02:00
|
|
|
tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
|
2023-04-10 09:15:24 +02:00
|
|
|
tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2014-04-24 23:23:40 +02:00
|
|
|
tcg_out_goto(s, COND_AL, lb->raddr);
|
2019-04-21 23:51:00 +02:00
|
|
|
return true;
|
2013-03-13 23:24:33 +01:00
|
|
|
}
|
|
|
|
|
2019-04-21 23:51:00 +02:00
|
|
|
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
2013-03-13 23:24:33 +01:00
|
|
|
{
|
2023-04-10 09:15:24 +02:00
|
|
|
MemOp opc = get_memop(lb->oi);
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2020-11-06 00:27:27 +01:00
|
|
|
if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
|
2019-04-21 23:51:00 +02:00
|
|
|
return false;
|
|
|
|
}
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2023-04-10 09:15:24 +02:00
|
|
|
tcg_out_st_helper_args(s, lb, &ldst_helper_param);
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2013-07-28 02:09:47 +02:00
|
|
|
/* Tail-call to the helper, which will return to the fast path. */
|
2021-06-14 01:40:38 +02:00
|
|
|
tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
|
2019-04-21 23:51:00 +02:00
|
|
|
return true;
|
2013-03-13 23:24:33 +01:00
|
|
|
}
|
2013-03-13 02:18:07 +01:00
|
|
|
|
2023-04-23 16:54:41 +02:00
|
|
|
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
|
|
|
|
TCGReg addrlo, TCGReg addrhi,
|
|
|
|
MemOpIdx oi, bool is_ld)
|
|
|
|
{
|
|
|
|
TCGLabelQemuLdst *ldst = NULL;
|
|
|
|
MemOp opc = get_memop(oi);
|
2023-04-22 07:48:58 +02:00
|
|
|
unsigned a_mask;
|
|
|
|
|
|
|
|
#ifdef CONFIG_SOFTMMU
|
|
|
|
*h = (HostAddress){
|
|
|
|
.cond = COND_AL,
|
|
|
|
.base = addrlo,
|
|
|
|
.index = TCG_REG_R1,
|
|
|
|
.index_scratch = true,
|
|
|
|
};
|
|
|
|
#else
|
|
|
|
*h = (HostAddress){
|
|
|
|
.cond = COND_AL,
|
|
|
|
.base = addrlo,
|
|
|
|
.index = guest_base ? TCG_REG_GUEST_BASE : -1,
|
|
|
|
.index_scratch = false,
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
|
|
|
|
a_mask = (1 << h->aa.align) - 1;
|
2023-04-23 16:54:41 +02:00
|
|
|
|
|
|
|
#ifdef CONFIG_SOFTMMU
|
|
|
|
int mem_index = get_mmuidx(oi);
|
|
|
|
int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
|
|
|
|
: offsetof(CPUTLBEntry, addr_write);
|
|
|
|
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
|
|
|
|
unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
|
|
|
|
TCGReg t_addr;
|
|
|
|
|
|
|
|
ldst = new_ldst_label(s);
|
|
|
|
ldst->is_ld = is_ld;
|
|
|
|
ldst->oi = oi;
|
|
|
|
ldst->addrlo_reg = addrlo;
|
|
|
|
ldst->addrhi_reg = addrhi;
|
|
|
|
|
|
|
|
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
|
|
|
|
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
|
|
|
|
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);
|
|
|
|
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
|
|
|
|
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
|
|
|
|
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
|
|
|
|
|
|
|
|
/* Extract the tlb index from the address into R0. */
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
|
|
|
|
SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
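/* Note: the mask loaded above already has the low CPU_TLB_ENTRY_BITS
   clear, so this single AND with the address shifted right by
   (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS) yields the byte offset of
   the TLB entry within the table.  */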
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
|
|
|
|
* Load the tlb comparator into R2/R3 and the fast path addend into R1.
|
|
|
|
*/
|
|
|
|
if (cmp_off == 0) {
|
2023-03-23 02:13:12 +01:00
|
|
|
if (s->addr_type == TCG_TYPE_I32) {
|
2023-04-23 16:54:41 +02:00
|
|
|
tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
|
2023-03-23 02:13:12 +01:00
|
|
|
} else {
|
|
|
|
tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
|
2023-04-23 16:54:41 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
|
|
|
|
TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
|
2023-03-23 02:13:12 +01:00
|
|
|
if (s->addr_type == TCG_TYPE_I32) {
|
2023-04-23 16:54:41 +02:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
|
2023-03-23 02:13:12 +01:00
|
|
|
} else {
|
|
|
|
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
|
2023-04-23 16:54:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Load the tlb addend. */
|
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
|
|
|
|
offsetof(CPUTLBEntry, addend));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check alignment, check comparators.
|
|
|
|
* Do this in 2-4 insns. Use MOVW for v7, if possible,
|
|
|
|
* to reduce the number of sequential conditional instructions.
|
|
|
|
* Almost all guests have at least 4k pages, which means that we need
|
|
|
|
* to clear at least 9 bits even for an 8-byte access, which means it
|
|
|
|
* isn't worth checking for an immediate operand for BIC.
|
|
|
|
*
|
|
|
|
* For unaligned accesses, test the page of the last unit of alignment.
|
|
|
|
* This leaves the least significant alignment bits unchanged, and they
|
|
|
|
* must of course be zero.
|
|
|
|
*/
|
|
|
|
t_addr = addrlo;
|
|
|
|
if (a_mask < s_mask) {
|
|
|
|
t_addr = TCG_REG_R0;
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
|
|
|
|
addrlo, s_mask - a_mask);
|
|
|
|
}
|
|
|
|
if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
|
|
|
|
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
|
|
|
|
t_addr, TCG_REG_TMP, 0);
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
|
|
|
|
} else {
|
|
|
|
if (a_mask) {
|
|
|
|
tcg_debug_assert(a_mask <= 0xff);
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
|
|
|
|
}
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
|
|
|
|
SHIFT_IMM_LSR(TARGET_PAGE_BITS));
|
|
|
|
tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
|
|
|
|
0, TCG_REG_R2, TCG_REG_TMP,
|
|
|
|
SHIFT_IMM_LSL(TARGET_PAGE_BITS));
|
|
|
|
}
|
|
|
|
|
2023-03-23 02:13:12 +01:00
|
|
|
if (s->addr_type != TCG_TYPE_I32) {
|
2023-04-23 16:54:41 +02:00
|
|
|
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (a_mask) {
|
|
|
|
ldst = new_ldst_label(s);
|
|
|
|
ldst->is_ld = is_ld;
|
|
|
|
ldst->oi = oi;
|
|
|
|
ldst->addrlo_reg = addrlo;
|
|
|
|
ldst->addrhi_reg = addrhi;
|
|
|
|
|
2023-04-22 07:48:58 +02:00
|
|
|
/* We are expecting alignment to max out at 7 */
|
2023-04-23 16:54:41 +02:00
|
|
|
tcg_debug_assert(a_mask <= 0xff);
|
|
|
|
/* tst addr, #mask */
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return ldst;
|
|
|
|
}
|
|
|
|
|
2023-04-22 06:32:22 +02:00
|
|
|
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
|
|
|
|
TCGReg datahi, HostAddress h)
|
2013-03-13 02:18:07 +01:00
|
|
|
{
|
2023-04-22 06:32:22 +02:00
|
|
|
TCGReg base;
|
|
|
|
|
2021-06-14 01:40:38 +02:00
|
|
|
/* Byte swapping is left to middle-end expansion. */
|
|
|
|
tcg_debug_assert((opc & MO_BSWAP) == 0);
|
2013-03-13 23:24:33 +01:00
|
|
|
|
2013-09-04 01:16:47 +02:00
|
|
|
switch (opc & MO_SSIZE) {
|
|
|
|
case MO_UB:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_SB:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_UW:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_SW:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_UL:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2022-01-06 22:00:51 +01:00
|
|
|
case MO_UQ:
|
2022-10-14 02:24:52 +02:00
|
|
|
/* We used pair allocation for datalo, so it should already be aligned. */
|
|
|
|
tcg_debug_assert((datalo & 1) == 0);
|
|
|
|
tcg_debug_assert(datahi == datalo + 1);
|
2022-01-03 06:26:17 +01:00
|
|
|
/* LDRD requires alignment; double-check that. */
|
2022-10-14 02:24:52 +02:00
|
|
|
if (get_alignment_bits(opc) >= MO_64) {
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
|
|
|
|
break;
|
|
|
|
}
|
2022-03-11 08:38:47 +01:00
|
|
|
/*
|
|
|
|
* Rm (the second address op) must not overlap Rt or Rt + 1.
|
|
|
|
* Since datalo is aligned, we can simplify the test via alignment.
|
|
|
|
* Flip the two address arguments if that works.
|
|
|
|
*/
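/* E.g. (illustrative) datalo = r4: LDRD uses r4/r5, so an index of
   r4 or r5 ((h.index & ~1) == 4) would overlap, and we try the two
   address operands the other way around.  */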
|
2023-04-22 06:32:22 +02:00
|
|
|
if ((h.index & ~1) != datalo) {
|
|
|
|
tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
|
2022-03-11 08:38:47 +01:00
|
|
|
break;
|
|
|
|
}
|
2023-04-22 06:32:22 +02:00
|
|
|
if ((h.base & ~1) != datalo) {
|
|
|
|
tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
|
2022-03-11 08:38:47 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
base = h.base;
|
|
|
|
if (datalo == h.base) {
|
|
|
|
tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
|
|
|
|
base = TCG_REG_TMP;
|
|
|
|
}
|
|
|
|
} else if (h.index_scratch) {
|
|
|
|
tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
|
|
|
|
tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
|
|
|
|
break;
|
2021-06-14 01:40:38 +02:00
|
|
|
} else {
|
2023-04-22 06:32:22 +02:00
|
|
|
tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
|
|
|
|
h.base, h.index, SHIFT_IMM_LSL(0));
|
|
|
|
base = TCG_REG_TMP;
|
2009-03-10 22:43:25 +01:00
|
|
|
}
|
2023-04-22 06:32:22 +02:00
|
|
|
tcg_out_ld32_12(s, h.cond, datalo, base, 0);
|
|
|
|
tcg_out_ld32_12(s, h.cond, datahi, base, 4);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2021-06-14 01:40:38 +02:00
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-06 21:51:01 +02:00
|
|
|
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
|
|
|
|
TCGReg addrlo, TCGReg addrhi,
|
|
|
|
MemOpIdx oi, TCGType data_type)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2023-04-06 21:51:01 +02:00
|
|
|
MemOp opc = get_memop(oi);
|
2023-04-23 16:54:41 +02:00
|
|
|
TCGLabelQemuLdst *ldst;
|
2023-04-22 06:32:22 +02:00
|
|
|
HostAddress h;
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2023-04-23 16:54:41 +02:00
|
|
|
ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
|
|
|
|
if (ldst) {
|
|
|
|
ldst->type = data_type;
|
|
|
|
ldst->datalo_reg = datalo;
|
|
|
|
ldst->datahi_reg = datahi;
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2023-04-23 16:54:41 +02:00
|
|
|
/*
|
|
|
|
* This is a conditional BL only to load a pointer within this
|
|
|
|
* opcode into LR for the slow path. We will not be using
|
|
|
|
* the value for a tail call.
|
|
|
|
*/
|
|
|
|
ldst->label_ptr[0] = s->code_ptr;
|
|
|
|
tcg_out_bl_imm(s, COND_NE, 0);
|
|
|
|
|
|
|
|
tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
|
|
|
|
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
|
|
|
|
} else {
|
|
|
|
tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
|
2021-08-10 07:18:27 +02:00
|
|
|
}
|
2013-09-13 00:06:23 +02:00
|
|
|
}
|
|
|
|
|
2023-04-22 06:32:22 +02:00
|
|
|
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
|
|
|
|
TCGReg datahi, HostAddress h)
|
2013-09-13 00:06:23 +02:00
|
|
|
{
|
2021-06-14 01:40:38 +02:00
|
|
|
/* Byte swapping is left to middle-end expansion. */
|
|
|
|
tcg_debug_assert((opc & MO_BSWAP) == 0);
|
2013-09-13 00:06:23 +02:00
|
|
|
|
|
|
|
switch (opc & MO_SIZE) {
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_8:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
|
2021-08-10 05:44:50 +02:00
|
|
|
} else {
|
2023-04-22 06:32:22 +02:00
|
|
|
tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
|
2010-04-09 20:52:48 +02:00
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_16:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_32:
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2013-09-04 01:16:47 +02:00
|
|
|
case MO_64:
|
2022-10-14 02:24:52 +02:00
|
|
|
/* We used pair allocation for datalo, so it should already be aligned. */
|
|
|
|
tcg_debug_assert((datalo & 1) == 0);
|
|
|
|
tcg_debug_assert(datahi == datalo + 1);
|
2022-01-03 06:26:17 +01:00
|
|
|
/* STRD requires alignment; double-check that. */
|
2022-10-14 02:24:52 +02:00
|
|
|
if (get_alignment_bits(opc) >= MO_64) {
|
2023-04-22 06:32:22 +02:00
|
|
|
if (h.index < 0) {
|
|
|
|
tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
|
|
|
|
}
|
|
|
|
} else if (h.index_scratch) {
|
|
|
|
tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
|
|
|
|
tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
|
2010-04-09 20:52:48 +02:00
|
|
|
} else {
|
2023-04-22 06:32:22 +02:00
|
|
|
tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
|
|
|
|
h.base, h.index, SHIFT_IMM_LSL(0));
|
|
|
|
tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
|
|
|
|
tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
|
2010-04-09 20:52:48 +02:00
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2021-06-14 01:40:38 +02:00
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
2013-09-13 00:06:23 +02:00
|
|
|
}
|
|
|
|
|
2023-04-06 21:51:01 +02:00
|
|
|
static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
|
|
|
|
TCGReg addrlo, TCGReg addrhi,
|
|
|
|
MemOpIdx oi, TCGType data_type)
|
2013-09-13 00:06:23 +02:00
|
|
|
{
|
2023-04-06 21:51:01 +02:00
|
|
|
MemOp opc = get_memop(oi);
|
2023-04-23 16:54:41 +02:00
|
|
|
TCGLabelQemuLdst *ldst;
|
2023-04-22 06:32:22 +02:00
|
|
|
HostAddress h;
|
2013-09-13 00:06:23 +02:00
|
|
|
|
2023-04-23 16:54:41 +02:00
|
|
|
ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
|
|
|
|
if (ldst) {
|
|
|
|
ldst->type = data_type;
|
|
|
|
ldst->datalo_reg = datalo;
|
|
|
|
ldst->datahi_reg = datahi;
|
2023-04-22 06:32:22 +02:00
|
|
|
|
|
|
|
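/* The compare/tst above leaves Z set on success: issue the store
   under COND_EQ and let the BL(NE) below take the slow path
   otherwise.  */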
h.cond = COND_EQ;
|
2023-04-23 16:54:41 +02:00
|
|
|
tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
|
2023-04-22 06:32:22 +02:00
|
|
|
|
2023-04-23 16:54:41 +02:00
|
|
|
/* The conditional call is last, as we're going to return here. */
|
|
|
|
ldst->label_ptr[0] = s->code_ptr;
|
|
|
|
tcg_out_bl_imm(s, COND_NE, 0);
|
|
|
|
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
|
|
|
|
} else {
|
|
|
|
tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
|
2020-02-16 06:40:01 +01:00
|
|
|
static void tcg_out_epilogue(TCGContext *s);
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2022-11-26 21:42:06 +01:00
|
|
|
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
|
|
|
|
{
|
|
|
|
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
|
|
|
|
tcg_out_epilogue(s);
|
|
|
|
}
|
|
|
|
|
2022-11-27 02:14:05 +01:00
|
|
|
static void tcg_out_goto_tb(TCGContext *s, int which)
|
|
|
|
{
|
2022-11-27 09:48:47 +01:00
|
|
|
uintptr_t i_addr;
|
|
|
|
intptr_t i_disp;
|
|
|
|
|
|
|
|
/* Direct branch will be patched by tb_target_set_jmp_target. */
|
|
|
|
set_jmp_insn_offset(s, which);
|
|
|
|
tcg_out32(s, INSN_NOP);
|
2022-11-27 02:14:05 +01:00
|
|
|
|
2022-11-27 09:48:47 +01:00
|
|
|
/* When branch is out of range, fall through to indirect. */
|
|
|
|
i_addr = get_jmp_target_addr(s, which);
|
|
|
|
i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
|
|
|
|
tcg_debug_assert(i_disp < 0);
|
|
|
|
if (i_disp >= -0xfff) {
|
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
|
|
|
|
} else {
|
2022-11-27 02:14:05 +01:00
|
|
|
/*
|
|
|
|
* The TB is close, but outside the 12 bits addressable by
|
|
|
|
* the load. We can extend this to 20 bits with a sub of a
|
2022-11-27 09:48:47 +01:00
|
|
|
* shifted immediate from pc.
|
2022-11-27 02:14:05 +01:00
|
|
|
*/
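/* Worked example (illustrative): i_disp = -0x12345 gives h = 0x12345,
   l = 0x345, h - l = 0x12000 (encodable as a rotated immediate), so
   we emit SUB r0, pc, #0x12000 then LDR pc, [r0, #-0x345].  */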
|
2022-11-27 09:48:47 +01:00
|
|
|
int h = -i_disp;
|
|
|
|
int l = h & 0xfff;
|
|
|
|
|
|
|
|
h = encode_imm_nofail(h - l);
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
|
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
|
2022-11-27 02:14:05 +01:00
|
|
|
}
|
|
|
|
set_jmp_reset_offset(s, which);
|
|
|
|
}
|
|
|
|
|
2022-12-05 23:43:18 +01:00
|
|
|
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
|
|
|
|
uintptr_t jmp_rx, uintptr_t jmp_rw)
|
|
|
|
{
|
2022-11-27 09:48:47 +01:00
|
|
|
uintptr_t addr = tb->jmp_target_addr[n];
|
|
|
|
ptrdiff_t offset = addr - (jmp_rx + 8);
|
|
|
|
tcg_insn_unit insn;
|
|
|
|
|
|
|
|
/* Either directly branch, or fall through to indirect branch. */
|
|
|
|
if (offset == sextract64(offset, 0, 26)) {
|
|
|
|
/* B <addr> */
|
|
|
|
insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
|
|
|
|
} else {
|
|
|
|
insn = INSN_NOP;
|
|
|
|
}
|
|
|
|
|
|
|
|
qatomic_set((uint32_t *)jmp_rw, insn);
|
|
|
|
flush_idcache_range(jmp_rx, jmp_rw, 4);
|
2022-12-05 23:43:18 +01:00
|
|
|
}
|
|
|
|
|
2021-08-10 00:57:07 +02:00
|
|
|
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
|
|
|
|
const TCGArg args[TCG_MAX_OP_ARGS],
|
|
|
|
const int const_args[TCG_MAX_OP_ARGS])
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2013-03-12 03:51:56 +01:00
|
|
|
TCGArg a0, a1, a2, a3, a4, a5;
|
2008-05-20 01:59:38 +02:00
|
|
|
int c;
|
|
|
|
|
|
|
|
switch (opc) {
|
2017-04-28 09:49:45 +02:00
|
|
|
case INDEX_op_goto_ptr:
|
2021-08-13 01:00:10 +02:00
|
|
|
tcg_out_b_reg(s, COND_AL, args[0]);
|
2017-04-28 09:49:45 +02:00
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_br:
|
2015-02-13 22:39:54 +01:00
|
|
|
tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
|
|
|
|
|
|
|
case INDEX_op_ld8u_i32:
|
|
|
|
tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld8s_i32:
|
|
|
|
tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld16u_i32:
|
|
|
|
tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld16s_i32:
|
|
|
|
tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld_i32:
|
|
|
|
tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_st8_i32:
|
2010-04-09 20:52:48 +02:00
|
|
|
tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
|
|
|
case INDEX_op_st16_i32:
|
2010-04-09 20:52:48 +02:00
|
|
|
tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
|
|
|
case INDEX_op_st_i32:
|
|
|
|
tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
|
2012-09-26 20:48:55 +02:00
|
|
|
case INDEX_op_movcond_i32:
|
|
|
|
/* Constraints mean that v2 is always in the same register as dest,
|
|
|
|
* so we only need to do "if condition passed, move v1 to dest".
|
|
|
|
*/
|
2013-03-12 02:21:59 +01:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[1], args[2], const_args[2]);
|
|
|
|
tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
|
|
|
|
ARITH_MVN, args[0], 0, args[3], const_args[3]);
|
2012-09-26 20:48:55 +02:00
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_add_i32:
|
2013-03-05 07:06:21 +01:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_sub_i32:
|
2013-03-12 02:04:14 +01:00
|
|
|
if (const_args[1]) {
|
|
|
|
if (const_args[2]) {
|
|
|
|
tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
|
|
|
|
args[0], args[2], args[1], 1);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
}
|
2013-03-05 07:06:21 +01:00
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_and_i32:
|
2013-03-05 06:36:45 +01:00
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
break;
|
2010-03-03 00:13:43 +01:00
|
|
|
case INDEX_op_andc_i32:
|
2013-03-05 06:36:45 +01:00
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_or_i32:
|
|
|
|
c = ARITH_ORR;
|
|
|
|
goto gen_arith;
|
|
|
|
case INDEX_op_xor_i32:
|
|
|
|
c = ARITH_EOR;
|
|
|
|
/* Fall through. */
|
|
|
|
gen_arith:
|
2012-09-26 20:48:54 +02:00
|
|
|
tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
|
|
|
case INDEX_op_add2_i32:
|
2013-03-12 03:51:56 +01:00
|
|
|
a0 = args[0], a1 = args[1], a2 = args[2];
|
|
|
|
a3 = args[3], a4 = args[4], a5 = args[5];
|
|
|
|
if (a0 == a3 || (a0 == a5 && !const_args[5])) {
|
2013-03-12 17:49:04 +01:00
|
|
|
a0 = TCG_REG_TMP;
|
2013-03-12 03:51:56 +01:00
|
|
|
}
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
|
|
|
|
a0, a2, a4, const_args[4]);
|
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
|
|
|
|
a1, a3, a5, const_args[5]);
|
|
|
|
tcg_out_mov_reg(s, COND_AL, args[0], a0);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
|
|
|
case INDEX_op_sub2_i32:
|
2013-03-12 03:51:56 +01:00
|
|
|
a0 = args[0], a1 = args[1], a2 = args[2];
|
|
|
|
a3 = args[3], a4 = args[4], a5 = args[5];
|
|
|
|
if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
|
2013-03-12 17:49:04 +01:00
|
|
|
a0 = TCG_REG_TMP;
|
2013-03-12 03:51:56 +01:00
|
|
|
}
|
|
|
|
if (const_args[2]) {
|
|
|
|
if (const_args[4]) {
|
|
|
|
tcg_out_movi32(s, COND_AL, a0, a4);
|
|
|
|
a4 = a0;
|
|
|
|
}
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
|
|
|
|
ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
|
|
|
|
}
|
|
|
|
if (const_args[3]) {
|
|
|
|
if (const_args[5]) {
|
|
|
|
tcg_out_movi32(s, COND_AL, a1, a5);
|
|
|
|
a5 = a1;
|
|
|
|
}
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
|
|
|
|
a1, a3, a5, const_args[5]);
|
|
|
|
}
|
|
|
|
tcg_out_mov_reg(s, COND_AL, args[0], a0);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2008-05-20 13:26:40 +02:00
|
|
|
case INDEX_op_neg_i32:
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
|
|
|
|
break;
|
2009-08-22 13:55:06 +02:00
|
|
|
case INDEX_op_not_i32:
|
|
|
|
tcg_out_dat_reg(s, COND_AL,
|
|
|
|
ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
|
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_mul_i32:
|
|
|
|
tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_mulu2_i32:
|
|
|
|
tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
|
|
|
|
break;
|
2013-02-20 08:51:58 +01:00
|
|
|
case INDEX_op_muls2_i32:
|
|
|
|
tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
|
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
/* XXX: Perhaps args[2] & 0x1f is wrong */
|
|
|
|
case INDEX_op_shl_i32:
|
|
|
|
c = const_args[2] ?
|
|
|
|
SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
|
|
|
|
goto gen_shift32;
|
|
|
|
case INDEX_op_shr_i32:
|
|
|
|
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
|
|
|
|
goto gen_shift32;
|
|
|
|
case INDEX_op_sar_i32:
|
|
|
|
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
|
2010-04-09 20:52:48 +02:00
|
|
|
goto gen_shift32;
|
|
|
|
case INDEX_op_rotr_i32:
|
|
|
|
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
|
2008-05-20 01:59:38 +02:00
|
|
|
/* Fall through. */
|
|
|
|
gen_shift32:
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
|
|
|
|
break;
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
case INDEX_op_rotl_i32:
|
|
|
|
if (const_args[2]) {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
|
|
|
|
((0x20 - args[2]) & 0x1f) ?
|
|
|
|
SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0));
|
|
|
|
} else {
|
2014-02-13 11:26:46 +01:00
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
|
2010-04-09 20:52:48 +02:00
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
|
2013-03-12 17:49:04 +01:00
|
|
|
SHIFT_REG_ROR(TCG_REG_TMP));
|
2010-04-09 20:52:48 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2016-11-16 14:59:40 +01:00
|
|
|
case INDEX_op_ctz_i32:
|
|
|
|
tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
|
|
|
|
a1 = TCG_REG_TMP;
|
|
|
|
goto do_clz;
|
|
|
|
|
|
|
|
case INDEX_op_clz_i32:
|
|
|
|
a1 = args[1];
|
|
|
|
do_clz:
|
|
|
|
a0 = args[0];
|
|
|
|
a2 = args[2];
|
|
|
|
c = const_args[2];
|
|
|
|
if (c && a2 == 32) {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
|
|
|
|
tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
|
|
|
|
if (c || a0 != a2) {
|
|
|
|
tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2008-05-20 01:59:38 +02:00
|
|
|
case INDEX_op_brcond_i32:
|
2013-03-12 02:21:59 +01:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
2012-09-26 20:48:54 +02:00
|
|
|
args[0], args[1], const_args[1]);
|
2015-02-13 22:39:54 +01:00
|
|
|
tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
|
|
|
|
arg_label(args[3]));
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2010-03-01 22:33:48 +01:00
|
|
|
case INDEX_op_setcond_i32:
|
2013-03-12 02:21:59 +01:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[1], args[2], const_args[2]);
|
2010-03-01 22:33:48 +01:00
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
|
|
|
|
ARITH_MOV, args[0], 0, 1);
|
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
|
|
|
|
ARITH_MOV, args[0], 0, 0);
|
|
|
|
break;
|
2018-01-15 22:01:37 +01:00
|
|
|
|
|
|
|
case INDEX_op_brcond2_i32:
|
|
|
|
c = tcg_out_cmp2(s, args, const_args);
|
|
|
|
tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
|
|
|
|
break;
|
2010-03-01 22:33:49 +01:00
|
|
|
case INDEX_op_setcond2_i32:
|
2018-01-15 22:01:37 +01:00
|
|
|
c = tcg_out_cmp2(s, args + 1, const_args + 1);
|
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
|
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
|
2010-03-01 22:33:49 +01:00
|
|
|
ARITH_MOV, args[0], 0, 0);
|
2010-03-02 22:26:04 +01:00
|
|
|
break;
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2023-05-17 05:07:20 +02:00
|
|
|
case INDEX_op_qemu_ld_a32_i32:
|
|
|
|
tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2023-05-17 05:07:20 +02:00
|
|
|
case INDEX_op_qemu_ld_a64_i32:
|
|
|
|
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
|
|
|
|
args[3], TCG_TYPE_I32);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2023-05-17 05:07:20 +02:00
|
|
|
case INDEX_op_qemu_ld_a32_i64:
|
|
|
|
tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
|
|
|
|
args[3], TCG_TYPE_I64);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
2023-05-17 05:07:20 +02:00
|
|
|
case INDEX_op_qemu_ld_a64_i64:
|
|
|
|
tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
|
|
|
|
args[4], TCG_TYPE_I64);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case INDEX_op_qemu_st_a32_i32:
|
|
|
|
tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
|
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_st_a64_i32:
|
|
|
|
tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
|
|
|
|
args[3], TCG_TYPE_I32);
|
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_st_a32_i64:
|
|
|
|
tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
|
|
|
|
args[3], TCG_TYPE_I64);
|
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_st_a64_i64:
|
|
|
|
tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
|
|
|
|
args[4], TCG_TYPE_I64);
|
2008-05-20 01:59:38 +02:00
|
|
|
break;
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
case INDEX_op_bswap16_i32:
|
2021-06-13 09:42:55 +02:00
|
|
|
tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
|
2010-04-09 20:52:48 +02:00
|
|
|
break;
|
|
|
|
case INDEX_op_bswap32_i32:
|
|
|
|
tcg_out_bswap32(s, COND_AL, args[0], args[1]);
|
|
|
|
break;
|
|
|
|
|
2013-03-05 06:12:30 +01:00
|
|
|
case INDEX_op_deposit_i32:
|
|
|
|
tcg_out_deposit(s, COND_AL, args[0], args[2],
|
|
|
|
args[3], args[4], const_args[2]);
|
|
|
|
break;
|
2016-10-15 02:51:45 +02:00
|
|
|
case INDEX_op_extract_i32:
|
|
|
|
tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_sextract_i32:
|
|
|
|
tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
|
|
|
|
break;
|
2019-02-25 22:20:01 +01:00
|
|
|
case INDEX_op_extract2_i32:
|
|
|
|
/* ??? These optimizations vs zero should be generic. */
|
|
|
|
/* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
|
|
|
|
if (const_args[1]) {
|
|
|
|
if (const_args[2]) {
|
|
|
|
tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
|
|
|
|
args[2], SHIFT_IMM_LSL(32 - args[3]));
|
|
|
|
}
|
|
|
|
} else if (const_args[2]) {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
|
|
|
|
args[1], SHIFT_IMM_LSR(args[3]));
|
|
|
|
} else {
|
|
|
|
/* We can do extract2 in 2 insns, vs the 3 required otherwise. */
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
|
|
|
|
args[2], SHIFT_IMM_LSL(32 - args[3]));
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
|
|
|
|
args[1], SHIFT_IMM_LSR(args[3]));
|
|
|
|
}
|
|
|
|
break;
|
2013-03-05 06:12:30 +01:00
|
|
|
|
2013-03-12 06:11:30 +01:00
|
|
|
case INDEX_op_div_i32:
|
|
|
|
tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_divu_i32:
|
|
|
|
tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
|
2016-07-14 22:20:16 +02:00
|
|
|
case INDEX_op_mb:
|
|
|
|
tcg_out_mb(s, args[0]);
|
|
|
|
break;
|
|
|
|
|
2014-04-25 21:19:33 +02:00
|
|
|
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
|
|
|
|
case INDEX_op_call: /* Always emitted via tcg_out_call. */
|
2022-11-26 21:42:06 +01:00
|
|
|
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
|
2022-11-27 02:14:05 +01:00
|
|
|
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
|
2023-04-05 20:17:01 +02:00
|
|
|
case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
|
2023-04-05 22:26:51 +02:00
|
|
|
case INDEX_op_ext8u_i32:
|
2023-04-05 23:49:59 +02:00
|
|
|
case INDEX_op_ext16s_i32:
|
2023-04-06 01:25:22 +02:00
|
|
|
case INDEX_op_ext16u_i32:
|
2008-05-20 01:59:38 +02:00
|
|
|
default:
|
2023-04-05 21:09:14 +02:00
|
|
|
g_assert_not_reached();
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-17 18:19:33 +02:00
|
|
|
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
|
|
|
|
{
|
2017-09-14 02:38:44 +02:00
|
|
|
switch (op) {
|
|
|
|
case INDEX_op_goto_ptr:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O0_I1(r);
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_ld8u_i32:
|
|
|
|
case INDEX_op_ld8s_i32:
|
|
|
|
case INDEX_op_ld16u_i32:
|
|
|
|
case INDEX_op_ld16s_i32:
|
|
|
|
case INDEX_op_ld_i32:
|
|
|
|
case INDEX_op_neg_i32:
|
|
|
|
case INDEX_op_not_i32:
|
|
|
|
case INDEX_op_bswap16_i32:
|
|
|
|
case INDEX_op_bswap32_i32:
|
|
|
|
case INDEX_op_ext8s_i32:
|
|
|
|
case INDEX_op_ext16s_i32:
|
|
|
|
case INDEX_op_ext16u_i32:
|
|
|
|
case INDEX_op_extract_i32:
|
|
|
|
case INDEX_op_sextract_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I1(r, r);
|
|
|
|
|
|
|
|
case INDEX_op_st8_i32:
|
|
|
|
case INDEX_op_st16_i32:
|
|
|
|
case INDEX_op_st_i32:
|
|
|
|
return C_O0_I2(r, r);
|
2013-03-05 06:12:30 +01:00
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_add_i32:
|
|
|
|
case INDEX_op_sub_i32:
|
|
|
|
case INDEX_op_setcond_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, r, rIN);
|
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_and_i32:
|
|
|
|
case INDEX_op_andc_i32:
|
|
|
|
case INDEX_op_clz_i32:
|
|
|
|
case INDEX_op_ctz_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, r, rIK);
|
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_mul_i32:
|
|
|
|
case INDEX_op_div_i32:
|
|
|
|
case INDEX_op_divu_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, r, r);
|
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_mulu2_i32:
|
|
|
|
case INDEX_op_muls2_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O2_I2(r, r, r, r);
|
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_or_i32:
|
|
|
|
case INDEX_op_xor_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, r, rI);
|
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_shl_i32:
|
|
|
|
case INDEX_op_shr_i32:
|
|
|
|
case INDEX_op_sar_i32:
|
|
|
|
case INDEX_op_rotl_i32:
|
|
|
|
case INDEX_op_rotr_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, r, ri);
|
2013-03-12 06:11:30 +01:00
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_brcond_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O0_I2(r, rIN);
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_deposit_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, 0, rZ);
|
2019-02-25 22:20:01 +01:00
|
|
|
case INDEX_op_extract2_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I2(r, rZ, rZ);
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_movcond_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I4(r, r, rIN, rIK, 0);
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_add2_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O2_I4(r, r, r, r, rIN, rIK);
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_sub2_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O2_I4(r, r, rI, rI, rIN, rIK);
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_brcond2_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O0_I4(r, r, rI, rI);
|
2017-09-14 02:38:44 +02:00
|
|
|
case INDEX_op_setcond2_i32:
|
2020-10-17 18:19:33 +02:00
|
|
|
return C_O1_I4(r, r, r, rI, rI);
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2023-05-17 05:07:20 +02:00
|
|
|
case INDEX_op_qemu_ld_a32_i32:
|
|
|
|
return C_O1_I1(r, q);
|
|
|
|
case INDEX_op_qemu_ld_a64_i32:
|
|
|
|
return C_O1_I2(r, q, q);
|
|
|
|
case INDEX_op_qemu_ld_a32_i64:
|
|
|
|
return C_O2_I1(e, p, q);
|
|
|
|
case INDEX_op_qemu_ld_a64_i64:
|
|
|
|
return C_O2_I2(e, p, q, q);
|
|
|
|
case INDEX_op_qemu_st_a32_i32:
|
|
|
|
return C_O0_I2(q, q);
|
|
|
|
case INDEX_op_qemu_st_a64_i32:
|
|
|
|
return C_O0_I3(q, q, q);
|
|
|
|
case INDEX_op_qemu_st_a32_i64:
|
|
|
|
return C_O0_I3(Q, p, q);
|
|
|
|
case INDEX_op_qemu_st_a64_i64:
|
|
|
|
return C_O0_I4(Q, p, q, q);
|
2016-11-18 09:31:40 +01:00
|
|
|
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_st_vec:
|
|
|
|
return C_O0_I2(w, r);
|
|
|
|
case INDEX_op_ld_vec:
|
|
|
|
case INDEX_op_dupm_vec:
|
|
|
|
return C_O1_I1(w, r);
|
|
|
|
case INDEX_op_dup_vec:
|
|
|
|
return C_O1_I1(w, wr);
|
2020-09-05 20:58:47 +02:00
|
|
|
case INDEX_op_abs_vec:
|
|
|
|
case INDEX_op_neg_vec:
|
|
|
|
case INDEX_op_not_vec:
|
2020-09-05 21:24:28 +02:00
|
|
|
case INDEX_op_shli_vec:
|
|
|
|
case INDEX_op_shri_vec:
|
|
|
|
case INDEX_op_sari_vec:
|
2020-09-05 20:58:47 +02:00
|
|
|
return C_O1_I1(w, w);
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_dup2_vec:
|
|
|
|
case INDEX_op_add_vec:
|
2020-09-05 21:30:17 +02:00
|
|
|
case INDEX_op_mul_vec:
|
2020-09-05 21:44:06 +02:00
|
|
|
case INDEX_op_smax_vec:
|
|
|
|
case INDEX_op_smin_vec:
|
2020-09-05 21:37:36 +02:00
|
|
|
case INDEX_op_ssadd_vec:
|
|
|
|
case INDEX_op_sssub_vec:
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_sub_vec:
|
2020-09-05 21:44:06 +02:00
|
|
|
case INDEX_op_umax_vec:
|
|
|
|
case INDEX_op_umin_vec:
|
2020-09-05 21:37:36 +02:00
|
|
|
case INDEX_op_usadd_vec:
|
|
|
|
case INDEX_op_ussub_vec:
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_xor_vec:
|
2020-09-05 22:13:10 +02:00
|
|
|
case INDEX_op_arm_sshl_vec:
|
|
|
|
case INDEX_op_arm_ushl_vec:
|
2020-09-06 00:54:33 +02:00
|
|
|
return C_O1_I2(w, w, w);
|
2020-09-05 22:26:48 +02:00
|
|
|
case INDEX_op_arm_sli_vec:
|
|
|
|
return C_O1_I2(w, 0, w);
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_or_vec:
|
2020-09-05 20:58:47 +02:00
|
|
|
case INDEX_op_andc_vec:
|
2020-09-06 00:54:33 +02:00
|
|
|
return C_O1_I2(w, w, wO);
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_and_vec:
|
2020-09-05 20:58:47 +02:00
|
|
|
case INDEX_op_orc_vec:
|
2020-09-06 00:54:33 +02:00
|
|
|
return C_O1_I2(w, w, wV);
|
2021-05-04 01:47:52 +02:00
|
|
|
case INDEX_op_cmp_vec:
|
2020-09-06 00:54:33 +02:00
|
|
|
return C_O1_I2(w, w, wZ);
|
2020-09-05 21:54:37 +02:00
|
|
|
case INDEX_op_bitsel_vec:
|
|
|
|
return C_O1_I3(w, w, w, w);
|
2017-09-14 02:38:44 +02:00
|
|
|
default:
|
2020-10-17 18:19:33 +02:00
|
|
|
g_assert_not_reached();
|
2016-11-18 09:31:40 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-06-03 02:26:56 +02:00
|
|
|
static void tcg_target_init(TCGContext *s)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2021-06-29 07:14:00 +02:00
|
|
|
/*
|
|
|
|
* Only probe for the platform and capabilities if we haven't already
|
|
|
|
* determined maximum values at compile time.
|
|
|
|
*/
|
2021-05-04 01:47:52 +02:00
|
|
|
#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
|
2013-05-02 13:18:38 +02:00
|
|
|
{
|
2013-06-07 16:26:20 +02:00
|
|
|
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
|
2021-05-04 01:47:52 +02:00
|
|
|
#ifndef use_idiv_instructions
|
2013-05-02 13:18:38 +02:00
|
|
|
use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
|
2021-05-04 01:47:52 +02:00
|
|
|
#endif
|
|
|
|
#ifndef use_neon_instructions
|
|
|
|
use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
|
|
|
|
#endif
|
2013-05-02 13:18:38 +02:00
|
|
|
}
|
2013-06-07 16:26:20 +02:00
|
|
|
#endif
|
2021-05-04 01:47:52 +02:00
|
|
|
|
2013-06-06 19:46:35 +02:00
|
|
|
if (__ARM_ARCH < 7) {
|
2013-06-07 16:26:20 +02:00
|
|
|
const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
|
2013-06-06 19:46:35 +02:00
|
|
|
if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
|
|
|
|
arm_arch = pl[1] - '0';
|
|
|
|
}
|
2022-01-03 02:42:07 +01:00
|
|
|
|
|
|
|
if (arm_arch < 6) {
|
|
|
|
error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
2013-06-06 19:46:35 +02:00
|
|
|
}
|
2013-05-02 13:18:38 +02:00
|
|
|
|
2021-05-04 01:47:52 +02:00
|
|
|
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
|
2017-09-11 21:44:30 +02:00
|
|
|
|
|
|
|
tcg_target_call_clobber_regs = 0;
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2021-05-04 01:47:52 +02:00
|
|
|
if (use_neon_instructions) {
|
|
|
|
tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
|
|
|
|
tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
|
|
|
|
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
|
|
|
|
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
|
|
|
|
}
|
|
|
|
|
2017-09-11 20:25:55 +02:00
|
|
|
s->reserved_regs = 0;
|
2008-05-20 01:59:38 +02:00
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
|
2013-03-12 17:49:04 +01:00
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
|
2010-04-09 20:52:48 +02:00
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
|
2021-05-04 01:47:52 +02:00
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:03 +02:00
|
|
|
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
|
|
|
|
TCGReg arg1, intptr_t arg2)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2021-05-04 01:48:03 +02:00
|
|
|
switch (type) {
|
|
|
|
case TCG_TYPE_I32:
|
|
|
|
tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
|
|
|
|
return;
|
|
|
|
case TCG_TYPE_V64:
|
|
|
|
/* regs 1; size 8; align 8 */
|
|
|
|
tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
|
|
|
|
return;
|
|
|
|
case TCG_TYPE_V128:
|
2021-09-12 19:49:25 +02:00
|
|
|
/*
|
|
|
|
* We have only 8-byte alignment for the stack per the ABI.
|
|
|
|
* Rather than dynamically re-align the stack, it's easier
|
|
|
|
* to simply not request alignment beyond that. So:
|
|
|
|
* regs 2; size 8; align 8
|
|
|
|
*/
|
|
|
|
tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
|
2021-05-04 01:48:03 +02:00
|
|
|
return;
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:03 +02:00
|
|
|
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
|
|
|
|
TCGReg arg1, intptr_t arg2)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2021-05-04 01:48:03 +02:00
|
|
|
switch (type) {
|
|
|
|
case TCG_TYPE_I32:
|
|
|
|
tcg_out_st32(s, COND_AL, arg, arg1, arg2);
|
|
|
|
return;
|
|
|
|
case TCG_TYPE_V64:
|
|
|
|
/* regs 1; size 8; align 8 */
|
|
|
|
tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
|
|
|
|
return;
|
|
|
|
case TCG_TYPE_V128:
|
2021-09-12 19:49:25 +02:00
|
|
|
/* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
|
|
|
|
tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
|
2021-05-04 01:48:03 +02:00
|
|
|
return;
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
|
2021-08-10 00:57:07 +02:00
|
|
|
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
|
|
|
|
TCGReg base, intptr_t ofs)
|
2016-06-20 07:59:13 +02:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:07 +02:00
|
|
|
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2021-05-04 01:48:07 +02:00
|
|
|
if (ret == arg) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
switch (type) {
|
|
|
|
case TCG_TYPE_I32:
|
|
|
|
if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
|
|
|
|
tcg_out_mov_reg(s, COND_AL, ret, arg);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
|
|
|
|
case TCG_TYPE_V64:
|
|
|
|
case TCG_TYPE_V128:
|
|
|
|
/* "VMOV D,N" is an alias for "VORR D,N,N". */
|
|
|
|
tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
|
|
|
|
return true;
|
|
|
|
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
|
2021-05-04 01:48:07 +02:00
|
|
|
static void tcg_out_movi(TCGContext *s, TCGType type,
|
|
|
|
TCGReg ret, tcg_target_long arg)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
2021-05-04 01:48:07 +02:00
|
|
|
tcg_debug_assert(type == TCG_TYPE_I32);
|
|
|
|
tcg_debug_assert(ret < TCG_REG_Q0);
|
2008-05-20 01:59:38 +02:00
|
|
|
tcg_out_movi32(s, COND_AL, ret, arg);
|
|
|
|
}
|
|
|
|
|
2023-04-06 06:39:54 +02:00
|
|
|
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-10-18 13:28:04 +02:00
|
|
|
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
|
|
|
|
tcg_target_long imm)
|
|
|
|
{
|
|
|
|
int enc, opc = ARITH_ADD;
|
|
|
|
|
|
|
|
/* All of the easiest immediates to encode are positive. */
|
|
|
|
if (imm < 0) {
|
|
|
|
imm = -imm;
|
|
|
|
opc = ARITH_SUB;
|
|
|
|
}
|
|
|
|
enc = encode_imm(imm);
|
|
|
|
if (enc >= 0) {
|
|
|
|
tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
|
|
|
|
} else {
|
|
|
|
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
|
|
|
|
tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
|
|
|
|
TCG_REG_TMP, SHIFT_IMM_LSL(0));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-05 09:03:27 +02:00
|
|
|
/* Type is always V128, with I64 elements. */
|
|
|
|
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
|
|
|
|
{
|
|
|
|
/* Move high element into place first. */
|
|
|
|
/* VMOV Dd+1, Ds */
|
|
|
|
tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
|
|
|
|
/* Move low element into place; tcg_out_mov will check for nop. */
|
|
|
|
tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
|
|
|
|
}
|
|
|
|
|
2021-05-04 01:47:52 +02:00
|
|
|
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg rd, TCGReg rs)
|
|
|
|
{
|
2020-09-05 09:03:27 +02:00
|
|
|
int q = type - TCG_TYPE_V64;
|
|
|
|
|
|
|
|
if (vece == MO_64) {
|
|
|
|
if (type == TCG_TYPE_V128) {
|
|
|
|
tcg_out_dup2_vec(s, rd, rs, rs);
|
|
|
|
} else {
|
|
|
|
tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
|
|
|
|
}
|
|
|
|
} else if (rs < TCG_REG_Q0) {
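/* VDUP (general register): replicate the core register into all lanes. */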
|
|
|
|
int b = (vece == MO_8);
|
|
|
|
int e = (vece == MO_16);
|
|
|
|
tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
|
|
|
|
encode_vn(rd) | (rs << 12));
|
|
|
|
} else {
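/* VDUP (scalar): imm4 = 1 << vece selects the element size with index 0. */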
|
|
|
|
int imm4 = 1 << vece;
|
|
|
|
tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
|
|
|
|
encode_vd(rd) | encode_vm(rs));
|
|
|
|
}
|
|
|
|
return true;
|
2021-05-04 01:47:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg rd, TCGReg base, intptr_t offset)
|
|
|
|
{
|
2020-09-05 09:03:27 +02:00
|
|
|
if (vece == MO_64) {
|
|
|
|
tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
|
|
|
|
if (type == TCG_TYPE_V128) {
|
|
|
|
tcg_out_dup2_vec(s, rd, rd, rd);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
int q = type - TCG_TYPE_V64;
|
|
|
|
tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
|
|
|
|
rd, base, offset);
|
|
|
|
}
|
|
|
|
return true;
|
2021-05-04 01:47:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg rd, int64_t v64)
|
|
|
|
{
|
2020-09-05 09:03:27 +02:00
|
|
|
int q = type - TCG_TYPE_V64;
|
|
|
|
int cmode, imm8, i;
|
|
|
|
|
|
|
|
/* Test all bytes equal first. */
|
|
|
|
if (vece == MO_8) {
|
|
|
|
tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Test all bytes 0x00 or 0xff second. This can match cases that
|
|
|
|
* might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
|
|
|
|
*/
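/* E.g. (illustrative) v64 = 0x00ff00ff00ff00ff sets imm8 = 0x55 and
   is emitted as a single VMOV with op=1, cmode=0xe.  */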
|
|
|
|
for (i = imm8 = 0; i < 8; i++) {
|
|
|
|
uint8_t byte = v64 >> (i * 8);
|
|
|
|
if (byte == 0xff) {
|
|
|
|
imm8 |= 1 << i;
|
|
|
|
} else if (byte != 0) {
|
|
|
|
goto fail_bytes;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
|
|
|
|
return;
|
|
|
|

 fail_bytes:
    /*
     * Tests for various replications. For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }

    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions. Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }

    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}
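
/*
 * A rough sketch of the pool path above (assumptions noted): for D
 * registers and 64-bit elements the constant goes into the pool and is
 * fetched with a pc-relative VLDR, then replicated into the high half
 * when a Q register is wanted; otherwise only 32 bits go to the pool
 * and are broadcast with VLD1R via tcg_out_dupm_vec, with TCG_REG_TMP
 * holding the address, presumably because VLD1 has no pc-relative
 * addressing mode of its own.
 */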

static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};

static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};
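
/*
 * Entries missing from the tables are zero: NEON only provides GT/GE
 * style register compares, so LT/LE and their unsigned variants are
 * reached by swapping the operands in tcg_out_vec_op below, and NE is
 * synthesized from VCEQ plus VMVN, or from VTST when comparing with 0.
 */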

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        return;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        return;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        return;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        return;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        return;
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        return;
    case INDEX_op_arm_ushl_vec:
        /* See above. */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        return;
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        return;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        return;
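
    /*
     * On the immediate shifts above (an encoding sketch, assuming the
     * usual NEON imm6 scheme): left-shift counts are encoded as
     * (8 << vece) + shift and right shifts as (16 << vece) - shift,
     * so e.g. vece = MO_16 with a right shift of 3 yields imm6 = 29.
     */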

    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        return;

    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        return;

    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        return;

    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (cond == TCG_COND_NE) {
                if (const_args[2]) {
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
            } else {
                ARMInsn insn;

                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        return;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
            }
        }
        return;

    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}
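
/*
 * On bitsel above: VBSL, VBIT and VBIF are the same bitwise select
 * with the operand roles rotated, so the variant is chosen by which
 * source already aliases the destination, saving a move when the
 * register allocator reuses an input. The NE-against-zero path
 * likewise exploits that VTST of a register with itself tests
 * (x & x) != 0, i.e. x != 0.
 */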

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
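
/*
 * The return values follow the usual TCG convention: 1 means the op
 * is emitted directly, 0 means unsupported, and -1 means the op must
 * go through tcg_expand_vec_op below, which rewrites it in terms of
 * ops the backend does implement.
 */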

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;

    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotli_vec:
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;
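
    /*
     * The rotli expansion above relies on the identity
     * rotl(x, n) == (x >> (w - n)) | (x << n): the VSHRI produces the
     * low part and VSLI shifts x left by n while inserting those bits,
     * so no separate OR is needed.
     */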

    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}
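
/*
 * In both variable-rotate expansions, c1 holds the element width in
 * bits (8 << vece), so e.g. rotlv computes (x << n) | (x >> (w - n))
 * as two VSHLs with counts n and n - w, the second being the negative
 * left shift that NEON treats as a right shift.
 */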

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE)
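
/*
 * Checking the arithmetic (informally): PUSH_SIZE covers r4-r11 plus
 * lr, i.e. (11 - 4 + 1) + 1 = 9 registers of 4 bytes, 36 bytes. The
 * mask with -TCG_TARGET_STACK_ALIGN rounds the total up to the stack
 * alignment, and STACK_ADDEND is the part the prologue still has to
 * subtract after the stmdb has already pushed PUSH_SIZE bytes.
 */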

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));

    /* Reserve callee argument and tcg temp space. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }
#endif

    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}
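
/*
 * A sketch of the control flow above: translated code is entered via
 * the branch to tcg_target_call_iarg_regs[1] (the second C argument,
 * the code pointer), so everything emitted after that branch is the
 * common exit path, and the epilogue below doubles as the target of
 * exit_tb.
 */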

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}
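
/*
 * Note the load multiple restores the saved lr slot directly into pc,
 * so the pop doubles as the function return; there is no separate
 * "bx lr" on this exit path.
 */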

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue. */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};
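
/*
 * Sanity-checking the uleb128 split (informal): the low seven bits of
 * FRAME_SIZE go in the first byte with the continuation bit 0x80 set,
 * and the remaining bits in the second byte, hence the build-time
 * assertion above that FRAME_SIZE fits in 14 bits.
 */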

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}