2008-05-20 01:59:38 +02:00
|
|
|
/*
|
|
|
|
* Tiny Code Generator for QEMU
|
|
|
|
*
|
|
|
|
* Copyright (c) 2008 Andrzej Zaborowski
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
2008-10-05 11:59:14 +02:00
|
|
|
|
2013-06-07 16:26:20 +02:00
|
|
|
#include "elf.h"
|
2020-01-01 12:23:01 +01:00
|
|
|
#include "../tcg-pool.inc.c"
|
2013-10-03 21:51:24 +02:00
|
|
|
|
2016-10-15 02:45:26 +02:00
|
|
|
/* Host ARM architecture revision, from the compiler's target (e.g. 7). */
int arm_arch = __ARM_ARCH;

/* When tcg-target.h did not fix use_idiv_instructions as a compile-time
   constant (i.e. the macro is absent), provide it as a runtime flag.
   NOTE(review): presumably initialized during target init from host
   feature detection — confirm against tcg_target_init. */
#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
|
|
|
|
|
2014-03-25 22:11:37 +01:00
|
|
|
/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
/* C-level boolean mirror of CONFIG_SOFTMMU, usable in ordinary 'if'
   conditions instead of #ifdef. */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif
|
|
|
|
|
2016-04-21 10:48:50 +02:00
|
|
|
#ifdef CONFIG_DEBUG_TCG
|
2008-10-05 11:59:14 +02:00
|
|
|
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
|
2008-05-20 01:59:38 +02:00
|
|
|
"%r0",
|
|
|
|
"%r1",
|
|
|
|
"%r2",
|
|
|
|
"%r3",
|
|
|
|
"%r4",
|
|
|
|
"%r5",
|
|
|
|
"%r6",
|
|
|
|
"%r7",
|
|
|
|
"%r8",
|
|
|
|
"%r9",
|
|
|
|
"%r10",
|
|
|
|
"%r11",
|
|
|
|
"%r12",
|
|
|
|
"%r13",
|
|
|
|
"%r14",
|
2010-04-09 20:52:48 +02:00
|
|
|
"%pc",
|
2008-05-20 01:59:38 +02:00
|
|
|
};
|
2008-10-05 11:59:14 +02:00
|
|
|
#endif
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2008-10-05 11:59:14 +02:00
|
|
|
/*
 * Register allocation preference order: the call-saved registers
 * (r4-r11) come first so values are more likely to survive calls;
 * the call-clobbered registers (r0-r3, r12, r14 under AAPCS) last.
 * NOTE(review): r13 appears in the list — presumably it is excluded
 * elsewhere as a reserved register; confirm against tcg_target_init.
 */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};
|
|
|
|
|
2008-10-05 11:59:14 +02:00
|
|
|
/* AAPCS: the first four integer call arguments are passed in r0-r3. */
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

/* AAPCS: 32-bit results return in r0; 64-bit results in the r0/r1 pair. */
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
|
|
|
|
|
2013-03-12 17:50:25 +01:00
|
|
|
/* Scratch register for the code generator's own use: r12 (ip), the
   AAPCS intra-procedure-call scratch register. */
#define TCG_REG_TMP TCG_REG_R12
|
2013-03-12 17:49:04 +01:00
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* ARM condition-code field values (instruction bits [31:28]). */
enum arm_cond_code_e {
    COND_EQ = 0x0, /* Equal */
    COND_NE = 0x1, /* Not equal */
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa, /* Signed greater or equal */
    COND_LT = 0xb, /* Signed less than */
    COND_GT = 0xc, /* Signed greater than */
    COND_LE = 0xd, /* Signed less or equal */
    COND_AL = 0xe, /* Always */
};
|
|
|
|
|
|
|
|
/* S bit (bit 20): make a data-processing insn update the CPSR flags. */
#define TO_CPSR (1 << 20)

/* Operand-2 shifter encodings: shift by a 5-bit immediate ... */
#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
/* ... or by the value of register rs. */
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
|
|
|
|
|
|
|
|
/* Partial ARM instruction encodings used by this backend. */
typedef enum {
    /* Data-processing opcodes, placed in instruction bits [24:21]. */
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    /* The compare/test ops write no register; they must set the flags. */
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    /* Word load/store, immediate-offset and register-offset forms. */
    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    /* Halfword load/store, plus sign-extending halfword load. */
    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    /* Byte load/store, plus sign-extending byte load. */
    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    /* Doubleword (register-pair) load/store. */
    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    /* Memory barrier: DMB ISH, or the MCR-to-cp15 equivalent. */
    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV,
} ARMInsn;

/* Pick the best nop encoding available on the host. */
#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
|
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* Map TCG comparison conditions to ARM condition-code field values. */
static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    /* signed */
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
|
|
|
|
|
2018-11-30 22:01:57 +01:00
|
|
|
/* Patch the signed 24-bit word displacement of a B/BL at code_ptr so it
   reaches target.  Returns false when the displacement does not fit. */
static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    /* Displacement in words, biased by the 8-byte ARM pipeline prefetch. */
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;

    /* Out of range for the signed 24-bit branch field? */
    if (offset != sextract32(offset, 0, 24)) {
        return false;
    }
    *code_ptr = (*code_ptr & 0xff000000) | (offset & 0xffffff);
    return true;
}
|
|
|
|
|
2019-04-25 19:39:39 +02:00
|
|
|
/* Patch the U bit and 12-bit immediate of a pc-relative LDR/STR at
   code_ptr so it addresses target.  Returns false when out of range. */
static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    /* Byte displacement, biased by the 8-byte ARM pipeline prefetch. */
    ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;
    bool u = (offset >= 0);

    if (offset < -0xfff || offset > 0xfff) {
        return false;
    }
    if (!u) {
        offset = -offset;
    }
    /* Bit 23 selects add/subtract; bits [11:0] hold the magnitude. */
    *code_ptr = deposit32(deposit32(*code_ptr, 23, 1, u), 0, 12, offset);
    return true;
}
|
|
|
|
|
2018-11-30 20:52:48 +01:00
|
|
|
/* Resolve one relocation recorded against code_ptr.  'value' is the
   absolute target address; only a zero addend is supported. */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);

    switch (type) {
    case R_ARM_PC24:        /* 24-bit branch displacement */
        return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
    case R_ARM_PC13:        /* 12-bit load/store displacement */
        return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
2013-03-05 06:12:30 +01:00
|
|
|
/* Target-specific constant-constraint flags (beyond generic TCG_CT_*). */
#define TCG_CT_CONST_ARM  0x100   /* encodable as an ARM immediate */
#define TCG_CT_CONST_INV  0x200   /* bitwise inverse is encodable */
#define TCG_CT_CONST_NEG  0x400   /* negation is encodable */
#define TCG_CT_CONST_ZERO 0x800   /* the constant zero */
|
2013-03-05 06:36:45 +01:00
|
|
|
|
2008-05-20 01:59:38 +02:00
|
|
|
/* parse target specific constraints */
/*
 * Consume one constraint letter from ct_str and update *ct:
 *   I - constant encodable as an ARM immediate
 *   K - constant whose bitwise inverse is encodable
 *   N - constant whose negation is encodable
 *   Z - the constant zero
 *   r - any core register
 *   l - qemu_ld address register
 *   s - qemu_st address/data register
 * Returns the advanced string, or NULL for an unknown letter.
 */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return NULL;
    }
    return ct_str;
}
|
|
|
|
|
2009-08-22 14:29:09 +02:00
|
|
|
/* Rotate a 32-bit value left by n bits (n in [0, 31]).  The shift
   counts are masked so that n == 0 does not shift by 32, which is
   undefined behavior in C; callers such as tcg_out_movi32 do pass a
   rotation of 0 when encode_imm returns 0. */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << (n & 31)) | (val >> (-n & 31));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30.

   Return an even left-rotation r such that rotl(imm, r) fits in 8 bits
   (so the pair (rotl(imm, r), r) encodes imm), or -1 if imm cannot be
   encoded.  The previous special cases (ctz-based shift plus rotations
   2/4/6 for wrap-around) are subsumed by trying every even rotation in
   ascending order, which yields the same results. */
static inline int encode_imm(uint32_t imm)
{
    int rot;

    /* Simple case: the value already fits in the low byte. */
    if ((imm & ~0xffu) == 0) {
        return 0;
    }
    /* Exhaustively try the remaining even rotations. */
    for (rot = 2; rot <= 30; rot += 2) {
        if ((rotl(imm, rot) & ~0xffu) == 0) {
            return rot;
        }
    }
    /* imm can't be encoded. */
    return -1;
}

/* True iff imm is representable as an ARM ALU immediate. */
static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
|
|
|
|
|
2008-05-20 01:59:38 +02:00
|
|
|
/* Test if a constant matches the constraint.
|
|
|
|
* TODO: define constraints for:
|
|
|
|
*
|
|
|
|
* ldr/str offset: between -0xfff and 0xfff
|
|
|
|
* ldrh/strh offset: between -0xff and 0xff
|
|
|
|
* mov operand2: values represented with x << (2 * y), x < 0x100
|
|
|
|
* add, sub, eor...: ditto
|
|
|
|
*/
|
2014-03-31 06:22:11 +02:00
|
|
|
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
|
2013-03-05 06:36:45 +01:00
|
|
|
const TCGArgConstraint *arg_ct)
|
2008-05-20 01:59:38 +02:00
|
|
|
{
|
|
|
|
int ct;
|
|
|
|
ct = arg_ct->ct;
|
2013-03-05 06:36:45 +01:00
|
|
|
if (ct & TCG_CT_CONST) {
|
2008-05-20 01:59:38 +02:00
|
|
|
return 1;
|
2013-03-05 06:36:45 +01:00
|
|
|
} else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
|
2009-07-18 14:20:30 +02:00
|
|
|
return 1;
|
2013-03-05 06:36:45 +01:00
|
|
|
} else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
|
|
|
|
return 1;
|
2013-03-05 07:06:21 +01:00
|
|
|
} else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
|
|
|
|
return 1;
|
2013-03-05 06:12:30 +01:00
|
|
|
} else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
|
|
|
|
return 1;
|
2013-03-05 06:36:45 +01:00
|
|
|
} else {
|
2008-05-20 01:59:38 +02:00
|
|
|
return 0;
|
2013-03-05 06:36:45 +01:00
|
|
|
}
|
2008-05-20 01:59:38 +02:00
|
|
|
}
|
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* Emit B<cond>: 'offset' is a byte offset from this insn, corrected
   for the 8-byte ARM pipeline prefetch and stored as a word count. */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

/* Emit BL<cond> (branch with link), same offset convention as above. */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

/* Emit BLX<cond> rn (branch with link and exchange, register form). */
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

/* Emit BLX (immediate form): always unconditional; bit 1 of the byte
   offset is carried in the H bit (bit 24). */
static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}
|
2013-03-13 07:18:30 +01:00
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* Emit a data-processing insn with a (possibly shifted) register
   operand-2: rd = rn <opc> (rm <shift>).  'shift' is one of the
   SHIFT_IMM_*/SHIFT_REG_* encodings. */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

/* Emit a nop using the best encoding for the host (see INSN_NOP). */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* Emit an indirect branch to the address in rn. */
static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        /* Pre-v5t fallback: move the target address into pc. */
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

/* Emit a data-processing insn with an immediate operand-2 (bit 25 set):
   'im' is the 12-bit encoded immediate (8-bit value | rotation field). */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
/* Register-offset memory op.  u: add (1) or subtract (0) the offset;
   p: pre-indexed (1) or post-indexed (0); w: write back base register. */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

/* Memory op with a signed 8-bit immediate offset, split into two
   4-bit fields (halfword/doubleword forms).  The sign is folded into
   the U (add/subtract) bit. */
static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

/* Memory op with a signed 12-bit immediate offset (word/byte forms).
   The sign is folded into the U (add/subtract) bit. */
static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}
|
|
|
|
|
|
|
|
/* Convenience wrappers around tcg_out_memop_{12,8,r}: word, doubleword
   and unsigned-halfword loads/stores, offset mode [rn, #imm] / [rn, rm]
   (pre-indexed, no writeback unless noted). */

/* ldr rt, [rn, #imm12] */
static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

/* str rt, [rn, #imm12] */
static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

/* ldr rt, [rn, rm] */
static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

/* str rt, [rn, rm] */
static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

/* ldrd rt, [rn, #imm8] */
static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

/* ldrd rt, [rn, rm] */
static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

/* ldrd rt, [rn, rm]! (pre-increment with base writeback) */
static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

/* strd rt, [rn, #imm8] */
static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

/* strd rt, [rn, rm] */
static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

/* ldrh rt, [rn, #imm8] (zero-extending halfword load) */
static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2017-07-28 05:43:30 +02:00
|
|
|
/* Halfword and byte load/store wrappers; 16s/8s are the
   sign-extending loads, 16u/8 are zero-extending. */

/* strh rt, [rn, #imm8] */
static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

/* ldrh rt, [rn, rm] */
static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

/* strh rt, [rn, rm] */
static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

/* ldrsh rt, [rn, #imm8] */
static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

/* ldrsh rt, [rn, rm] */
static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

/* ldrb rt, [rn, #imm12] */
static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

/* strb rt, [rn, #imm12] */
static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

/* ldrb rt, [rn, rm] */
static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

/* strb rt, [rn, rm] */
static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

/* ldrsb rt, [rn, #imm8] */
static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

/* ldrsb rt, [rn, rm] */
static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
|
|
|
|
|
2017-07-28 05:47:56 +02:00
|
|
|
/* Load the 32-bit constant 'arg' into rd from the constant pool:
   record the value with an R_ARM_PC13 relocation and emit a
   pc-relative LDR whose offset is patched when the pool is placed. */
static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}
|
|
|
|
|
tcg-arm: Improve constant generation
Try fully rotated arguments to mov and mvn before trying movt
or full decomposition. Begin decomposition with mvn when it
looks like it'll help. Examples include
-: mov r9, #0x00000fa0
-: orr r9, r9, #0x000ee000
-: orr r9, r9, #0x0ff00000
-: orr r9, r9, #0xf0000000
+: mvn r9, #0x0000005f
+: eor r9, r9, #0x00011000
Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
2013-03-05 08:16:24 +01:00
|
|
|
/* Load the 32-bit constant ARG into RD, choosing the cheapest of:
 *   1. a single MOV/MVN with a rotated 8-bit immediate,
 *   2. a pc-relative ADD/SUB (for addresses near the code being emitted),
 *   3. MOVW (+ MOVT) on ARMv7,
 *   4. a two-insn MOV/MVN + EOR decomposition,
 *   5. a constant-pool load as the fallback.
 */
static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else. */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    /* The +8 accounts for the pc reading two insns ahead on ARM.  */
    diff = arg - ((intptr_t)s->code_ptr + 8);
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    /* sh1/sh2 are even shift amounts locating the two lowest byte-sized
       chunks of tt0; if removing both chunks leaves zero, two insns do.  */
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
|
|
|
|
|
2012-09-26 20:48:54 +02:00
|
|
|
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint (encodable immediate, or register);
 * an unencodable constant trips the assert below.
 */
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
|
|
|
|
|
2013-03-05 06:36:45 +01:00
|
|
|
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint: a constant that is encodable
 * either directly (use OPC) or after bitwise inversion (use OPINV,
 * the complementing opcode, e.g. AND vs BIC).
 */
static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            /* Not directly encodable: invert the constant and the opcode. */
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
|
|
|
|
|
2013-03-05 07:06:21 +01:00
|
|
|
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIN" constraint: a constant that is encodable
 * either directly (use OPC) or after negation (use OPNEG, the negating
 * opcode, e.g. ADD vs SUB).
 */
static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            /* Not directly encodable: negate the constant and the opcode. */
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
|
|
|
|
|
2013-03-12 18:34:18 +01:00
|
|
|
/* Emit MUL rd, rn, rm.  On pre-ARMv6 cores rd == rn is UNPREDICTABLE,
 * so the operands are shuffled (or copied through TCG_REG_TMP) first.
 */
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            /* Multiplication is commutative: swap so that rd != rn.  */
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}
|
|
|
|
|
2013-03-12 18:34:18 +01:00
|
|
|
/* Emit UMULL rd0, rd1, rn, rm (64-bit unsigned product in rd1:rd0).
 * On pre-ARMv6 cores either destination overlapping rn is UNPREDICTABLE,
 * so operands are swapped or copied through TCG_REG_TMP first.
 */
static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            /* Both inputs collide with the outputs; copy rn to tmp.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            /* Commutative: swapping rn/rm resolves the overlap.  */
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
|
|
|
|
|
2013-03-12 18:34:18 +01:00
|
|
|
/* Emit SMULL rd0, rd1, rn, rm (64-bit signed product in rd1:rd0).
 * Same pre-ARMv6 overlap constraint handling as tcg_out_umull32.
 */
static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            /* Both inputs collide with the outputs; copy rn to tmp.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            /* Commutative: swapping rn/rm resolves the overlap.  */
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
|
|
|
|
|
2013-03-12 06:11:30 +01:00
|
|
|
/* Emit SDIV rd, rn, rm (only reached when use_idiv_instructions). */
static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
|
|
|
|
|
|
|
|
/* Emit UDIV rd, rn, rm (only reached when use_idiv_instructions). */
static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Sign-extend the low byte of RN into RD: SXTB on ARMv6+,
 * otherwise the classic LSL #24 / ASR #24 pair.
 */
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Zero-extend the low byte of RN into RD (AND with 0xff). */
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Sign-extend the low halfword of RN into RD: SXTH on ARMv6+,
 * otherwise LSL #16 / ASR #16.
 */
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}
|
|
|
|
|
|
|
|
/* Zero-extend the low halfword of RN into RD: UXTH on ARMv6+,
 * otherwise LSL #16 / LSR #16.
 */
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Byte-swap the low halfword of RN and sign-extend into RD:
 * REVSH on ARMv6+, otherwise a three-insn shift/or sequence that
 * clobbers TCG_REG_TMP.
 */
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Byte-swap the low halfword of RN into RD (zero/high bits per the
 * fallback sequence): REV16 on ARMv6+, otherwise a three-insn sequence
 * that clobbers TCG_REG_TMP.
 */
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}
|
|
|
|
|
2012-10-09 21:53:11 +02:00
|
|
|
/* swap the two low bytes assuming that the two high input bytes and the
   two high output bit can hold any value.  Used before a 16-bit store,
   where the high half of the result is ignored.  Clobbers TCG_REG_TMP
   on the non-ARMv6 path.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Byte-swap the 32-bit value in RN into RD: REV on ARMv6+, otherwise
 * the classic four-insn EOR/BIC/ROR/EOR sequence, clobbering TCG_REG_TMP.
 */
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
|
|
|
|
|
2013-03-05 06:12:30 +01:00
|
|
|
/* Emit a bitfield insert of LEN bits of A1 into RD at bit OFS (bfi).
 * When A1 is constant the field is cleared instead (bfc, encoded as
 * bfi with rn == 15) — assumes the constant is zero; TODO confirm
 * constraints guarantee this at the call sites.
 */
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}
|
|
|
|
|
2016-10-15 02:51:45 +02:00
|
|
|
/* Emit an unsigned bitfield extract of LEN bits at bit OFS from A1
 * into RD (ubfx).
 */
static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}
|
|
|
|
|
|
|
|
/* Emit a signed bitfield extract of LEN bits at bit OFS from A1
 * into RD (sbfx).
 */
static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}
|
|
|
|
|
2008-05-20 01:59:38 +02:00
|
|
|
/* Load a 32-bit word from [RN + OFFSET] into RD.  Offsets outside the
 * 12-bit immediate range of LDR are materialized into TCG_REG_TMP and
 * used as a register offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
|
|
|
/* Store the 32-bit word in RD to [RN + OFFSET].  Offsets outside the
 * 12-bit immediate range of STR go through TCG_REG_TMP as a register
 * offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_st32(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
|
|
|
/* Load an unsigned halfword from [RN + OFFSET] into RD.  LDRH takes
 * only an 8-bit immediate, so larger offsets go through TCG_REG_TMP
 * as a register offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_ld16u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
|
|
|
/* Load a sign-extended halfword from [RN + OFFSET] into RD.  LDRSH takes
 * only an 8-bit immediate, so larger offsets go through TCG_REG_TMP
 * as a register offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_ld16s(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Store the low halfword of RD to [RN + OFFSET].  STRH takes only an
 * 8-bit immediate, so larger offsets go through TCG_REG_TMP as a
 * register offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_st16(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
|
|
|
/* Load an unsigned byte from [RN + OFFSET] into RD.  Offsets outside
 * the 12-bit immediate range of LDRB go through TCG_REG_TMP as a
 * register offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_ld8u(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
|
|
|
/* Load a sign-extended byte from [RN + OFFSET] into RD.  LDRSB takes
 * only an 8-bit immediate, so larger offsets go through TCG_REG_TMP
 * as a register offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_ld8s(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
2010-04-09 20:52:48 +02:00
|
|
|
/* Store the low byte of RD to [RN + OFFSET].  Offsets outside the
 * 12-bit immediate range of STRB go through TCG_REG_TMP as a register
 * offset (clobbering TCG_REG_TMP).
 */
static inline void tcg_out_st8(TCGContext *s, int cond,
                               int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}
|
|
|
|
|
2013-07-28 02:09:47 +02:00
|
|
|
/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    /* Direct branch only for ARM-mode targets (bit 0 clear) that fit in
       the 24-bit branch range; disp - 8 accounts for the pc offset.  */
    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    /* Out of range (or Thumb target): load the address straight into pc.  */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}
|
|
|
|
|
2011-12-12 16:37:31 +01:00
|
|
|
/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                /* No BLX before ARMv5T; we cannot call Thumb code.  */
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        /* Long call: movw/movt the address and BLX through a register.  */
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        /* Manually compute the return address into lr (pc reads 8 ahead,
           so pc + 0 is the insn after the following pool load).  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}
|
|
|
|
|
2015-02-13 22:39:54 +01:00
|
|
|
/* Branch to label L: direct if the label is already resolved, otherwise
 * emit a placeholder branch and record an R_ARM_PC24 relocation to be
 * patched when the label value becomes known.
 */
static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}
|
|
|
|
|
2016-07-14 22:20:16 +02:00
|
|
|
/* Emit a memory barrier: DMB ISH on ARMv7, the CP15 barrier MCR on ARMv6.
 * Pre-ARMv6 emits nothing (NOTE(review): presumably no SMP hosts there —
 * confirm).  The barrier kind argument a0 is currently ignored.
 */
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}
|
|
|
|
|
2018-01-15 22:01:37 +01:00
|
|
|
/* Emit a 64-bit comparison of {args[0],args[1]} (lo,hi) against
 * {args[2],args[3]} under condition args[4], leaving the CPU flags set.
 * Returns the TCGCond the caller should test on those flags (which may
 * be the swapped condition for the LE/GT path).  Clobbers TCG_REG_TMP
 * on the signed paths.
 */
static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparision.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
2008-05-20 01:59:38 +02:00
|
|
|
#ifdef CONFIG_SOFTMMU
|
2020-01-01 12:23:01 +01:00
|
|
|
#include "../tcg-ldst.inc.c"
|
2017-07-30 21:30:41 +02:00
|
|
|
|
2013-07-28 02:09:47 +02:00
|
|
|
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 *
 * Slow-path load helpers for the softmmu TLB miss case, indexed by the
 * MemOp size/sign/endianness bits.  Signed sub-word loads reuse the
 * zero-extending word helper where the result is already full-width.
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};
|
|
|
|
|
2013-07-28 02:09:47 +02:00
|
|
|
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 *
 * Slow-path store helpers for the softmmu TLB miss case, indexed by the
 * MemOp size/endianness bits; stores need no sign variants.
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
|
2012-08-26 15:40:02 +02:00
|
|
|
|
|
|
|
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 is real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 *
 * EXT_ARG materializes a non-register arg into TCG_REG_TMP for the
 * stack-slot case, where a store requires a source register.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
|
|
|
|
|
|
|
|
/* Marshal a 64-bit helper argument held in ARGLO:ARGHI.  Returns the
 * updated argreg for the next argument (argreg 0..3 are registers,
 * 4+ are AAPCS stack slots).
 */
static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    /* STRD needs ARMv6 and an even/odd source pair; otherwise fall back
       to two 32-bit stores via tcg_out_arg_reg32.  */
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2008-05-24 22:07:07 +02:00
|
|
|
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
|
|
|
|
|
2019-03-23 06:03:39 +01:00
|
|
|
/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */
|
|
|
|
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
|
|
|
|
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);
|
2013-08-28 23:40:52 +02:00
|
|
|
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
/* These offsets are built into the LDRD below. */
|
|
|
|
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
|
|
|
|
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
|
|
|
|
|
2013-08-30 17:45:53 +02:00
|
|
|
/* Load and compare a TLB entry, leaving the flags set. Returns the register
|
|
|
|
containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2013-08-30 17:45:53 +02:00
|
|
|
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
|
2019-08-23 20:10:58 +02:00
|
|
|
MemOp opc, int mem_index, bool is_load)
|
2013-03-13 02:18:07 +01:00
|
|
|
{
|
2019-01-23 05:33:03 +01:00
|
|
|
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
|
|
|
|
: offsetof(CPUTLBEntry, addr_write));
|
2019-03-23 06:03:39 +01:00
|
|
|
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
|
|
|
|
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
|
|
|
|
int table_off = fast_off + offsetof(CPUTLBDescFast, table);
|
2016-07-14 21:43:06 +02:00
|
|
|
unsigned s_bits = opc & MO_SIZE;
|
|
|
|
unsigned a_bits = get_alignment_bits(opc);
|
2013-03-13 07:18:30 +01:00
|
|
|
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
/*
|
|
|
|
* We don't support inline unaligned acceses, but we can easily
|
|
|
|
* support overalignment checks.
|
|
|
|
*/
|
|
|
|
if (a_bits < s_bits) {
|
|
|
|
a_bits = s_bits;
|
|
|
|
}
|
2019-01-23 05:33:03 +01:00
|
|
|
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
|
|
|
|
if (use_armv6_instructions) {
|
|
|
|
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
|
|
|
|
tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Extract the tlb index from the address into R0. */
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
|
2019-01-23 05:33:03 +01:00
|
|
|
SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
|
|
|
|
|
|
|
|
/*
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
|
|
|
|
* Load the tlb comparator into R2/R3 and the fast path addend into R1.
|
2019-01-23 05:33:03 +01:00
|
|
|
*/
|
|
|
|
if (cmp_off == 0) {
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
|
|
|
|
tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
|
2019-01-23 05:33:03 +01:00
|
|
|
} else {
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
|
2013-03-13 07:18:30 +01:00
|
|
|
}
|
2019-01-23 05:33:03 +01:00
|
|
|
} else {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
|
2019-01-23 05:33:03 +01:00
|
|
|
if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
|
2019-01-23 05:33:03 +01:00
|
|
|
} else {
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
|
|
|
|
}
|
2019-01-23 05:33:03 +01:00
|
|
|
}
|
|
|
|
if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
|
2012-10-09 21:53:11 +02:00
|
|
|
}
|
2013-03-13 02:18:07 +01:00
|
|
|
|
2017-07-27 22:16:16 +02:00
|
|
|
/* Load the tlb addend. */
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
|
2019-01-23 05:33:03 +01:00
|
|
|
offsetof(CPUTLBEntry, addend));
|
2017-07-27 22:16:16 +02:00
|
|
|
|
2019-05-07 19:33:44 +02:00
|
|
|
/*
|
|
|
|
* Check alignment, check comparators.
|
|
|
|
* Do this in no more than 3 insns. Use MOVW for v7, if possible,
|
|
|
|
* to reduce the number of sequential conditional instructions.
|
|
|
|
* Almost all guests have at least 4k pages, which means that we need
|
|
|
|
* to clear at least 9 bits even for an 8-byte memory, which means it
|
|
|
|
* isn't worth checking for an immediate operand for BIC.
|
|
|
|
*/
|
|
|
|
if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
|
2017-07-27 22:16:16 +02:00
|
|
|
tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));
|
2013-08-30 18:48:56 +02:00
|
|
|
|
2019-05-07 19:33:44 +02:00
|
|
|
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
|
|
|
|
addrlo, TCG_REG_TMP, 0);
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
|
2017-07-27 22:16:16 +02:00
|
|
|
} else {
|
|
|
|
if (a_bits) {
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
|
|
|
|
(1 << a_bits) - 1);
|
|
|
|
}
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
|
|
|
|
SHIFT_IMM_LSR(TARGET_PAGE_BITS));
|
2017-07-27 22:16:16 +02:00
|
|
|
tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
0, TCG_REG_R2, TCG_REG_TMP,
|
2017-07-27 22:16:16 +02:00
|
|
|
SHIFT_IMM_LSL(TARGET_PAGE_BITS));
|
|
|
|
}
|
2013-03-13 07:18:30 +01:00
|
|
|
|
2013-03-13 02:18:07 +01:00
|
|
|
if (TARGET_LONG_BITS == 64) {
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
|
2013-03-13 02:18:07 +01:00
|
|
|
}
|
2013-08-30 17:16:00 +02:00
|
|
|
|
tcg/arm: Use LDRD to load tlb mask+table
This changes the code generation for the tlb from e.g.
ldr ip, [r6, #-0x10]
ldr r2, [r6, #-0xc]
and ip, ip, r4, lsr #8
ldrd r0, r1, [r2, ip]!
ldr r2, [r2, #0x18]
to
ldrd r0, r1, [r6, #-0x10]
and r0, r0, r4, lsr #8
ldrd r2, r3, [r1, r0]!
ldr r1, [r1, #0x18]
for armv7 hosts. Rearranging the register allocation in
order to avoid overlap between the two ldrd pairs causes
the patch to be larger than it ordinarily would be.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-23 08:46:23 +01:00
|
|
|
return TCG_REG_R1;
|
2013-03-13 02:18:07 +01:00
|
|
|
}
|
2013-03-13 23:24:33 +01:00
|
|
|
|
|
|
|
/* Record the context of a call to the out of line helper code for the slow
|
|
|
|
path for a load or store, so that we can later generate the correct
|
|
|
|
helper code. */
|
2015-05-13 18:10:33 +02:00
|
|
|
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
|
2013-09-12 18:07:38 +02:00
|
|
|
TCGReg datalo, TCGReg datahi, TCGReg addrlo,
|
2015-05-13 18:10:33 +02:00
|
|
|
TCGReg addrhi, tcg_insn_unit *raddr,
|
|
|
|
tcg_insn_unit *label_ptr)
|
2013-03-13 23:24:33 +01:00
|
|
|
{
|
2013-10-03 21:51:24 +02:00
|
|
|
TCGLabelQemuLdst *label = new_ldst_label(s);
|
2013-03-13 23:24:33 +01:00
|
|
|
|
|
|
|
label->is_ld = is_ld;
|
2015-05-13 18:10:33 +02:00
|
|
|
label->oi = oi;
|
2013-09-12 18:07:38 +02:00
|
|
|
label->datalo_reg = datalo;
|
|
|
|
label->datahi_reg = datahi;
|
|
|
|
label->addrlo_reg = addrlo;
|
|
|
|
label->addrhi_reg = addrhi;
|
2013-03-13 23:24:33 +01:00
|
|
|
label->raddr = raddr;
|
|
|
|
label->label_ptr[0] = label_ptr;
|
|
|
|
}
|
|
|
|
|
2019-04-21 23:51:00 +02:00
|
|
|
/*
 * Emit the slow-path code for a guest load: patch the forward branch,
 * marshal (env, addr, oi, retaddr) into the calling convention, call
 * the appropriate helper, move the result into place, and jump back.
 * Returns false if the branch could not be relocated.
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    void *func;

    /* Resolve the fast-path branch to point at this slow path. */
    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    /* Argument 0 is ENV; then the (possibly 64-bit) guest address. */
    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /*
     * For armv6 we can use the canonical unsigned helpers and minimize
     * icache usage.  For pre-armv6, use the signed helpers since we do
     * not have a single insn sign-extend.
     */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            /* Helper already sign-extended; treat result as plain 32-bit. */
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    /* Move the helper result from R0 (R0:R1 for 64-bit) into place. */
    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        /* Order the two moves so neither clobbers a still-needed source. */
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            /* Registers are swapped (datalo==R1, datahi==R0): go via TMP. */
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}
|
|
|
|
|
2019-04-21 23:51:00 +02:00
|
|
|
/*
 * Emit the slow-path code for a guest store: patch the forward branch,
 * marshal (env, addr, data, oi, retaddr) per the calling convention and
 * tail-call the store helper.  Returns false if relocation failed.
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    /* Resolve the fast-path branch to point at this slow path. */
    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    /* Argument 0 is ENV; then the (possibly 64-bit) guest address. */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    /* Pass the store data, sized per the memory operation. */
    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
|
2013-03-13 02:18:07 +01:00
|
|
|
#endif /* SOFTMMU */
|
|
|
|
|
2019-08-23 20:10:58 +02:00
|
|
|
/*
 * Emit a guest load through a register addend (TLB addend or guest_base),
 * i.e. from [addrlo + addend], byte-swapping the result when the memop
 * requests the opposite endianness.
 */
static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        /* For the swapped case, load unsigned and sign-extend via bswap. */
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            /* When swapping, load each half into the opposite register. */
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned. */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                /* Write-back form: addend += addrlo, then load the pair. */
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                /* dl aliases addend: compute the address into TMP first. */
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2019-08-23 20:10:58 +02:00
|
|
|
/*
 * Emit a guest load directly from [addrlo] (user-only emulation with
 * guest_base == 0), byte-swapping when the memop requests the opposite
 * endianness.
 */
static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        /* For the swapped case, load unsigned and sign-extend via bswap. */
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            /* When swapping, load each half into the opposite register. */
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned. */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                /* dl aliases the address: load the other half first. */
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}
|
|
|
|
|
2013-09-13 00:06:23 +02:00
|
|
|
/*
 * Generate code for a qemu_ld opcode: unpack the TCG arguments, then
 * either emit the softmmu TLB fast path plus a patched slow-path call,
 * or (user-only) a direct/guest_base-relative load.
 */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    /* Unpack: data (one or two regs), address (one or two regs), memop. */
    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /*
     * This a conditional BL only to load a pointer within this opcode
     * into LR for the slow path.  We will not be using the value for a
     * tail call.
     */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
|
|
|
|
|
2019-08-23 20:10:58 +02:00
|
|
|
/*
 * Emit a guest store through a register addend (TLB addend or
 * guest_base), i.e. to [addrlo + addend], byte-swapping the data via
 * R0 when the memop requests the opposite endianness.  The condition
 * 'cond' lets the softmmu fast path predicate the store on a TLB hit.
 */
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            /* Swap into R0 so the source register is left untouched. */
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            /* Swapped halves: store hi at +0 (with write-back), lo at +4. */
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2019-08-23 20:10:58 +02:00
|
|
|
/*
 * Emit a guest store whose address is simply ADDRLO (no addend register):
 * the user-only / guest_base==0 path of tcg_out_qemu_st.  DATAHI is only
 * used for MO_64.  TCG_REG_R0 is used as a scratch for byte-swapping.
 */
static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    /* Non-zero iff the guest memory order differs from the host's. */
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            /* Swap into the scratch register, then store the scratch. */
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned. */
        if (bswap) {
            /* Byte-swapped 64-bit: store high word first, at offset 0. */
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            /* STRD requires an even/odd consecutive register pair. */
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}
|
|
|
|
|
|
|
|
/*
 * Emit code for a qemu_st opcode: store DATALO(/DATAHI for 64-bit) to the
 * guest address in ADDRLO(/ADDRHI for 64-bit guest addresses).  ARGS is
 * the TCG argument vector in the order data[, datahi], addr[, addrhi], oi.
 * With CONFIG_SOFTMMU this performs an inline TLB lookup and emits a
 * conditional call to the slow path for misses.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    /* TLB lookup; on return the comparison flags indicate hit (EQ). */
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    /* Store conditional on the TLB hit. */
    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here. */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    /* Record the slow-path branch so it can be patched later. */
    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        /* guest_base won't fit an immediate; materialize it in TMP. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
|
|
|
|
|
2014-04-24 23:23:40 +02:00
|
|
|
static tcg_insn_unit *tb_ret_addr;
|
2008-05-20 01:59:38 +02:00
|
|
|
|
2010-03-19 19:12:29 +01:00
|
|
|
/*
 * Central code generator: emit ARM host code for one TCG opcode OPC.
 * ARGS holds the operands; CONST_ARGS flags which of them are constants
 * (per the constraints returned by tcg_target_op_def below).
 */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        a0 = args[0];
        if (a0 == 0) {
            tcg_out_goto(s, COND_AL, s->code_gen_epilogue);
        } else {
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
            tcg_out_goto(s, COND_AL, tb_ret_addr);
        }
        break;
    case INDEX_op_goto_tb:
        {
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
            /* Displacement from pc (which reads as insn address + 8). */
            dif = ptr - ((intptr_t)s->code_ptr + 8);
            dil = sextract32(dif, 0, 12);
            if (dif != dil) {
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   be no worse off.  */
                base = TCG_REG_R0;
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            }
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                /* Both operands constant: fold at translation time. */
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                /* const - reg: reverse subtract. */
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* Redirect the low result to TMP if it would clobber an input
           still needed for the high half. */
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        /* neg = rsb with immediate 0. */
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        /* Immediate shift-right by 0 encodes as 32; use LSL #0 instead. */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        /* No rotate-left insn; rotate right by (32 - n) instead. */
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_ctz_i32:
        /* ctz(x) == clz(bit-reverse(x)). */
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            /* Fallback value 32 matches what CLZ returns for zero input. */
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                       args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        /* Conditionally set 1, then conditionally set 0 on the inverse. */
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimization vs zero should be generic. */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
|
|
|
|
|
2017-09-14 02:38:44 +02:00
|
|
|
/*
 * Return the register/immediate constraints for opcode OP, or NULL if the
 * opcode is unsupported.  Constraint letters: "r" general register,
 * "l"/"s" ld/st address registers, "I"/"K"/"N" immediate variants,
 * "Z" zero, "0" must match output operand 0.
 */
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } };
    static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } };
    static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rIN
        = { .args_ct_str = { "r", "r", "rIN" } };
    static const TCGTargetOpDef r_r_rIK
        = { .args_ct_str = { "r", "r", "rIK" } };
    static const TCGTargetOpDef r_r_r_r
        = { .args_ct_str = { "r", "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l_l
        = { .args_ct_str = { "r", "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s_s
        = { .args_ct_str = { "s", "s", "s", "s" } };
    static const TCGTargetOpDef br
        = { .args_ct_str = { "r", "rIN" } };
    static const TCGTargetOpDef ext2
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "rI", "rI" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "rI", "rI" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return &r_r_rIN;
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return &r_r_rIK;
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return &r_r_r;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_r_r;
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return &r_r_rI;
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
        return &br;
    case INDEX_op_deposit_i32:
        return &dep;
    case INDEX_op_extract2_i32:
        return &ext2;
    case INDEX_op_movcond_i32:
        return &movc;
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i32:
        return &sub2;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;

    /* 64-bit guests need an extra address register for the high half. */
    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;

    default:
        return NULL;
    }
}
|
|
|
|
|
2010-06-03 02:26:56 +02:00
|
|
|
/*
 * One-time backend initialization: probe host CPU capabilities and set up
 * the available/clobbered/reserved register sets.
 */
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
    if (__ARM_ARCH < 7) {
        /* AT_PLATFORM is a string like "v7l"; pull the arch digit. */
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    /* r0-r15 are all usable for 32-bit values. */
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;

    /* Registers not preserved across a call: r0-r3, r12 (ip), r14 (lr). */
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    /* Never allocate: stack pointer, the backend scratch, and pc. */
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
}
|
|
|
|
|
2011-11-09 09:03:34 +01:00
|
|
|
/* Load ARG (32-bit; the only register type on this backend) from
   ARG1 + ARG2. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
|
|
|
|
|
2011-11-09 09:03:34 +01:00
|
|
|
/* Store ARG (32-bit) to ARG1 + ARG2. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
|
|
|
|
|
2016-06-20 07:59:13 +02:00
|
|
|
/* Store-immediate hook.  Returning false indicates this backend does not
   store constants directly; NOTE(review): presumably the common code then
   materializes VAL in a register first -- confirm against tcg.c callers. */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
|
|
|
|
|
2019-03-16 18:48:18 +01:00
|
|
|
/* Register-to-register move; always succeeds (returns true). */
static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_mov_reg(s, COND_AL, ret, arg);
    return true;
}
|
|
|
|
|
|
|
|
/* Load the constant ARG into register RET. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
|
|
|
|
|
2017-07-28 05:47:56 +02:00
|
|
|
/* Fill COUNT instruction slots starting at P with the no-op encoding. */
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    while (count-- > 0) {
        *p++ = INSN_NOP;
    }
}
|
|
|
|
|
2013-06-05 16:55:33 +02:00
|
|
|
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

/* Bytes pushed by the prologue's "stmdb sp!, {r4-r11, lr}": 9 words. */
#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

/* Total frame: pushed registers + outgoing call args + temp buffer,
   rounded up to the target stack alignment. */
#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)
|
|
|
|
|
2010-06-03 02:26:56 +02:00
|
|
|
/*
 * Emit the prologue/epilogue that brackets all generated code: save the
 * callee-saved registers, reserve the frame, load env into AREG0, and jump
 * to the translated-code entry point passed as the second C argument.
 * Also records s->code_gen_epilogue (goto_ptr return path) and tb_ret_addr
 * (exit_tb return path).
 */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int stack_addend;

    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Reserve callee argument and tcg temp space.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;

    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* First C argument is the CPU env pointer. */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Second C argument is the address of translated code to run. */
    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}
|
2013-06-05 16:55:33 +02:00
|
|
|
|
|
|
|
/* DWARF call-frame information describing the prologue above, so host
   debuggers/unwinders can walk through JIT-generated frames. */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,            /* lr */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};
|
|
|
|
|
|
|
|
/* Register the generated-code buffer and its unwind info (debug_frame
   above) with the JIT debugging interface. */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
|