/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/tcg-op-common.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif
#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"

#ifdef CONFIG_USER_ONLY
#include "exec/user/guest-base.h"
#endif
/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
    tcg_insn_unit *label_ptr[2];  /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
typedef struct TCGLdstHelperParam {
    /*
     * If non-null, emit the return address argument into a register;
     * otherwise l->raddr is passed as an immediate.
     */
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;      /* number of scratch registers in tmp[] */
    int tmp[3];         /* scratch registers for marshalling arguments */
} TCGLdstHelperParam;

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};
static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};
typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;
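/*
 * For example, { .atom = MO_32, .align = MO_32 } describes a 4-byte
 * access that must be single-copy atomic and 4-byte aligned; both
 * fields hold lg2 of the byte count (illustrative values, not ones
 * computed here).
 */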
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env tcg_env;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}
static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}
TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}
static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}
static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}
#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
static int tlb_mask_table_ofs(TCGContext *s, int which)
{
    /*
     * CPUNegativeOffsetState is laid out immediately before CPUArchState,
     * so the result is a negative offset from the env pointer.
     */
    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
            sizeof(CPUNegativeOffsetState));
}
#endif
/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}
/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;
/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
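/*
 * An illustrative call (not taken from this file):
 * tcg_out_movext(s, TCG_TYPE_I64, dest, TCG_TYPE_I32, MO_SW, src)
 * sign-extends the 16-bit value held in 32-bit register @src into
 * the 64-bit register @dest.
 */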
/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}

/**
 * tcg_out_movext2 -- move and extend two pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}
/**
 * tcg_out_movext3 -- move and extend three pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle. Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
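/*
 * C_N1_O1_I4 marks the first output as requiring a brand-new
 * (earlyclobber) register: it was introduced for the i386 and s390x
 * op_add2 implementations, which break (e.g. s390x VCKSM) when the
 * first output is allowed to alias an input.
 */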
typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4
/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
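/*
 * As an illustration (not a line from this file): an entry
 * C_O1_I2(r, r, ri) in tcg-target-con-set.h produced the enumerator
 * c_o1_i2_r_r_ri earlier, and with the definitions above it produces
 * the array entry { .args_ct_str = { "r", "r", "ri" } } at the same
 * index in constraint_sets[] below.
 */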
static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4
/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
#include "tcg-target.c.inc"

#ifndef CONFIG_TCG_INTERPRETER
/* Validate CPUTLBDescFast placement. */
QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                        sizeof(CPUNegativeOffsetState))
                  < MIN_TLB_MASK_TABLE_OFS);
#endif
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
/* pool based memory allocation */
|
|
|
|
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;

    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

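/*
 * Illustrative sketch (not QEMU code): a typical transient allocation
 * carved from the pool during translation.  Small requests are served
 * from reusable TCG_POOL_CHUNK_SIZE chunks, and everything is reclaimed
 * at once by tcg_pool_reset().  The function name below is hypothetical.
 *
 *     static void example_scratch(TCGContext *s, int n)
 *     {
 *         int *tmp = tcg_malloc_internal(s, n * sizeof(int));
 *         // ... use tmp while translating one TB ...
 *         tcg_pool_reset(s);  // releases tmp and all other pool data
 *     }
 */
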
/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif

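/*
 * A worked reading of the dh_typemask() encodings used below, assuming
 * the 3-bit dh_typecode_* values from "exec/helper-head.h" (e.g. i32 is
 * 2 and i64 is 4; the exact numbers are an assumption here): slot 0
 * holds the return type in bits [2:0], and argument n occupies bits
 * [3n+2:3n].  So a helper returning uint64_t with one i32 argument
 * would have typemask (4 << 0) | (2 << 3) == 0x14.
 */
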
static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)  /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)  /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0) /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* uint32_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i64, 3)  /* uint64_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i128, 3) /* Int128 data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}

static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
{
    unsigned typemask = info->typemask;
    struct {
        ffi_cif cif;
        ffi_type *args[];
    } *ca;
    ffi_status status;
    int nargs;

    /* Ignoring the return type, find the last non-zero field. */
    nargs = 32 - clz32(typemask >> 3);
    nargs = DIV_ROUND_UP(nargs, 3);
    assert(nargs <= MAX_CALL_IARGS);

    ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
    ca->cif.rtype = typecode_to_ffi(typemask & 7);
    ca->cif.nargs = nargs;

    if (nargs != 0) {
        ca->cif.arg_types = ca->args;
        for (int j = 0; j < nargs; ++j) {
            int typecode = extract32(typemask, (j + 1) * 3, 3);
            ca->args[j] = typecode_to_ffi(typecode);
        }
    }

    status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                          ca->cif.rtype, ca->cif.arg_types);
    assert(status == FFI_OK);

    return &ca->cif;
}

#define HELPER_INFO_INIT(I)      (&(I)->cif)
#define HELPER_INFO_INIT_VAL(I)  init_ffi_layout(I)
#else
#define HELPER_INFO_INIT(I)      (&(I)->init)
#define HELPER_INFO_INIT_VAL(I)  1
#endif /* CONFIG_TCG_INTERPRETER */

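/*
 * Worked example for the nargs computation in init_ffi_layout() above:
 * for the ld64 helper layout (env, addr, oi, ra after the return slot),
 * typemask >> 3 packs the four argument codes into bits [11:0], so
 * 32 - clz32(typemask >> 3) == 12 and DIV_ROUND_UP(12, 3) == 4
 * arguments.  This assumes the last (ptr) typecode has a high bit set,
 * which holds for the usual "exec/helper-head.h" encodings.
 */
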
static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}

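/*
 * Illustration, assuming a backend with 6 integer argument registers
 * and TCG_TARGET_CALL_STACK_OFFSET == 0: slots 0-5 satisfy
 * arg_slot_reg_p(), slot 6 is the first stack word, and
 * arg_slot_stk_ofs(8) == 2 * sizeof(tcg_target_long).
 */
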
typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}

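/*
 * Example: with arg_slot == 3, the "+= arg_slot & 1" above advances to
 * slot 4, leaving slot 3 as pad; an already-even slot is unchanged.
 */
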
static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with
     * structures passed by reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
    cum->ref_slot += n;
}

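/*
 * Sketch of the resulting layout on a 64-bit host (n == 2): the Int128
 * argument consumes one ordinary slot for the pointer plus two ref
 * slots that hold the copied data; init_call_layout() below rebases
 * those ref slots to sit past the last parameter on the stack.
 */
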
static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}

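/*
 * Worked example (illustrative): laying out info_helper_st64_mmu on a
 * 32-bit host whose backend uses TCG_CALL_ARG_EVEN for i64: env takes
 * slot 0, the uint64_t addr is bumped from slot 1 to the even pair 2/3,
 * the uint64_t data fills 4/5, oi takes 6 and ra takes 7.  Whether each
 * slot is a register or a stack word is resolved later via
 * arg_slot_reg_p().
 */
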
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region.  See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    tcg_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}

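/*
 * Example of the alignment arithmetic above: with a 64-byte icache line
 * and code_gen_ptr == base + 100, the TB is placed at base + 128 and
 * "next" is the first 64-byte boundary past tb + 1, so the TB header
 * and its translated code never share a cache line.
 */
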
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .long 0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);

    tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
                     s->addr_type == TCG_TYPE_I64);

    tcg_debug_assert(s->insn_start_words > 0);
}

static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

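/*
 * Illustration: on a 32-bit host, registering a 64-bit guest register
 * at env + 0x10 under the (hypothetical) name "reg" yields two adjacent
 * TCGTemps "reg_0" and "reg_1" at offsets 0x10 and 0x14, the second
 * with temp_subindex == 1; on a 64-bit host it remains a single temp.
 */
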
TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int n;

    if (kind == TEMP_EBB) {
        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);

        if (idx < TCG_MAX_TEMPS) {
            /* There is already an available temp with the right type.  */
            clear_bit(idx, s->free_temps[type].l);

            ts = &s->temps[idx];
            ts->temp_allocated = 1;
            tcg_debug_assert(ts->base_type == type);
            tcg_debug_assert(ts->kind == kind);
            return ts;
        }
    } else {
        tcg_debug_assert(kind == TEMP_TB);
    }

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        n = 1;
        break;
    case TCG_TYPE_I64:
        n = 64 / TCG_TARGET_REG_BITS;
        break;
    case TCG_TYPE_I128:
        n = 128 / TCG_TARGET_REG_BITS;
        break;
    default:
        g_assert_not_reached();
    }

    ts = tcg_temp_alloc(s);
    ts->base_type = type;
    ts->temp_allocated = 1;
    ts->kind = kind;

    if (n == 1) {
        ts->type = type;
    } else {
        ts->type = TCG_TYPE_REG;

        for (int i = 1; i < n; ++i) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + i);
            ts2->base_type = type;
            ts2->type = TCG_TYPE_REG;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = i;
            ts2->kind = kind;
        }
    }
    return ts;
}

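/*
 * Example: tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB) on a 64-bit
 * host allocates two adjacent TCGTemps of type TCG_TYPE_REG, the second
 * with temp_subindex == 1.  Only TEMP_EBB temps are recycled through
 * free_temps above; TEMP_TB temps are always freshly allocated.
 */
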
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;

    switch (ts->kind) {
    case TEMP_CONST:
    case TEMP_TB:
        /* Silently ignore free. */
        break;
    case TEMP_EBB:
        tcg_debug_assert(ts->temp_allocated != 0);
        ts->temp_allocated = 0;
        set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
        break;
    default:
        /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
        g_assert_not_reached();
    }
}

TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
|
|
|
|
{
|
|
|
|
TCGContext *s = tcg_ctx;
|
|
|
|
GHashTable *h = s->const_table[type];
|
|
|
|
TCGTemp *ts;
|
|
|
|
|
|
|
|
if (h == NULL) {
|
|
|
|
h = g_hash_table_new(g_int64_hash, g_int64_equal);
|
|
|
|
s->const_table[type] = h;
|
|
|
|
}
|
|
|
|
|
|
|
|
ts = g_hash_table_lookup(h, &val);
|
|
|
|
if (ts == NULL) {
|
2022-10-19 03:53:27 +02:00
|
|
|
int64_t *val_ptr;
|
|
|
|
|
2020-03-30 03:55:52 +02:00
|
|
|
ts = tcg_temp_alloc(s);
|
|
|
|
|
|
|
|
if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
|
|
|
|
TCGTemp *ts2 = tcg_temp_alloc(s);
|
|
|
|
|
2022-10-19 03:53:27 +02:00
|
|
|
tcg_debug_assert(ts2 == ts + 1);
|
|
|
|
|
2020-03-30 03:55:52 +02:00
|
|
|
ts->base_type = TCG_TYPE_I64;
|
|
|
|
ts->type = TCG_TYPE_I32;
|
|
|
|
ts->kind = TEMP_CONST;
|
|
|
|
ts->temp_allocated = 1;
|
|
|
|
|
|
|
|
ts2->base_type = TCG_TYPE_I64;
|
|
|
|
ts2->type = TCG_TYPE_I32;
|
|
|
|
ts2->kind = TEMP_CONST;
|
|
|
|
ts2->temp_allocated = 1;
|
2022-10-19 03:26:37 +02:00
|
|
|
ts2->temp_subindex = 1;
|
2022-10-19 03:53:27 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Retain the full value of the 64-bit constant in the low
|
|
|
|
* part, so that the hash table works. Actual uses will
|
|
|
|
* truncate the value to the low part.
|
|
|
|
*/
|
|
|
|
ts[HOST_BIG_ENDIAN].val = val;
|
|
|
|
ts[!HOST_BIG_ENDIAN].val = val >> 32;
|
|
|
|
val_ptr = &ts[HOST_BIG_ENDIAN].val;
|
2020-03-30 03:55:52 +02:00
|
|
|
} else {
|
|
|
|
ts->base_type = type;
|
|
|
|
ts->type = type;
|
|
|
|
ts->kind = TEMP_CONST;
|
|
|
|
ts->temp_allocated = 1;
|
|
|
|
ts->val = val;
|
2022-10-19 03:53:27 +02:00
|
|
|
val_ptr = &ts->val;
|
2020-03-30 03:55:52 +02:00
|
|
|
}
|
2022-10-19 03:53:27 +02:00
|
|
|
g_hash_table_insert(h, val_ptr, ts);
|
2020-03-30 03:55:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return ts;
|
|
|
|
}
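
/*
 * Worked example for the 32-bit host split above, little-endian case:
 * with val = 0x1122334455667788, ts[0].val holds the full 64-bit value
 * (used as the hash key; uses truncate it to 0x55667788) and ts[1].val
 * holds the high part 0x11223344.  On a big-endian host the roles of
 * ts[0] and ts[1] are exchanged.
 */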

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}
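
/*
 * dup_const() replicates the low 8 << vece bits across the 64-bit value,
 * so e.g. vece = MO_8 with val = 0x2a interns the constant
 * 0x2a2a2a2a2a2a2a2a, representing a vector with every byte equal to 0x2a.
 */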

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}

#ifdef CONFIG_DEBUG_TCG
size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);

    assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
    assert(o % sizeof(TCGTemp) == 0);

    return (void *)tcg_ctx + (uintptr_t)v;
}
#endif /* CONFIG_DEBUG_TCG */

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return true;

    case INDEX_op_qemu_st8_a32_i32:
    case INDEX_op_qemu_st8_a64_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        return TCG_TARGET_HAS_qemu_ldst_i128;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_negsetcond_i32:
        return TCG_TARGET_HAS_negsetcond_i32;
    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_negsetcond_i64:
        return TCG_TARGET_HAS_negsetcond_i64;
    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extr_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}
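
/*
 * The default case covers opcodes past INDEX_op_last_generic, i.e. the
 * target-specific ones; the assumption here is that a backend defines
 * such an opcode only when it can also emit it, so they are reported
 * as supported.
 */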

static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);

static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
{
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
        init_call_layout(info);
        g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
    }

    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* Flag helpers that may affect guest state */
    if (tcg_ctx->plugin_insn &&
        !(info->flags & TCG_CALL_PLUGIN) &&
        !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
    case 4:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
        tcg_debug_assert(ret->temp_subindex == 0);
        for (i = 0; i < n; ++i) {
            op->args[pi++] = temp_arg(ret + i);
        }
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_BY_REF:
        case TCG_CALL_ARG_BY_REF_N:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_ebb_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)info->func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}
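
/*
 * For the EXTEND_U/EXTEND_S argument kinds above, the 32-bit value is
 * widened into a fresh EBB i64 temp before the call op is emitted; those
 * temps are collected in extend_free[] and released once the op is on the
 * list, as the widened copy is only needed for this one call.
 */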

void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
{
    tcg_gen_callN(info, ret, NULL);
}

void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
{
    tcg_gen_callN(info, ret, &t1);
}

void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
{
    TCGTemp *args[2] = { t1, t2 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3)
{
    TCGTemp *args[3] = { t1, t2, t3 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
{
    TCGTemp *args[4] = { t1, t2, t3, t4 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
{
    TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
                   TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
{
    TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
    tcg_gen_callN(info, ret, args);
}

void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
                   TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
                   TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
{
    TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
    tcg_gen_callN(info, ret, args);
}
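
/*
 * These fixed-arity wrappers simply marshal their arguments into the
 * array form expected by tcg_gen_callN(); they exist so callers with a
 * known argument count need not build the TCGTemp array themselves.
 */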

static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_EBB:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_TB:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_TB:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_EBB:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEUQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEUQ] = "beq",
    [MO_128 + MO_BE] = "beo",
    [MO_128 + MO_LE] = "leo",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
    [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "",
    [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
    [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+",
    [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
    [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+",
    [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+",
};

static const char bswap_flag_name[][6] = {
    [TCG_BSWAP_IZ] = "iz",
    [TCG_BSWAP_OZ] = "oz",
    [TCG_BSWAP_OS] = "os",
    [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
    [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
};

static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}
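
/*
 * (d & (d - 1)) clears the lowest set bit, so the test above is true
 * exactly when the set contains at most one register.
 */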

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}

/* Return only the number of characters output -- no error return. */
#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
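
/*
 * The GNU statement expression lets ne_fprintf() be used inline, as in
 * "col += ne_fprintf(...)": a negative (error) return from fprintf() is
 * clamped to 0 so the running column count never moves backwards.
 */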

static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += ne_fprintf(f, "\n ----");

            for (i = 0, k = s->insn_start_words; i < k; ++i) {
                col += ne_fprintf(f, " %016" PRIx64,
                                  tcg_get_insn_start_param(op, i));
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += ne_fprintf(f, " %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += ne_fprintf(f, "%s", info->name);
            } else {
                col += ne_fprintf(f, "plugin(%p)", func);
            }

            col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                            op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                col += ne_fprintf(f, ",%s", t);
            }
        } else {
            col += ne_fprintf(f, " %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
                                  8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_negsetcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_negsetcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
                } else {
                    col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_a32_i32:
            case INDEX_op_qemu_ld_a64_i32:
            case INDEX_op_qemu_st_a32_i32:
            case INDEX_op_qemu_st_a64_i32:
            case INDEX_op_qemu_st8_a32_i32:
            case INDEX_op_qemu_st8_a64_i32:
            case INDEX_op_qemu_ld_a32_i64:
            case INDEX_op_qemu_ld_a64_i64:
            case INDEX_op_qemu_st_a32_i64:
            case INDEX_op_qemu_st_a64_i64:
            case INDEX_op_qemu_ld_a32_i128:
            case INDEX_op_qemu_ld_a64_i128:
            case INDEX_op_qemu_st_a32_i128:
            case INDEX_op_qemu_st_a64_i128:
                {
                    const char *s_al, *s_op, *s_at;
                    MemOpIdx oi = op->args[k++];
                    MemOp mop = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
                    s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
                    s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
                    mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);

                    /* If all fields are accounted for, print symbolically. */
                    if (!mop && s_al && s_op && s_at) {
                        col += ne_fprintf(f, ",%s%s%s,%u",
                                          s_at, s_al, s_op, ix);
                    } else {
                        mop = get_memop(oi);
                        col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += ne_fprintf(f, ",%s", name);
                    } else {
                        col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
                    }
                    i = k = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += ne_fprintf(f, "%s$L%d", k ? "," : "",
                                  arg_label(op->args[k])->id);
                i++, k++;
                break;
            case INDEX_op_mb:
                {
                    TCGBar membar = op->args[k];
                    const char *b_op, *m_op;

                    switch (membar & TCG_BAR_SC) {
                    case 0:
                        b_op = "none";
                        break;
                    case TCG_BAR_LDAQ:
                        b_op = "acq";
                        break;
                    case TCG_BAR_STRL:
                        b_op = "rel";
                        break;
                    case TCG_BAR_SC:
                        b_op = "seq";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    switch (membar & TCG_MO_ALL) {
                    case 0:
                        m_op = "none";
                        break;
                    case TCG_MO_LD_LD:
                        m_op = "rr";
                        break;
                    case TCG_MO_LD_ST:
                        m_op = "rw";
                        break;
                    case TCG_MO_ST_LD:
                        m_op = "wr";
                        break;
                    case TCG_MO_ST_ST:
                        m_op = "ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST:
                        m_op = "rr+rw";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD:
                        m_op = "rr+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_ST:
                        m_op = "rr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rw+wr";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rw+ww";
                        break;
                    case TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "wr+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
                        m_op = "rr+rw+wr";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
                        m_op = "rr+rw+ww";
                        break;
                    case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rr+wr+ww";
                        break;
                    case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
                        m_op = "rw+wr+ww";
                        break;
                    case TCG_MO_ALL:
                        m_op = "all";
                        break;
                    default:
                        g_assert_not_reached();
                    }

                    col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
                    i++, k++;
                }
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
                                  op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', f);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                ne_fprintf(f, " sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                ne_fprintf(f, " dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = output_pref(op, i);

                if (i == 0) {
                    ne_fprintf(f, " pref=");
                } else {
                    ne_fprintf(f, ",");
                }
                if (set == 0) {
                    ne_fprintf(f, "none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    ne_fprintf(f, "0x%x", (uint32_t)set);
                } else {
                    ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
                }
            }
        }

        putc('\n', f);
    }
}
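
/*
 * For reference, a dumped op formatted by the code above looks roughly
 * like (sketch only; exact spacing and liveness columns vary):
 *
 *     brcond_i32 tmp2,tmp3,lt,$L1              dead: 0 1
 *
 * i.e. opcode name, comma-separated arguments, a symbolic condition from
 * cond_name[], the branch label, then liveness/preference annotations
 * padded out to column 40 when available.
 */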

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n = ctpop64(arg_ct->regs);

    /*
     * Sort constraints of a single register first, which includes output
     * aliases (which must exactly match the input already allocated).
     */
    if (n == 1 || arg_ct->oalias) {
        return INT_MAX;
    }

    /*
     * Sort register pairs next, first then second immediately after.
     * Arbitrarily sort multiple pairs by the index of the first reg;
     * there shouldn't be many pairs.
     */
    switch (arg_ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;
    case 2:
        return (arg_ct->pair_index + 1) * 2 - 1;
    }

    /* Finally, sort by decreasing register count. */
    assert(n > 1);
    return -n;
}
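
/*
 * Resulting order: single-register constraints and output aliases come
 * first (INT_MAX), register pairs follow with the first member sorting
 * just ahead of the second, and the remaining constraints sort by
 * decreasing register count, so the largest (easiest to satisfy) sets
 * are allocated last.
 */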

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}

static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        bool saw_alias_pair = false;
        int i, o, i2, o2, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        /*
         * Macro magic should make it impossible, but double-check that
         * the array index is in range.  Since the signness of an enum
         * is implementation defined, force the result to unsigned.
         */
        unsigned con_set = tcg_target_op_def(op);
        tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
        tdefs = &constraint_sets[con_set];

        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            bool input_p = i >= def->nb_oargs;

            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            switch (*ct_str) {
            case '0' ... '9':
                o = *ct_str - '0';
                tcg_debug_assert(input_p);
                tcg_debug_assert(o < def->nb_oargs);
                tcg_debug_assert(def->args_ct[o].regs != 0);
                tcg_debug_assert(!def->args_ct[o].oalias);
                def->args_ct[i] = def->args_ct[o];
                /* The output sets oalias. */
                def->args_ct[o].oalias = 1;
                def->args_ct[o].alias_index = i;
                /* The input sets ialias. */
                def->args_ct[i].ialias = 1;
                def->args_ct[i].alias_index = o;
                if (def->args_ct[i].pair) {
                    saw_alias_pair = true;
                }
                tcg_debug_assert(ct_str[1] == '\0');
                continue;

            case '&':
                tcg_debug_assert(!input_p);
                def->args_ct[i].newreg = true;
                ct_str++;
                break;

            case 'p': /* plus */
                /* Allocate to the register after the previous. */
                tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
                o = i - 1;
                tcg_debug_assert(!def->args_ct[o].pair);
                tcg_debug_assert(!def->args_ct[o].ct);
                def->args_ct[i] = (TCGArgConstraint){
                    .pair = 2,
                    .pair_index = o,
                    .regs = def->args_ct[o].regs << 1,
                };
                def->args_ct[o].pair = 1;
                def->args_ct[o].pair_index = i;
                tcg_debug_assert(ct_str[1] == '\0');
                continue;

            case 'm': /* minus */
                /* Allocate to the register before the previous. */
                tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
                o = i - 1;
                tcg_debug_assert(!def->args_ct[o].pair);
                tcg_debug_assert(!def->args_ct[o].ct);
                def->args_ct[i] = (TCGArgConstraint){
                    .pair = 1,
                    .pair_index = o,
                    .regs = def->args_ct[o].regs >> 1,
                };
                def->args_ct[o].pair = 2;
                def->args_ct[o].pair_index = i;
                tcg_debug_assert(ct_str[1] == '\0');
                continue;
            }

            do {
                switch (*ct_str) {
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    break;

                /* Include all of the target-specific constraints. */

#undef CONST
#define CONST(CASE, MASK) \
    case CASE: def->args_ct[i].ct |= MASK; break;
#define REGS(CASE, MASK) \
    case CASE: def->args_ct[i].regs |= MASK; break;

#include "tcg-target-con-str.h"

#undef REGS
#undef CONST
                default:
                case '0' ... '9':
                case '&':
                case 'p':
                case 'm':
                    /* Typo in TCGTargetOpDef constraint. */
                    g_assert_not_reached();
                }
            } while (*++ct_str != '\0');
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /*
         * Fix up output pairs that are aliased with inputs.
         * When we created the alias, we copied pair from the output.
         * There are three cases:
         *    (1a) Pairs of inputs alias pairs of outputs.
         *    (1b) One input aliases the first of a pair of outputs.
         *    (2)  One input aliases the second of a pair of outputs.
         *
         * Case 1a is handled by making sure that the pair_index'es are
         * properly updated so that they appear the same as a pair of inputs.
         *
         * Case 1b is handled by setting the pair_index of the input to
         * itself, simply so it doesn't point to an unrelated argument.
         * Since we don't encounter the "second" during the input allocation
         * phase, nothing happens with the second half of the input pair.
         *
         * Case 2 is handled by setting the second input to pair=3, the
         * first output to pair=3, and the pair_index'es to match.
         */
        if (saw_alias_pair) {
            for (i = def->nb_oargs; i < nb_args; i++) {
                /*
                 * Since [0-9pm] must be alone in the constraint string,
                 * the only way they can both be set is if the pair comes
                 * from the output alias.
                 */
                if (!def->args_ct[i].ialias) {
                    continue;
                }
                switch (def->args_ct[i].pair) {
                case 0:
                    break;
                case 1:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 1);
                    tcg_debug_assert(def->args_ct[o2].pair == 2);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 2);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 1b */
                        def->args_ct[i].pair_index = i;
                    }
                    break;
                case 2:
                    o = def->args_ct[i].alias_index;
                    o2 = def->args_ct[o].pair_index;
                    tcg_debug_assert(def->args_ct[o].pair == 2);
                    tcg_debug_assert(def->args_ct[o2].pair == 1);
                    if (def->args_ct[o2].oalias) {
                        /* Case 1a */
                        i2 = def->args_ct[o2].alias_index;
                        tcg_debug_assert(def->args_ct[i2].pair == 1);
                        def->args_ct[i2].pair_index = i;
                        def->args_ct[i].pair_index = i2;
                    } else {
                        /* Case 2 */
                        def->args_ct[i].pair = 3;
                        def->args_ct[o2].pair = 3;
                        def->args_ct[i].pair_index = o2;
                        def->args_ct[o2].pair_index = i;
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }
        }

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}

static void remove_label_use(TCGOp *op, int idx)
{
    TCGLabel *label = arg_label(op->args[idx]);
    TCGLabelUse *use;

    QSIMPLEQ_FOREACH(use, &label->branches, next) {
        if (use->op == op) {
            QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
            return;
        }
    }
    g_assert_not_reached();
}

void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    switch (op->opc) {
    case INDEX_op_br:
        remove_label_use(op, 0);
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        remove_label_use(op, 3);
        break;
    case INDEX_op_brcond2_i32:
        remove_label_use(op, 5);
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;
}

void tcg_remove_ops_after(TCGOp *op)
{
    TCGContext *s = tcg_ctx;

    while (true) {
        TCGOp *last = tcg_last_op();
        if (last == op) {
            return;
        }
        tcg_op_remove(s, last);
    }
}
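
/*
 * This pops ops off the tail of the list until 'op' is the last one
 * again, discarding everything emitted after that point.
 */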

static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op = NULL;

    if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
        QTAILQ_FOREACH(op, &s->free_ops, link) {
            if (nargs <= op->nargs) {
                QTAILQ_REMOVE(&s->free_ops, op, link);
                nargs = op->nargs;
                goto found;
            }
        }
    }

    /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
    nargs = MAX(4, nargs);
    op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);

 found:
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    op->nargs = nargs;

    /* Check for bitfield overflow. */
    tcg_debug_assert(op->nargs == nargs);

    s->nb_ops++;
    return op;
}
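
/*
 * Allocation policy: first try to recycle an op from free_ops that
 * already has room for 'nargs' arguments; otherwise carve a new one out
 * of the TCG pool via tcg_malloc(), rounding up to at least 4 arguments
 * so that most recycled ops can satisfy most future requests.
 */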

TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
{
    TCGOp *op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, unsigned nargs)
{
    TCGOp *new_op = tcg_op_alloc(opc, nargs);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}
2023-03-03 23:22:02 +01:00
|
|
|
static void move_label_uses(TCGLabel *to, TCGLabel *from)
{
    TCGLabelUse *u;

    QSIMPLEQ_FOREACH(u, &from->branches, next) {
        TCGOp *op = u->op;
        switch (op->opc) {
        case INDEX_op_br:
            op->args[0] = label_arg(to);
            break;
        case INDEX_op_brcond_i32:
        case INDEX_op_brcond_i64:
            op->args[3] = label_arg(to);
            break;
        case INDEX_op_brcond2_i32:
            op->args[5] = label_arg(to);
            break;
        default:
            g_assert_not_reached();
        }
    }

    QSIMPLEQ_CONCAT(&to->branches, &from->branches);
}

/* Reachability analysis: remove unreachable code. */
static void __attribute__((noinline))
reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next, *op_prev;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);

            /*
             * Note that the first op in the TB is always a load,
             * so there is always something before a label.
             */
            op_prev = QTAILQ_PREV(op, link);

            /*
             * If we find two sequential labels, move all branches to
             * reference the second label and remove the first label.
             * Do this before branch to next optimization, so that the
             * middle label is out of the way.
             */
            if (op_prev->opc == INDEX_op_set_label) {
                move_label_uses(label, arg_label(op_prev->args[0]));
                tcg_op_remove(s, op_prev);
                op_prev = QTAILQ_PREV(op, link);
            }

            /*
             * Optimization can fold conditional branches to unconditional.
             * If we find a label which is preceded by an unconditional
             * branch to next, remove the branch.  We couldn't do this when
             * processing the branch because any dead code between the branch
             * and label had not yet been removed.
             */
            if (op_prev->opc == INDEX_op_br &&
                label == arg_label(op_prev->args[0])) {
                tcg_op_remove(s, op_prev);
                /* Fall through means insns become live again. */
                dead = false;
            }

            if (QSIMPLEQ_EMPTY(&label->branches)) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that there will ever be, and
                 * there is little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again. */
                dead = false;
                remove = false;
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead. */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions. */
            if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind. */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}

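/*
 * Illustrative example (hypothetical opcode listing, following the pass
 * above): after the optimizer folds a brcond into an unconditional br,
 *
 *     br $L0            <- branch to next: removed
 *     mov_i32 t0, t1    <- dead code after the branch: removed
 *     set_label $L0     <- no remaining uses of $L0: removed
 *
 * all three ops disappear and execution falls straight through.
 */
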
#define TS_DEAD  1
#define TS_MEM   2

#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

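/*
 * Illustrative note (restating the flag encoding above): during liveness
 * analysis each temp's ts->state is a bitmask -- TS_DEAD set means the
 * value is not needed by any later op, TS_MEM set means the value is
 * (also) current in its canonical memory slot.  TS_DEAD | TS_MEM is the
 * "dead and saved" state that globals must reach at function end.
 */
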
/* For liveness_pass_1, the register preferences for a given temp. */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}

/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_FIXED:
        case TEMP_GLOBAL:
        case TEMP_TB:
            state = TS_DEAD | TS_MEM;
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            state = TS_DEAD;
            break;
        default:
            g_assert_not_reached();
        }
        ts->state = state;
        la_reset_pref(ts);
    }
}

/* liveness analysis: sync globals back to memory. */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs. */
            la_reset_pref(&s->temps[i]);
        }
    }
}

/*
 * liveness analysis: conditional branch: all temps are dead unless
 * explicitly live-across-conditional-branch, globals and local temps
 * should be synced.
 */
static void la_bb_sync(TCGContext *s, int ng, int nt)
{
    la_global_sync(s, ng);

    for (int i = ng; i < nt; ++i) {
        TCGTemp *ts = &s->temps[i];
        int state;

        switch (ts->kind) {
        case TEMP_TB:
            state = ts->state;
            ts->state = state | TS_MEM;
            if (state != TS_DEAD) {
                continue;
            }
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            continue;
        default:
            g_assert_not_reached();
        }
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: sync globals back to memory and kill. */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: note live globals crossing calls. */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart. */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}

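/*
 * Illustrative note (restating the function above): a temp that is live
 * across a call should prefer a call-saved register, so its preference
 * set is intersected with ~tcg_target_call_clobber_regs.  If that
 * intersection is empty -- every previously preferred register is
 * clobbered by calls -- the preference restarts from all registers
 * available for the temp's type, again minus the clobbered ones.
 */
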
/*
 * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
 * to TEMP_EBB, if possible.
 */
static void __attribute__((noinline))
liveness_pass_0(TCGContext *s)
{
    void * const multiple_ebb = (void *)(uintptr_t)-1;
    int nb_temps = s->nb_temps;
    TCGOp *op, *ebb;

    for (int i = s->nb_globals; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    /*
     * Represent each EBB by the op at which it begins.  In the case of
     * the first EBB, this is the first op, otherwise it is a label.
     * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
     * within a single EBB, else MULTIPLE_EBB.
     */
    ebb = QTAILQ_FIRST(&s->ops);
    QTAILQ_FOREACH(op, &s->ops, link) {
        const TCGOpDef *def;
        int nb_oargs, nb_iargs;

        switch (op->opc) {
        case INDEX_op_set_label:
            ebb = op;
            continue;
        case INDEX_op_discard:
            continue;
        case INDEX_op_call:
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            break;
        default:
            def = &tcg_op_defs[op->opc];
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            break;
        }

        for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
            TCGTemp *ts = arg_temp(op->args[i]);

            if (ts->kind != TEMP_TB) {
                continue;
            }
            if (ts->state_ptr == NULL) {
                ts->state_ptr = ebb;
            } else if (ts->state_ptr != ebb) {
                ts->state_ptr = multiple_ebb;
            }
        }
    }

    /*
     * For TEMP_TB that turned out not to be used beyond one EBB,
     * reduce the liveness to TEMP_EBB.
     */
    for (int i = s->nb_globals; i < nb_temps; ++i) {
        TCGTemp *ts = &s->temps[i];
        if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
            ts->kind = TEMP_EBB;
        }
    }
}

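/*
 * Illustrative example (hypothetical temp usage, following the pass
 * above): if a TEMP_TB temp is referenced only between one label and
 * the next, every use records the same EBB marker and the temp is
 * downgraded to TEMP_EBB; a further use after another set_label would
 * instead mark it MULTIPLE_EBB and keep it TEMP_TB.
 */
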
/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead.  Instructions updating dead
   temporaries are removed. */
static void __attribute__((noinline))
liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB. */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                const TCGHelperInfo *info = tcg_call_info(op);
                int call_flags = tcg_call_flags(op);

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead. */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);
                }

                /* Not used -- it will be tcg_target_call_oarg_reg(). */
                memset(op->output_pref, 0, sizeof(op->output_pref));

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper. */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs. */
                la_cross_call(s, nb_temps);

                /*
                 * Input arguments are live for preceding opcodes.
                 *
                 * For those arguments that die, and will be allocated in
                 * registers, clear the register set for that arg, to be
                 * filled in below.  For args that will be on the stack,
                 * reset to any available reg.  Process arguments in reverse
                 * order so that if a temp is used more than once, the stack
                 * reset to max happens before the register reset to 0.
                 */
                for (i = nb_iargs - 1; i >= 0; i--) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    if (ts->state & TS_DEAD) {
                        switch (loc->kind) {
                        case TCG_CALL_ARG_NORMAL:
                        case TCG_CALL_ARG_EXTEND_U:
                        case TCG_CALL_ARG_EXTEND_S:
                            if (arg_slot_reg_p(loc->arg_slot)) {
                                *la_temp_pref(ts) = 0;
                                break;
                            }
                            /* fall through */
                        default:
                            *la_temp_pref(ts) =
                                tcg_target_available_regs[ts->type];
                            break;
                        }
                        ts->state &= ~TS_DEAD;
                    }
                }

                /*
                 * For each input argument, add its input register to prefs.
                 * If a temp is used once, this produces a single set bit;
                 * if a temp is used multiple times, this produces a set.
                 */
                for (i = 0; i < nb_iargs; i++) {
                    const TCGCallArgumentLoc *loc = &info->in[i];
                    ts = arg_temp(op->args[nb_oargs + i]);

                    switch (loc->kind) {
                    case TCG_CALL_ARG_NORMAL:
                    case TCG_CALL_ARG_EXTEND_U:
                    case TCG_CALL_ARG_EXTEND_S:
                        if (arg_slot_reg_p(loc->arg_slot)) {
                            tcg_regset_set_reg(*la_temp_pref(ts),
                                tcg_target_call_iarg_regs[loc->arg_slot]);
                        }
                        break;
                    default:
                        break;
                    }
                }
            }
            break;

        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guests when
               the CPU mode is set to 32 bit. */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

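        /*
         * Illustrative example (hypothetical operands, matching the
         * in-place rewrite above): with the high output dead,
         *     add2_i32 lo, hi, al, ah, bl, bh
         * becomes
         *     add_i32  lo, al, bl
         * using args[2] and args[4] as the two low-part inputs.
         */
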
        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

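        /*
         * Illustrative example (hypothetical operands, matching the
         * rewrite above): with the high output dead,
         *     mulu2_i32 lo, hi, a, b   ->   mul_i32   lo, a, b
         * and with only the low output dead, when the host provides
         * the high-part opcode,
         *     mulu2_i32 lo, hi, a, b   ->   muluh_i32 hi, a, b
         */
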
        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead.  We assume that nb_oargs == 0
               implies side effects. */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed. */
                if (i < ARRAY_SIZE(op->output_pref)) {
                    op->output_pref[i] = *la_temp_pref(ts);
                }

                /* Output args are dead. */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update. */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes. */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type. */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand. */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward. */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= output_pref(op, ct->alias_index);
                    }
                    /* If the combination is not possible, restart. */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}

/* Liveness analysis: Convert indirect regs to direct temporaries. */
static bool __attribute__((noinline))
liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global. */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dts->temp_subindex = its->temp_subindex;
            dts->kind = TEMP_EBB;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead. */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = tcg_call_flags(op);
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require. */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals. */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts && arg_ts->state == TS_DEAD) {
                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_ld_i32
                                  : INDEX_op_ld_i64);
                TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);

                lop->args[0] = temp_arg(dir_ts);
                lop->args[1] = temp_arg(arg_ts->mem_base);
                lop->args[2] = arg_ts->mem_offset;

                /* Loaded, but synced with memory. */
                arg_ts->state = TS_MEM;
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[i] = temp_arg(dir_ts);
                changes = true;
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points. */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded. */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available. */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified. */
                arg_ts->state = 0;

                /* Sync outputs upon their last write. */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead. */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}

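/*
 * Illustrative example (hypothetical ops, matching the pass above): for
 * an indirect global g with shadow temp g' and memory slot (env, off),
 * an op that reads and writes g is bracketed by the inserted ld/st:
 *
 *     ld_i32  g', env, off       <- inserted before the first use
 *     add_i32 g', g', t0         <- operands rewritten to the shadow
 *     st_i32  g', env, off       <- inserted after the last write
 */
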
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
    intptr_t off;
    int size, align;

    /* When allocating an object, look at the full type. */
    size = tcg_type_size(ts->base_type);
    switch (ts->base_type) {
    case TCG_TYPE_I32:
        align = 4;
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        align = 8;
        break;
    case TCG_TYPE_I128:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /*
         * Note that we do not require aligned storage for V256,
         * and that we provide alignment for I128 to match V128,
         * even if that's above what the host ABI requires.
         */
        align = 16;
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Assume the stack is sufficiently aligned.
     * This affects e.g. ARM NEON, where we have 8 byte stack alignment
     * and do not require 16 byte vector alignment.  This seems slightly
     * easier than fully parameterizing the above switch statement.
     */
    align = MIN(TCG_TARGET_STACK_ALIGN, align);
    off = ROUND_UP(s->current_frame_offset, align);

    /* If we've exhausted the stack frame, restart with a smaller TB. */
    if (off + size > s->frame_end) {
        tcg_raise_tb_overflow(s);
    }
    s->current_frame_offset = off + size;
#if defined(__sparc__)
    off += TCG_TARGET_STACK_BIAS;
#endif

    /* If the object was subdivided, assign memory to all the parts. */
    if (ts->base_type != ts->type) {
        int part_size = tcg_type_size(ts->type);
        int part_count = size / part_size;

        /*
         * Each part is allocated sequentially in tcg_temp_new_internal.
         * Jump back to the first part by subtracting the current index.
         */
        ts -= ts->temp_subindex;
        for (int i = 0; i < part_count; ++i) {
            ts[i].mem_offset = off + i * part_size;
            ts[i].mem_base = s->frame_temp;
            ts[i].mem_allocated = 1;
        }
    } else {
        ts->mem_offset = off;
        ts->mem_base = s->frame_temp;
        ts->mem_allocated = 1;
    }
}

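/*
 * Illustrative example (hypothetical values, following the function
 * above): a temp with base_type TCG_TYPE_I128 (size 16, align 16) that
 * is represented as TCG_TYPE_I64 parts is subdivided into
 * part_count = 16 / 8 = 2 pieces with mem_offsets off and off + 8,
 * both based on s->frame_temp.
 */
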
/* Assign @reg to @ts, and update reg_to_temp[]. */
static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
{
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg old = ts->reg;
        tcg_debug_assert(s->reg_to_temp[old] == ts);
        if (old == reg) {
            return;
        }
        s->reg_to_temp[old] = NULL;
    }
    tcg_debug_assert(s->reg_to_temp[reg] == NULL);
    s->reg_to_temp[reg] = ts;
    ts->val_type = TEMP_VAL_REG;
    ts->reg = reg;
}

/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
{
    tcg_debug_assert(type != TEMP_VAL_REG);
    if (ts->val_type == TEMP_VAL_REG) {
        TCGReg reg = ts->reg;
        tcg_debug_assert(s->reg_to_temp[reg] == ts);
        s->reg_to_temp[reg] = NULL;
    }
    ts->val_type = type;
}

static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead. */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    TCGTempVal new_type;

    switch (ts->kind) {
    case TEMP_FIXED:
        return;
    case TEMP_GLOBAL:
    case TEMP_TB:
        new_type = TEMP_VAL_MEM;
        break;
    case TEMP_EBB:
        new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        break;
    case TEMP_CONST:
        new_type = TEMP_VAL_CONST;
        break;
    default:
        g_assert_not_reached();
    }
    set_temp_val_nonreg(s, ts, new_type);
}

/* Mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}

/* Sync a temporary to memory.  'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (!temp_readonly(ts) && !ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            g_assert_not_reached();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}

/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference. */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something. */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set. */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    g_assert_not_reached();
}

static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
                                 TCGRegSet allocated_regs,
                                 TCGRegSet preferred_regs, bool rev)
{
    int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    /* Ensure that if I is not in allocated_regs, I+1 is not either. */
    reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /*
     * Skip the preferred_regs option if it cannot be satisfied,
     * or if the preference made no difference.
     */
    k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    /*
     * Minimize the number of flushes by looking for 2 free registers first,
     * then a single flush, then two flushes.
     */
    for (fmin = 2; fmin >= 0; fmin--) {
        for (j = k; j < 2; j++) {
            TCGRegSet set = reg_ct[j];

            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];

                if (tcg_regset_test_reg(set, reg)) {
                    int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
                    if (f >= fmin) {
                        tcg_reg_free(s, reg, allocated_regs);
                        tcg_reg_free(s, reg + 1, allocated_regs);
                        return reg;
                    }
                }
            }
        }
    }
    g_assert_not_reached();
}

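/*
 * Illustrative note (restating the search above): fmin counts how many
 * of the pair {reg, reg + 1} must already be free.  The first sweep
 * (fmin == 2) accepts only pairs where both are free and spills
 * nothing; the second (fmin == 1) accepts pairs needing one spill;
 * the final sweep (fmin == 0) spills both halves.
 */
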
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        if (ts->type <= TCG_TYPE_I64) {
            tcg_out_movi(s, ts->type, reg, ts->val);
        } else {
            uint64_t val = ts->val;
            MemOp vece = MO_64;

            /*
             * Find the minimal vector element that matches the constant.
             * The targets will, in general, have to do this search anyway,
             * so do it generically here.
             */
            if (val == dup_const(MO_8, val)) {
                vece = MO_8;
            } else if (val == dup_const(MO_16, val)) {
                vece = MO_16;
            } else if (val == dup_const(MO_32, val)) {
                vece = MO_32;
            }

            tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
        }
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        g_assert_not_reached();
    }
    set_temp_val_reg(s, ts, reg);
}

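/*
 * Illustrative example (hypothetical constant, matching the search
 * above): val = 0x0000123400001234 is not a duplicated byte (MO_8) or
 * 16-bit lane (dup of 0x1234 would be 0x1234123412341234), but it is a
 * duplicated 32-bit lane, so vece becomes MO_32 before tcg_out_dupi_vec.
 */
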
/* Save a temporary to memory.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory.  Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}

/* save globals to their canonical location and assume they can be
   modified by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code.  'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->kind == TEMP_FIXED
                         || ts->mem_coherent);
    }
}

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];

        switch (ts->kind) {
        case TEMP_TB:
            temp_save(s, ts, allocated_regs);
            break;
        case TEMP_EBB:
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
            break;
        case TEMP_CONST:
            /* Similarly, we should have freed any allocated register. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
            break;
        default:
            g_assert_not_reached();
        }
    }

    save_globals(s, allocated_regs);
}

/*
 * At a conditional branch, we assume all temporaries are dead unless
 * explicitly live-across-conditional-branch; all globals and local
 * temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        switch (ts->kind) {
        case TEMP_TB:
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
            break;
        case TEMP_EBB:
        case TEMP_CONST:
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Specialized code generation for INDEX_op_mov_* with a constant.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* The movi is not explicitly generated here. */
    set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

/*
 * Specialized code generation for INDEX_op_mov_*.
 */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;
    TCGReg oreg, ireg;

    allocated_regs = s->reserved_regs;
    preferred_regs = output_pref(op, 0);
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }
    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    ireg = ts->reg;

if (IS_DEAD_ARG(0)) {
|
2012-10-09 21:53:07 +02:00
|
|
|
/* mov to a non-saved dead register makes no sense (even with
|
|
|
|
liveness analysis disabled). */
|
2016-04-21 10:48:49 +02:00
|
|
|
tcg_debug_assert(NEED_SYNC_ARG(0));
|
2012-10-09 21:53:07 +02:00
|
|
|
if (!ots->mem_allocated) {
|
2016-11-09 15:25:09 +01:00
|
|
|
temp_allocate_frame(s, ots);
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
2022-12-01 10:05:05 +01:00
|
|
|
tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
|
2012-10-09 21:53:07 +02:00
|
|
|
if (IS_DEAD_ARG(1)) {
|
2013-09-19 00:29:18 +02:00
|
|
|
temp_dead(s, ts);
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
2013-09-19 00:29:18 +02:00
|
|
|
temp_dead(s, ots);
|
2022-12-01 10:05:05 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
|
|
|
|
/*
|
|
|
|
* The mov can be suppressed. Kill input first, so that it
|
|
|
|
* is unlinked from reg_to_temp, then set the output to the
|
|
|
|
* reg that we saved from the input.
|
|
|
|
*/
|
|
|
|
temp_dead(s, ts);
|
|
|
|
oreg = ireg;
|
2012-10-09 21:53:07 +02:00
|
|
|
} else {
|
2022-12-01 10:05:05 +01:00
|
|
|
if (ots->val_type == TEMP_VAL_REG) {
|
|
|
|
oreg = ots->reg;
|
2008-02-01 11:05:41 +01:00
|
|
|
} else {
|
2022-12-01 10:05:05 +01:00
|
|
|
/* Make sure to not spill the input register during allocation. */
|
|
|
|
oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
|
|
|
|
allocated_regs | ((TCGRegSet)1 << ireg),
|
|
|
|
preferred_regs, ots->indirect_base);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
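        /*
         * tcg_out_mov() may fail for a cross register class move,
         * e.g. general vs. vector registers on a host without a
         * direct transfer instruction; the fallback below goes
         * through the destination's memory slot instead.
         */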
        if (!tcg_out_mov(s, otype, oreg, ireg)) {
            /*
             * Cross register class move not supported.
             * Store the source register into the destination slot
             * and leave the destination temp as TEMP_VAL_MEM.
             */
            assert(!temp_readonly(ots));
            if (!ots->mem_allocated) {
                temp_allocate_frame(s, ots);
            }
            tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
            set_temp_val_nonreg(s, ots, TEMP_VAL_MEM);
            ots->mem_coherent = 1;
            return;
        }
    }
    set_temp_val_reg(s, ots, oreg);
    ots->mem_coherent = 0;

    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, allocated_regs, 0, 0);
    }
}

/*
 * Specialized code generation for INDEX_op_dup_vec.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    unsigned vece;
    int lowpart_ofs;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi. */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGReg oreg;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_out_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register. */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there. */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
        lowpart_ofs = 0;
        if (HOST_BIG_ENDIAN) {
            lowpart_ofs = tcg_type_size(itype) - (1 << vece);
        }
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + lowpart_ofs)) {
            goto done;
        }
        /* Load the input into the destination vector register. */
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, i_required_regs;
        bool allocate_new_reg, copyto_new_reg;
        TCGTemp *ts2;
        int i1, i2;

        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type,
                                      arg_ct->ct, TCGOP_VECE(op))) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        reg = ts->reg;
        i_preferred_regs = 0;
        i_required_regs = arg_ct->regs;
        allocate_new_reg = false;
        copyto_new_reg = false;

        switch (arg_ct->pair) {
        case 0: /* not paired */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);

                /*
                 * If the input is readonly, then it cannot also be an
                 * output and aliased to itself.  If the input is not
                 * dead after the instruction, we must allocate a new
                 * register and move it.
                 */
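                /*
                 * The same applies when the aliased output is marked
                 * newreg (earlyclobber), e.g. the i386 and s390x add2
                 * implementations, where the first output must not
                 * overlap the inputs: force a fresh input register.
                 */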
                if (temp_readonly(ts) || !IS_DEAD_ARG(i)
                    || def->args_ct[arg_ct->alias_index].newreg) {
                    allocate_new_reg = true;
                } else if (ts->val_type == TEMP_VAL_REG) {
                    /*
                     * Check if the current register has already been
                     * allocated for another input.
                     */
                    allocate_new_reg =
                        tcg_regset_test_reg(i_allocated_regs, reg);
                }
            }
            if (!allocate_new_reg) {
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                reg = ts->reg;
                allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
            }
            if (allocate_new_reg) {
                /*
                 * Allocate a new register matching the constraint
                 * and move the temporary register into it.
                 */
                temp_load(s, ts, tcg_target_available_regs[ts->type],
                          i_allocated_regs, 0);
                reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
                                    i_preferred_regs, ts->indirect_base);
                copyto_new_reg = true;
            }
            break;
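
        /*
         * Paired constraints describe an operand that must occupy two
         * consecutive host registers (reg, reg + 1); the two halves
         * reference each other via pair_index.
         */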
        case 1:
            /* First of an input pair; if i1 == i2, the second is an output. */
            i1 = i;
            i2 = arg_ct->pair_index;
            ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;

            /*
             * It is easier to default to allocating a new pair
             * and to identify a few cases where it's not required.
             */
            if (arg_ct->ialias) {
                i_preferred_regs = output_pref(op, arg_ct->alias_index);
                if (IS_DEAD_ARG(i1) &&
                    IS_DEAD_ARG(i2) &&
                    !temp_readonly(ts) &&
                    ts->val_type == TEMP_VAL_REG &&
                    ts->reg < TCG_TARGET_NB_REGS - 1 &&
                    tcg_regset_test_reg(i_required_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg) &&
                    !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
                    (ts2
                     ? ts2->val_type == TEMP_VAL_REG &&
                       ts2->reg == reg + 1 &&
                       !temp_readonly(ts2)
                     : s->reg_to_temp[reg + 1] == NULL)) {
                    break;
                }
            } else {
                /* Without aliasing, the pair must also be an input. */
                tcg_debug_assert(ts2);
                if (ts->val_type == TEMP_VAL_REG &&
                    ts2->val_type == TEMP_VAL_REG &&
                    ts2->reg == reg + 1 &&
                    tcg_regset_test_reg(i_required_regs, reg)) {
                    break;
                }
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
                                     0, ts->indirect_base);
            goto do_pair;

        case 2: /* pair second */
            reg = new_args[arg_ct->pair_index] + 1;
            goto do_pair;

        case 3: /* ialias with second output, no first input */
            tcg_debug_assert(arg_ct->ialias);
            i_preferred_regs = output_pref(op, arg_ct->alias_index);

            if (IS_DEAD_ARG(i) &&
                !temp_readonly(ts) &&
                ts->val_type == TEMP_VAL_REG &&
                reg > 0 &&
                s->reg_to_temp[reg - 1] == NULL &&
                tcg_regset_test_reg(i_required_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg) &&
                !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
                tcg_regset_set_reg(i_allocated_regs, reg - 1);
                break;
            }
            reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
                                     i_allocated_regs, 0,
                                     ts->indirect_base);
            tcg_regset_set_reg(i_allocated_regs, reg);
            reg += 1;
            goto do_pair;

        do_pair:
            /*
             * If an aliased input is not dead after the instruction,
             * we must allocate a new register and move it.
             */
            if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
                TCGRegSet t_allocated_regs = i_allocated_regs;

                /*
                 * Because of the alias, and the continued life, make sure
                 * that the temp is somewhere *other* than the reg pair,
                 * and we get a copy in reg.
                 */
                tcg_regset_set_reg(t_allocated_regs, reg);
                tcg_regset_set_reg(t_allocated_regs, reg + 1);
                if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
                    /* If ts was already in reg, copy it somewhere else. */
                    TCGReg nr;
                    bool ok;

                    tcg_debug_assert(ts->kind != TEMP_FIXED);
                    nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                       t_allocated_regs, 0, ts->indirect_base);
                    ok = tcg_out_mov(s, ts->type, nr, reg);
                    tcg_debug_assert(ok);

                    set_temp_val_reg(s, ts, nr);
                } else {
                    temp_load(s, ts, tcg_target_available_regs[ts->type],
                              t_allocated_regs, 0);
                    copyto_new_reg = true;
                }
            } else {
                /* Preferably allocate to reg, otherwise copy. */
                i_required_regs = (TCGRegSet)1 << reg;
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                copyto_new_reg = ts->reg != reg;
            }
            break;

        default:
            g_assert_not_reached();
        }

        if (copyto_new_reg) {
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            switch (arg_ct->pair) {
            case 0: /* not paired */
                if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc(s, arg_ct->regs,
                                        i_allocated_regs | o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                }
                break;

            case 1: /* first of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                    break;
                }
                reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
                                         output_pref(op, k), ts->indirect_base);
                break;

            case 2: /* second of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else {
                    reg = new_args[arg_ct->pair_index] + 1;
                }
                break;

            case 3: /* first of pair, aliasing with a second input */
                tcg_debug_assert(!arg_ct->newreg);
                reg = new_args[arg_ct->pair_index] - 1;
                break;

            default:
                g_assert_not_reached();
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    switch (op->opc) {
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_ext_i32_i64:
        tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
        break;
    default:
        if (def->flags & TCG_OPF_VECTOR) {
            tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                           new_args, const_args);
        } else {
            tcg_out_op(s, op->opc, new_args, const_args);
        }
        break;
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
        TCGReg oreg;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }
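
        /*
         * E.g. itsl = itsh = 0x01010101 replicates a single byte, so
         * the immediate dup below can use vece = MO_8 instead of MO_64.
         */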
        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
        itsh->temp_subindex == !HOST_BIG_ENDIAN &&
        itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
        TCGTemp *its = itsl - HOST_BIG_ENDIAN;

        temp_sync(s, its + 0, s->reserved_regs, 0, 0);
        temp_sync(s, its + 1, s->reserved_regs, 0, 0);

        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}

static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    if (ts->val_type == TEMP_VAL_REG) {
        if (ts->reg != reg) {
            tcg_reg_free(s, reg, allocated_regs);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
    } else {
        TCGRegSet arg_set = 0;

        tcg_reg_free(s, reg, allocated_regs);
        tcg_regset_set_reg(arg_set, reg);
        temp_load(s, ts, arg_set, allocated_regs, 0);
    }
}

static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    /*
     * When the destination is on the stack, load up the temp and store.
     * If there are many call-saved registers, the temp might live to
     * see another use; otherwise it'll be discarded.
     */
    temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
    tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
               arg_slot_stk_ofs(arg_slot));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                            TCGTemp *ts, TCGRegSet *allocated_regs)
{
    if (arg_slot_reg_p(l->arg_slot)) {
        TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
        load_arg_reg(s, reg, ts, *allocated_regs);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
    }
}

static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
                         intptr_t ref_off, TCGRegSet *allocated_regs)
{
    TCGReg reg;

    if (arg_slot_reg_p(arg_slot)) {
        reg = tcg_target_call_iarg_regs[arg_slot];
        tcg_reg_free(s, reg, *allocated_regs);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
                            *allocated_regs, 0, false);
        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
        tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
                   arg_slot_stk_ofs(arg_slot));
    }
}

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    const TCGHelperInfo *info = tcg_call_info(op);
    TCGRegSet allocated_regs = s->reserved_regs;
    int i;

    /*
     * Move inputs into place in reverse order,
     * so that we place stacked arguments first.
     */
    for (i = nb_iargs - 1; i >= 0; --i) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            load_arg_normal(s, loc, ts, &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
                         arg_slot_stk_ofs(loc->ref_slot),
                         &allocated_regs);
            break;
        case TCG_CALL_ARG_BY_REF_N:
            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Mark dead temporaries and free the associated registers. */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* Clobber call registers. */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /*
     * Save globals if they might be written by the helper,
     * sync them if they might be read.
     */
    if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    /*
     * If the ABI passes a pointer to the returned struct as the first
     * argument, load that now.  Pass a pointer to the output home slot.
     */
    if (info->out_kind == TCG_CALL_RET_BY_REF) {
        TCGTemp *ts = arg_temp(op->args[0]);

        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
    }

    tcg_out_call(s, tcg_call_func(op), info);

    /* Assign output registers and emit moves if needed. */
    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
        }
        break;

    case TCG_CALL_RET_BY_VEC:
        {
            TCGTemp *ts = arg_temp(op->args[0]);

            tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
            tcg_debug_assert(ts->temp_subindex == 0);
            if (!ts->mem_allocated) {
                temp_allocate_frame(s, ts);
            }
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       ts->mem_base->reg, ts->mem_offset);
        }
        /* fall through to mark all parts in memory */

    case TCG_CALL_RET_BY_REF:
        /* The callee has performed a write through the reference. */
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            ts->val_type = TEMP_VAL_MEM;
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Flush or discard output registers as needed. */
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

/**
 * atom_and_align_for_opc:
 * @s: tcg context
 * @opc: memory operation code
 * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
 * @allow_two_ops: true if we are prepared to issue two operations
 *
 * Return the alignment and atomicity to use for the inline fast path
 * for the given memory operation.  The alignment may be larger than
 * that specified in @opc, and the correct alignment will be diagnosed
 * by the slow path helper.
 *
 * If @allow_two_ops, the host is prepared to test for 2x alignment,
 * and issue two loads or stores for subalignment.
 */
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
{
    MemOp align = get_alignment_bits(opc);
    MemOp size = opc & MO_SIZE;
    MemOp half = size ? size - 1 : 0;
    MemOp atmax;
    MemOp atom;

    /* When serialized, no further atomicity required. */
    if (s->gen_tb->cflags & CF_PARALLEL) {
        atom = opc & MO_ATOM_MASK;
    } else {
        atom = MO_ATOM_NONE;
    }

    switch (atom) {
    case MO_ATOM_NONE:
        /* The operation requires no specific atomicity. */
        atmax = MO_8;
        break;

    case MO_ATOM_IFALIGN:
        atmax = size;
        break;

    case MO_ATOM_IFALIGN_PAIR:
        atmax = half;
        break;

    case MO_ATOM_WITHIN16:
        atmax = size;
        if (size == MO_128) {
            /* Misalignment implies !within16, and therefore no atomicity. */
        } else if (host_atom != MO_ATOM_WITHIN16) {
            /* The host does not implement within16, so require alignment. */
            align = MAX(align, size);
        }
        break;

    case MO_ATOM_WITHIN16_PAIR:
        atmax = size;
        /*
         * Misalignment implies !within16, and therefore half atomicity.
         * Any host prepared for two operations can implement this with
         * half alignment.
         */
        if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
            align = MAX(align, half);
        }
        break;

    case MO_ATOM_SUBALIGN:
        atmax = size;
        if (host_atom != MO_ATOM_SUBALIGN) {
            /* If unaligned but not odd, there are subobjects up to half. */
            if (allow_two_ops) {
                align = MAX(align, half);
            } else {
                align = MAX(align, size);
            }
        }
        break;

    default:
        g_assert_not_reached();
    }
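
    /*
     * For example, an MO_32 access under MO_ATOM_IFALIGN_PAIR yields
     * atmax = MO_16: each aligned 16-bit half is atomic even though
     * the 32-bit whole need not be.
     */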
    return (TCGAtomAlign){ .atom = atmax, .align = align };
}

/*
 * Similarly for qemu_ld/st slow path helpers.
 * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
 * using only the provided backend tcg_out_* functions.
 */

static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
{
    int ofs = arg_slot_stk_ofs(slot);

    /*
     * Each stack slot is TCG_TARGET_LONG_BITS.  If the host does not
     * require extension to uint64_t, adjust the address for uint32_t.
     */
    if (HOST_BIG_ENDIAN &&
        TCG_TARGET_REG_BITS == 64 &&
        type == TCG_TYPE_I32) {
        ofs += 4;
    }
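
    /*
     * E.g. on a big-endian 64-bit host a 32-bit argument occupies
     * bytes 4..7 of its 8-byte slot, so the offset bump points the
     * callee's 32-bit load at the significant half.
     */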
    return ofs;
}

static void tcg_out_helper_load_slots(TCGContext *s,
                                      unsigned nmov, TCGMovExtend *mov,
                                      const TCGLdstHelperParam *parm)
{
    unsigned i;
    TCGReg dst3;

    /*
     * Start from the end, storing to the stack first.
     * This frees those registers, so we need not consider overlap.
     */
    for (i = nmov; i-- > 0; ) {
        unsigned slot = mov[i].dst;

        if (arg_slot_reg_p(slot)) {
            goto found_reg;
        }

        TCGReg src = mov[i].src;
        TCGType dst_type = mov[i].dst_type;
        MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;

        /* The argument is going onto the stack; extend into scratch. */
        if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
            tcg_debug_assert(parm->ntmp != 0);
            mov[i].dst = src = parm->tmp[0];
            tcg_out_movext1(s, &mov[i]);
        }

        tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
                   tcg_out_helper_stk_ofs(dst_type, slot));
    }
    return;

 found_reg:
    /*
     * The remaining arguments are in registers.
     * Convert slot numbers to argument registers.
     */
    nmov = i + 1;
    for (i = 0; i < nmov; ++i) {
        mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
    }

    switch (nmov) {
    case 4:
        /* The backend must have provided enough temps for the worst case. */
        tcg_debug_assert(parm->ntmp >= 2);

        dst3 = mov[3].dst;
        for (unsigned j = 0; j < 3; ++j) {
            if (dst3 == mov[j].src) {
                /*
                 * Conflict.  Copy the source to a temporary, perform the
                 * remaining moves, then the extension from our scratch
                 * on the way out.
                 */
                TCGReg scratch = parm->tmp[1];

                tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
                tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
                tcg_out_movext1_new_src(s, &mov[3], scratch);
                return;
            }
        }

        /* No conflicts: perform this move and continue. */
        tcg_out_movext1(s, &mov[3]);
        /* fall through */

    case 3:
        tcg_out_movext3(s, mov, mov + 1, mov + 2,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 2:
        tcg_out_movext2(s, mov, mov + 1,
                        parm->ntmp ? parm->tmp[0] : -1);
        break;
    case 1:
        tcg_out_movext1(s, mov);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
                                    TCGType type, tcg_target_long imm,
                                    const TCGLdstHelperParam *parm)
{
    if (arg_slot_reg_p(slot)) {
        tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
    } else {
        int ofs = tcg_out_helper_stk_ofs(type, slot);
        if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_movi(s, type, parm->tmp[0], imm);
            tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
        }
    }
}

static void tcg_out_helper_load_common_args(TCGContext *s,
                                            const TCGLabelQemuLdst *ldst,
                                            const TCGLdstHelperParam *parm,
                                            const TCGHelperInfo *info,
                                            unsigned next_arg)
{
    TCGMovExtend ptr_mov = {
        .dst_type = TCG_TYPE_PTR,
        .src_type = TCG_TYPE_PTR,
        .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
    };
    const TCGCallArgumentLoc *loc = &info->in[0];
    TCGType type;
    unsigned slot;
    tcg_target_ulong imm;

    /*
     * Handle env, which is always first.
     */
    ptr_mov.dst = loc->arg_slot;
    ptr_mov.src = TCG_AREG0;
    tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);

    /*
     * Handle oi.
     */
    imm = ldst->oi;
    loc = &info->in[next_arg];
    type = TCG_TYPE_I32;
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
        break;
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        /* No extension required for MemOpIdx. */
        tcg_debug_assert(imm <= INT32_MAX);
        type = TCG_TYPE_REG;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
    next_arg++;

    /*
     * Handle ra.
     */
    loc = &info->in[next_arg];
    slot = loc->arg_slot;
    if (parm->ra_gen) {
        int arg_reg = -1;
        TCGReg ra_reg;

        if (arg_slot_reg_p(slot)) {
            arg_reg = tcg_target_call_iarg_regs[slot];
        }
        ra_reg = parm->ra_gen(s, ldst, arg_reg);

        ptr_mov.dst = slot;
        ptr_mov.src = ra_reg;
        tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
    } else {
        imm = (uintptr_t)ldst->raddr;
        tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
    }
}

static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
                                       const TCGCallArgumentLoc *loc,
                                       TCGType dst_type, TCGType src_type,
                                       TCGReg lo, TCGReg hi)
{
    MemOp reg_mo;

    if (dst_type <= TCG_TYPE_REG) {
        MemOp src_ext;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
            break;
        case TCG_CALL_ARG_EXTEND_U:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_UL;
            break;
        case TCG_CALL_ARG_EXTEND_S:
            dst_type = TCG_TYPE_REG;
            src_ext = MO_SL;
            break;
        default:
            g_assert_not_reached();
        }

        mov[0].dst = loc->arg_slot;
        mov[0].dst_type = dst_type;
        mov[0].src = lo;
        mov[0].src_type = src_type;
        mov[0].src_ext = src_ext;
        return 1;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        assert(dst_type == TCG_TYPE_I64);
        reg_mo = MO_32;
    } else {
        assert(dst_type == TCG_TYPE_I128);
        reg_mo = MO_64;
    }

    mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
    mov[0].src = lo;
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = reg_mo;

    mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
    mov[1].src = hi;
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = reg_mo;

    return 2;
}

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[2];
    unsigned next_arg, nmov;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_ld32_mmu;
        break;
    case MO_64:
        info = &info_helper_ld64_mmu;
        break;
    case MO_128:
        info = &info_helper_ld128_mmu;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;

    loc = &info->in[next_arg];
    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /*
         * 32-bit host with 32-bit guest: zero-extend the guest address
         * to 64-bits for the helper by storing the low part, then
         * load a zero for the high part.
         */
        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                               TCG_TYPE_I32, TCG_TYPE_I32,
                               ldst->addrlo_reg, -1);
        tcg_out_helper_load_slots(s, 1, mov, parm);

        tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
                                TCG_TYPE_I32, 0, parm);
        next_arg += 2;
    } else {
        nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                      ldst->addrlo_reg, ldst->addrhi_reg);
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        next_arg += nmov;
    }

    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
    case TCG_CALL_RET_BY_VEC:
        break;
    case TCG_CALL_RET_BY_REF:
        /*
         * The return reference is in the first argument slot.
         * We need memory in which to return: re-use the top of stack.
         */
        {
            int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;

            if (arg_slot_reg_p(0)) {
                tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
            } else {
                tcg_debug_assert(parm->ntmp != 0);
                tcg_out_addi_ptr(s, parm->tmp[0],
                                 TCG_REG_CALL_STACK, ofs_slot0);
                tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                           TCG_REG_CALL_STACK, ofs_slot0);
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}

static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                  bool load_sign,
                                  const TCGLdstHelperParam *parm)
{
    MemOp mop = get_memop(ldst->oi);
    TCGMovExtend mov[2];
    int ofs_slot0;

    switch (ldst->type) {
    case TCG_TYPE_I64:
        if (TCG_TARGET_REG_BITS == 32) {
            break;
        }
        /* fall through */

    case TCG_TYPE_I32:
        mov[0].dst = ldst->datalo_reg;
        mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
        mov[0].dst_type = ldst->type;
        mov[0].src_type = TCG_TYPE_REG;

        /*
         * If load_sign, then we allowed the helper to perform the
         * appropriate sign extension to tcg_target_ulong, and all
         * we need now is a plain move.
         *
         * If not, then we expect the relevant extension instruction
         * to be no more expensive than a move, and we thus save the
         * icache etc by only using one of two helper functions.
         */
        if (load_sign || !(mop & MO_SIGN)) {
            if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
                mov[0].src_ext = MO_32;
            } else {
                mov[0].src_ext = MO_64;
            }
        } else {
            mov[0].src_ext = mop & MO_SSIZE;
        }
        tcg_out_movext1(s, mov);
        return;

    case TCG_TYPE_I128:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            break;
        case TCG_CALL_RET_BY_VEC:
            tcg_out_st(s, TCG_TYPE_V128,
                       tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
                       TCG_REG_CALL_STACK, ofs_slot0);
            /* fall through */
        case TCG_CALL_RET_BY_REF:
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
            tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
                       TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
            return;
        default:
            g_assert_not_reached();
        }
        break;

    default:
        g_assert_not_reached();
    }

    mov[0].dst = ldst->datalo_reg;
    mov[0].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
    mov[0].dst_type = TCG_TYPE_REG;
    mov[0].src_type = TCG_TYPE_REG;
    mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    mov[1].dst = ldst->datahi_reg;
    mov[1].src =
        tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
    mov[1].dst_type = TCG_TYPE_REG;
    mov[1].src_type = TCG_TYPE_REG;
    mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;

    tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
}

static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
                                   const TCGLdstHelperParam *parm)
{
    const TCGHelperInfo *info;
    const TCGCallArgumentLoc *loc;
    TCGMovExtend mov[4];
    TCGType data_type;
    unsigned next_arg, nmov, n;
    MemOp mop = get_memop(ldst->oi);

    switch (mop & MO_SIZE) {
    case MO_8:
    case MO_16:
    case MO_32:
        info = &info_helper_st32_mmu;
        data_type = TCG_TYPE_I32;
        break;
    case MO_64:
        info = &info_helper_st64_mmu;
        data_type = TCG_TYPE_I64;
        break;
    case MO_128:
        info = &info_helper_st128_mmu;
        data_type = TCG_TYPE_I128;
        break;
    default:
        g_assert_not_reached();
    }

    /* Defer env argument. */
    next_arg = 1;
    nmov = 0;

    /* Handle addr argument. */
    loc = &info->in[next_arg];
    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /*
         * 32-bit host with 32-bit guest: zero-extend the guest address
         * to 64-bits for the helper by storing the low part.  Later,
         * after we have processed the register inputs, we will load a
         * zero for the high part.
         */
        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                               TCG_TYPE_I32, TCG_TYPE_I32,
                               ldst->addrlo_reg, -1);
        next_arg += 2;
        nmov += 1;
    } else {
        n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
                                   ldst->addrlo_reg, ldst->addrhi_reg);
        next_arg += n;
        nmov += n;
    }

    /* Handle data argument. */
    loc = &info->in[next_arg];
    switch (loc->kind) {
    case TCG_CALL_ARG_NORMAL:
    case TCG_CALL_ARG_EXTEND_U:
    case TCG_CALL_ARG_EXTEND_S:
        n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
                                   ldst->datalo_reg, ldst->datahi_reg);
        next_arg += n;
        nmov += n;
        tcg_out_helper_load_slots(s, nmov, mov, parm);
        break;

    case TCG_CALL_ARG_BY_REF:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        tcg_debug_assert(data_type == TCG_TYPE_I128);
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
        tcg_out_st(s, TCG_TYPE_I64,
                   HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
                   TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));

        tcg_out_helper_load_slots(s, nmov, mov, parm);

        if (arg_slot_reg_p(loc->arg_slot)) {
            tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
                             TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
        } else {
            tcg_debug_assert(parm->ntmp != 0);
            tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
                             arg_slot_stk_ofs(loc->ref_slot));
            tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
                       TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
        }
        next_arg += 2;
        break;

    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
        /* Zero extend the address by loading a zero for the high part. */
        loc = &info->in[1 + !HOST_BIG_ENDIAN];
        tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
    }

    tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
void tcg_dump_op_count(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{
    int i, start_words, num_insns;
    TCGOp *op;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP:\n");
            tcg_dump_ops(s, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

tcg_optimize(s);
|
2011-07-07 14:37:12 +02:00
|
|
|
|
2018-11-26 23:28:28 +01:00
|
|
|
reachable_code_pass(s);
|
2023-01-29 22:50:20 +01:00
|
|
|
liveness_pass_0(s);
|
2016-11-01 22:56:04 +01:00
|
|
|
liveness_pass_1(s);
|
2016-06-24 05:34:33 +02:00
|
|
|
|
2016-11-01 22:56:04 +01:00
|
|
|
if (s->nb_indirects > 0) {
|
|
|
|
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
|
2022-08-15 22:16:06 +02:00
|
|
|
&& qemu_log_in_addr_range(pc_start))) {
|
2022-04-17 20:29:47 +02:00
|
|
|
FILE *logfile = qemu_log_trylock();
|
2022-04-17 20:29:49 +02:00
|
|
|
if (logfile) {
|
|
|
|
fprintf(logfile, "OP before indirect lowering:\n");
|
2022-04-17 20:29:51 +02:00
|
|
|
tcg_dump_ops(s, logfile, false);
|
2022-04-17 20:29:49 +02:00
|
|
|
fprintf(logfile, "\n");
|
|
|
|
qemu_log_unlock(logfile);
|
|
|
|
}
|
2016-11-01 22:56:04 +01:00
|
|
|
}
|
2023-04-02 01:06:47 +02:00
|
|
|
|
2016-11-01 22:56:04 +01:00
|
|
|
/* Replace indirect temps with direct temps. */
|
|
|
|
if (liveness_pass_2(s)) {
|
|
|
|
/* If changes were made, re-run liveness. */
|
|
|
|
liveness_pass_1(s);
|
2016-06-24 05:34:33 +02:00
|
|
|
}
|
|
|
|
}
|
2012-09-06 16:47:13 +02:00
|
|
|
|
2016-03-15 15:30:21 +01:00
|
|
|
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
|
2022-08-15 22:16:06 +02:00
|
|
|
&& qemu_log_in_addr_range(pc_start))) {
|
2022-04-17 20:29:47 +02:00
|
|
|
FILE *logfile = qemu_log_trylock();
|
2022-04-17 20:29:49 +02:00
|
|
|
if (logfile) {
|
|
|
|
fprintf(logfile, "OP after optimization and liveness analysis:\n");
|
2022-04-17 20:29:51 +02:00
|
|
|
tcg_dump_ops(s, logfile, true);
|
2022-04-17 20:29:49 +02:00
|
|
|
fprintf(logfile, "\n");
|
|
|
|
qemu_log_unlock(logfile);
|
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2022-11-06 00:55:37 +01:00
|
|
|
/* Initialize goto_tb jump offsets. */
|
2022-11-27 03:20:57 +01:00
|
|
|
tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
|
|
|
|
tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
|
2022-11-27 03:54:23 +01:00
|
|
|
tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
|
|
|
|
tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
|
2022-11-06 00:55:37 +01:00
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
tcg_reg_alloc_start(s);
|
|
|
|
|
2020-10-28 20:05:44 +01:00
|
|
|
/*
|
|
|
|
* Reset the buffer pointers when restarting after overflow.
|
|
|
|
* TODO: Move this into translate-all.c with the rest of the
|
|
|
|
* buffer management. Having only this done here is confusing.
|
|
|
|
*/
|
|
|
|
s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
|
|
|
|
s->code_ptr = s->code_buf;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2017-07-30 21:30:41 +02:00
|
|
|
#ifdef TCG_TARGET_NEED_LDST_LABELS
|
2018-04-30 01:58:40 +02:00
|
|
|
QSIMPLEQ_INIT(&s->ldst_labels);
|
2017-07-30 21:30:41 +02:00
|
|
|
#endif
|
2017-07-30 22:13:21 +02:00
|
|
|
#ifdef TCG_TARGET_NEED_POOL_LABELS
|
|
|
|
s->pool_labels = NULL;
|
|
|
|
#endif
|
2013-10-03 21:51:24 +02:00
|
|
|
|
2023-04-01 06:30:31 +02:00
|
|
|
start_words = s->insn_start_words;
|
|
|
|
s->gen_insn_data =
|
|
|
|
tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
|
|
|
|
|
2023-08-15 18:34:59 +02:00
|
|
|
tcg_out_tb_start(s);
|
|
|
|
|
2015-09-02 04:11:45 +02:00
|
|
|
num_insns = -1;
|
2017-11-02 15:19:14 +01:00
|
|
|
QTAILQ_FOREACH(op, &s->ops, link) {
|
2014-09-19 22:49:15 +02:00
|
|
|
TCGOpcode opc = op->opc;
|
2008-03-08 14:33:42 +01:00
|
|
|
|
2014-09-19 22:49:15 +02:00
|
|
|
switch (opc) {
|
2008-02-01 11:05:41 +01:00
|
|
|
case INDEX_op_mov_i32:
|
|
|
|
case INDEX_op_mov_i64:
|
2017-09-14 22:53:46 +02:00
|
|
|
case INDEX_op_mov_vec:
|
2016-12-08 22:42:08 +01:00
|
|
|
tcg_reg_alloc_mov(s, op);
|
2008-02-01 11:05:41 +01:00
|
|
|
break;
|
2019-03-18 19:20:27 +01:00
|
|
|
case INDEX_op_dup_vec:
|
|
|
|
tcg_reg_alloc_dup(s, op);
|
|
|
|
break;
|
2015-08-29 21:37:33 +02:00
|
|
|
case INDEX_op_insn_start:
|
2015-09-02 04:11:45 +02:00
|
|
|
if (num_insns >= 0) {
|
2018-06-15 07:57:03 +02:00
|
|
|
size_t off = tcg_current_code_size(s);
|
|
|
|
s->gen_insn_end_off[num_insns] = off;
|
|
|
|
/* Assert that we do not overflow our stored offset. */
|
|
|
|
assert(s->gen_insn_end_off[num_insns] == off);
|
2015-09-02 04:11:45 +02:00
|
|
|
}
|
|
|
|
num_insns++;
|
2023-04-01 06:30:31 +02:00
|
|
|
for (i = 0; i < start_words; ++i) {
|
|
|
|
s->gen_insn_data[num_insns * start_words + i] =
|
2023-03-08 21:24:41 +01:00
|
|
|
tcg_get_insn_start_param(op, i);
|
2015-09-02 00:51:12 +02:00
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
break;
|
2008-02-04 01:37:54 +01:00
|
|
|
case INDEX_op_discard:
|
2017-06-20 08:18:10 +02:00
|
|
|
temp_dead(s, arg_temp(op->args[0]));
|
2008-02-04 01:37:54 +01:00
|
|
|
break;
|
2008-02-01 11:05:41 +01:00
|
|
|
case INDEX_op_set_label:
|
2008-05-23 19:33:39 +02:00
|
|
|
tcg_reg_alloc_bb_end(s, s->reserved_regs);
|
2020-10-29 02:55:50 +01:00
|
|
|
tcg_out_label(s, arg_label(op->args[0]));
|
2008-02-01 11:05:41 +01:00
|
|
|
break;
|
|
|
|
case INDEX_op_call:
|
2016-12-08 22:42:08 +01:00
|
|
|
tcg_reg_alloc_call(s, op);
|
2014-09-19 22:49:15 +02:00
|
|
|
break;
|
2022-11-26 21:42:06 +01:00
|
|
|
case INDEX_op_exit_tb:
|
|
|
|
tcg_out_exit_tb(s, op->args[0]);
|
|
|
|
break;
|
2022-11-27 02:14:05 +01:00
|
|
|
case INDEX_op_goto_tb:
|
|
|
|
tcg_out_goto_tb(s, op->args[0]);
|
|
|
|
break;
|
2020-03-31 11:33:21 +02:00
|
|
|
case INDEX_op_dup2_vec:
|
|
|
|
if (tcg_reg_alloc_dup2(s, op)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* fall through */
|
2008-02-01 11:05:41 +01:00
|
|
|
default:
|
2011-08-17 23:11:46 +02:00
|
|
|
/* Sanity check that we've not introduced any unhandled opcodes. */
|
2017-08-17 16:43:20 +02:00
|
|
|
tcg_debug_assert(tcg_op_supported(opc));
            /* Note: it would be much faster to have specialized register
               allocator functions for some common argument patterns. */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
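
/*
 * A sketch of how the negative return values above are consumed
 * (illustrative only; the actual retry logic lives in the caller,
 * e.g. tb_gen_code() in the translator):
 *
 *     gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
 *     if (unlikely(gen_code_size < 0)) {
 *         restart: -1 means the code_gen_buffer overflowed, so obtain
 *         a fresh region and regenerate this TB; -2 means the TB was
 *         too large for the 16-bit insn-end offsets, so retry with
 *         fewer guest instructions.
 *     }
 */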

void tcg_dump_info(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/
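
/* A minimal sketch of those three steps for a hypothetical backend
   (illustrative only: the machine value and the DebugFrame contents are
   backend-specific, and the unwind bytes must match the prologue emitted
   by tcg_target_qemu_prologue):

       #define ELF_HOST_MACHINE  EM_X86_64

       static const DebugFrame debug_frame = {
           ... CIE/FDE bytes describing the post-prologue frame ...
       };

       void tcg_register_jit(const void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }
*/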

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}
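
/* GDB places a breakpoint inside __jit_debug_register_code and re-reads
   __jit_debug_descriptor each time it is hit.  The noinline attribute
   and the empty asm above keep the function from being inlined or
   elided, so the breakpoint has an address to land on. */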

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
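
/* For example, given the section string table built below,
   "\0.text\0.debug_info\0...", find_string(str, ".text") returns 1 and
   find_string(str, ".debug_info") returns 7: the byte offsets that the
   ELF sh_name and st_name fields expect.  Note that the loop assumes
   the string is present; there is no table-end check. */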

static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };
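
    /*
     * The image is assembled as a single contiguous allocation: the
     * ElfImage above holds the ELF headers and the fixed-size sections
     * inline, and the caller's .debug_frame bytes are appended directly
     * after it (hence shdr[4].sh_offset == sizeof(struct ElfImage) in
     * the template below).
     */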

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Ehdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };
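
    /*
     * Note that .di and .da must stay in sync: abbrev 1 in .da declares
     * the compile-unit DIE (language, low_pc, high_pc) and abbrev 2 the
     * subprogram DIE (name, low_pc, high_pc), exactly the fields that
     * the DebugInfo struct lays out.
     */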

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != 1) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif
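
    /*
     * When DEBUG_JIT is defined, the image written above can be checked
     * with the usual binutils tools, for example:
     *
     *     readelf --sections --syms /tmp/qemu.jit
     *     objdump --dwarf=frames /tmp/qemu.jit
     *
     * (the directory comes from g_get_tmp_dir(), so /tmp is only the
     * common case).
     */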

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}

#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif