2008-02-01 11:05:41 +01:00
|
|
|
/*
|
|
|
|
* Tiny Code Generator for QEMU
|
|
|
|
*
|
|
|
|
* Copyright (c) 2008 Fabrice Bellard
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* define it to use liveness analysis (better code) */
|
2011-07-07 14:37:12 +02:00
|
|
|
#define USE_TCG_OPTIMIZATIONS
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2016-01-26 19:17:08 +01:00
|
|
|
#include "qemu/osdep.h"
|
2009-04-16 11:58:30 +02:00
|
|
|
|
2012-03-19 20:25:11 +01:00
|
|
|
/* Define to dump the ELF file used to communicate with GDB. */
|
|
|
|
#undef DEBUG_JIT
|
|
|
|
|
2018-10-10 16:48:53 +02:00
|
|
|
#include "qemu/error-report.h"
|
2016-03-20 18:16:19 +01:00
|
|
|
#include "qemu/cutils.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/host-utils.h"
|
2019-04-17 21:17:51 +02:00
|
|
|
#include "qemu/qemu-print.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/timer.h"
|
2020-12-14 15:02:33 +01:00
|
|
|
#include "qemu/cacheflush.h"
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2014-02-21 20:52:39 +01:00
|
|
|
/* Note: the long term plan is to reduce the dependencies on the QEMU
|
2008-02-01 11:05:41 +01:00
|
|
|
CPU definitions. Currently they are used for qemu_ld/st
|
|
|
|
instructions */
|
|
|
|
#define NO_CPU_IO_DEFS
|
|
|
|
#include "cpu.h"
|
|
|
|
|
2016-03-15 13:18:37 +01:00
|
|
|
#include "exec/exec-all.h"
|
|
|
|
|
2019-05-18 22:54:21 +02:00
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
|
|
|
#include "hw/boards.h"
|
|
|
|
#endif
|
|
|
|
|
2020-01-01 12:23:00 +01:00
|
|
|
#include "tcg/tcg-op.h"
|
2012-03-19 20:25:11 +01:00
|
|
|
|
2013-08-21 02:20:30 +02:00
|
|
|
#if UINTPTR_MAX == UINT32_MAX
|
2012-03-19 20:25:11 +01:00
|
|
|
# define ELF_CLASS ELFCLASS32
|
2013-08-21 02:20:30 +02:00
|
|
|
#else
|
|
|
|
# define ELF_CLASS ELFCLASS64
|
2012-03-19 20:25:11 +01:00
|
|
|
#endif
|
|
|
|
#ifdef HOST_WORDS_BIGENDIAN
|
|
|
|
# define ELF_DATA ELFDATA2MSB
|
|
|
|
#else
|
|
|
|
# define ELF_DATA ELFDATA2LSB
|
|
|
|
#endif
|
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
#include "elf.h"
|
2016-01-07 14:55:28 +01:00
|
|
|
#include "exec/log.h"
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
#include "sysemu/sysemu.h"
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2020-02-04 12:41:01 +01:00
|
|
|
/* Forward declarations for functions declared in tcg-target.c.inc and
|
2016-02-23 15:49:41 +01:00
|
|
|
used here. */
|
2010-06-03 02:26:56 +02:00
|
|
|
static void tcg_target_init(TCGContext *s);
|
2016-11-18 09:31:40 +01:00
|
|
|
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
|
2010-06-03 02:26:56 +02:00
|
|
|
static void tcg_target_qemu_prologue(TCGContext *s);
|
2018-11-30 20:52:48 +01:00
|
|
|
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
|
2013-08-21 00:30:10 +02:00
|
|
|
intptr_t value, intptr_t addend);
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2013-06-05 16:39:57 +02:00
|
|
|
/* The CIE and FDE header definitions will be common to all hosts. */
|
|
|
|
/*
 * DWARF Common Information Entry header, common to all hosts.
 * Emitted as part of the debug-frame data handed to tcg_register_jit_int
 * so that GDB can unwind through generated code.
 */
typedef struct {
    /* Aligned so that the FDE image following the CIE is also aligned. */
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;
|
|
|
|
|
|
|
|
/*
 * DWARF Frame Description Entry header; packed so the in-memory layout
 * matches the byte stream GDB expects.
 */
typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;    /* offset back to the owning CIE */
    uintptr_t func_start;   /* start of the code range described */
    uintptr_t func_len;     /* length of the code range described */
} DebugFrameFDEHeader;
|
|
|
|
|
2014-05-15 21:48:01 +02:00
|
|
|
/* A complete debug-frame image: one CIE immediately followed by one FDE. */
typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
|
|
|
|
|
2020-10-29 17:17:30 +01:00
|
|
|
static void tcg_register_jit_int(const void *buf, size_t size,
|
2014-05-15 21:48:01 +02:00
|
|
|
const void *debug_frame,
|
|
|
|
size_t debug_frame_size)
|
2012-03-19 20:25:11 +01:00
|
|
|
__attribute__((unused));
|
|
|
|
|
2020-02-04 12:41:01 +01:00
|
|
|
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
|
2016-11-18 11:50:59 +01:00
|
|
|
static const char *target_parse_constraint(TCGArgConstraint *ct,
|
|
|
|
const char *ct_str, TCGType type);
|
2011-11-09 09:03:34 +01:00
|
|
|
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
|
2013-08-21 02:07:26 +02:00
|
|
|
intptr_t arg2);
|
2019-03-16 18:48:18 +01:00
|
|
|
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
|
2011-09-17 22:00:29 +02:00
|
|
|
static void tcg_out_movi(TCGContext *s, TCGType type,
|
2011-11-09 09:03:34 +01:00
|
|
|
TCGReg ret, tcg_target_long arg);
|
2011-09-17 22:00:29 +02:00
|
|
|
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
|
|
|
|
const int *const_args);
|
2017-09-14 22:53:46 +02:00
|
|
|
#if TCG_TARGET_MAYBE_vec
|
2019-03-18 16:32:44 +01:00
|
|
|
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg dst, TCGReg src);
|
2019-03-18 20:00:39 +01:00
|
|
|
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg dst, TCGReg base, intptr_t offset);
|
2019-03-18 16:32:44 +01:00
|
|
|
static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
|
|
|
|
TCGReg dst, tcg_target_long arg);
|
2017-09-14 22:53:46 +02:00
|
|
|
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
|
|
|
|
unsigned vece, const TCGArg *args,
|
|
|
|
const int *const_args);
|
|
|
|
#else
|
2019-03-18 16:32:44 +01:00
|
|
|
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg dst, TCGReg src)
|
|
|
|
{
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
2019-03-18 20:00:39 +01:00
|
|
|
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
|
|
|
|
TCGReg dst, TCGReg base, intptr_t offset)
|
|
|
|
{
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
2019-03-18 16:32:44 +01:00
|
|
|
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type,
|
|
|
|
TCGReg dst, tcg_target_long arg)
|
|
|
|
{
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
2017-09-14 22:53:46 +02:00
|
|
|
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
|
|
|
|
unsigned vece, const TCGArg *args,
|
|
|
|
const int *const_args)
|
|
|
|
{
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
#endif
|
2011-11-09 09:03:34 +01:00
|
|
|
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
|
2013-08-21 02:07:26 +02:00
|
|
|
intptr_t arg2);
|
2016-06-20 07:59:13 +02:00
|
|
|
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
|
|
|
|
TCGReg base, intptr_t ofs);
|
2020-10-28 23:29:04 +01:00
|
|
|
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
|
2014-03-31 06:22:11 +02:00
|
|
|
static int tcg_target_const_match(tcg_target_long val, TCGType type,
|
2011-09-17 22:00:29 +02:00
|
|
|
const TCGArgConstraint *arg_ct);
|
2017-07-30 21:30:41 +02:00
|
|
|
#ifdef TCG_TARGET_NEED_LDST_LABELS
|
2019-04-21 23:51:00 +02:00
|
|
|
static int tcg_out_ldst_finalize(TCGContext *s);
|
2017-07-30 21:30:41 +02:00
|
|
|
#endif
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2017-07-08 01:00:30 +02:00
|
|
|
#define TCG_HIGHWATER 1024
|
|
|
|
|
2017-07-13 00:26:40 +02:00
|
|
|
static TCGContext **tcg_ctxs;
|
|
|
|
static unsigned int n_tcg_ctxs;
|
2017-10-10 23:34:37 +02:00
|
|
|
TCGv_env cpu_env = 0;
|
2020-11-06 00:41:38 +01:00
|
|
|
const void *tcg_code_gen_epilogue;
|
2020-10-28 20:05:44 +01:00
|
|
|
uintptr_t tcg_splitwx_diff;
|
2017-07-13 00:26:40 +02:00
|
|
|
|
2020-10-28 22:11:54 +01:00
|
|
|
#ifndef CONFIG_TCG_INTERPRETER
|
|
|
|
tcg_prologue_fn *tcg_qemu_tb_exec;
|
|
|
|
#endif
|
|
|
|
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
/*
 * Per-region binary search tree of TBs.  Each TCG thread generates code
 * into its own region, so tracking TBs per region (instead of in one
 * global tree) avoids cross-thread contention.
 */
struct tcg_region_tree {
    QemuMutex lock;   /* serializes all accesses to @tree */
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};
|
|
|
|
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;   /* protects the "fields protected by the lock" below */

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;         /* number of regions */
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

/* The single, global region allocator shared by all TCG threads. */
static struct tcg_region_state region;
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
/*
|
|
|
|
* This is an array of struct tcg_region_tree's, with padding.
|
|
|
|
* We use void * to simplify the computation of region_trees[i]; each
|
|
|
|
* struct is found every tree_size bytes.
|
|
|
|
*/
|
|
|
|
static void *region_trees;
|
|
|
|
static size_t tree_size;
|
2017-09-14 22:53:46 +02:00
|
|
|
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
|
2008-10-26 14:43:07 +01:00
|
|
|
static TCGRegSet tcg_target_call_clobber_regs;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2014-03-28 20:56:22 +01:00
|
|
|
#if TCG_TARGET_INSN_UNIT_SIZE == 1
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
|
|
|
*s->code_ptr++ = v;
|
|
|
|
}
|
|
|
|
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
|
|
|
|
uint8_t v)
|
2014-03-28 16:29:48 +01:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
*p = v;
|
2014-03-28 16:29:48 +01:00
|
|
|
}
|
2014-03-28 20:56:22 +01:00
|
|
|
#endif
|
2014-03-28 16:29:48 +01:00
|
|
|
|
2014-03-28 20:56:22 +01:00
|
|
|
#if TCG_TARGET_INSN_UNIT_SIZE <= 2
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
|
|
|
|
*s->code_ptr++ = v;
|
|
|
|
} else {
|
|
|
|
tcg_insn_unit *p = s->code_ptr;
|
|
|
|
memcpy(p, &v, sizeof(v));
|
|
|
|
s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
|
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
|
|
|
|
uint16_t v)
|
2014-03-28 16:29:48 +01:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
|
|
|
|
*p = v;
|
|
|
|
} else {
|
|
|
|
memcpy(p, &v, sizeof(v));
|
|
|
|
}
|
2014-03-28 16:29:48 +01:00
|
|
|
}
|
2014-03-28 20:56:22 +01:00
|
|
|
#endif
|
2014-03-28 16:29:48 +01:00
|
|
|
|
2014-03-28 20:56:22 +01:00
|
|
|
#if TCG_TARGET_INSN_UNIT_SIZE <= 4
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
|
|
|
|
*s->code_ptr++ = v;
|
|
|
|
} else {
|
|
|
|
tcg_insn_unit *p = s->code_ptr;
|
|
|
|
memcpy(p, &v, sizeof(v));
|
|
|
|
s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
|
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
|
|
|
|
uint32_t v)
|
2014-03-28 16:29:48 +01:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
|
|
|
|
*p = v;
|
|
|
|
} else {
|
|
|
|
memcpy(p, &v, sizeof(v));
|
|
|
|
}
|
2014-03-28 16:29:48 +01:00
|
|
|
}
|
2014-03-28 20:56:22 +01:00
|
|
|
#endif
|
2014-03-28 16:29:48 +01:00
|
|
|
|
2014-03-28 20:56:22 +01:00
|
|
|
#if TCG_TARGET_INSN_UNIT_SIZE <= 8
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
|
2013-07-25 21:42:17 +02:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
|
|
|
|
*s->code_ptr++ = v;
|
|
|
|
} else {
|
|
|
|
tcg_insn_unit *p = s->code_ptr;
|
|
|
|
memcpy(p, &v, sizeof(v));
|
|
|
|
s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
|
|
|
|
}
|
2013-07-25 21:42:17 +02:00
|
|
|
}
|
|
|
|
|
2014-06-07 19:08:44 +02:00
|
|
|
static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
|
|
|
|
uint64_t v)
|
2014-03-28 16:29:48 +01:00
|
|
|
{
|
2014-03-28 20:56:22 +01:00
|
|
|
if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
|
|
|
|
*p = v;
|
|
|
|
} else {
|
|
|
|
memcpy(p, &v, sizeof(v));
|
|
|
|
}
|
2014-03-28 16:29:48 +01:00
|
|
|
}
|
2014-03-28 20:56:22 +01:00
|
|
|
#endif
|
2014-03-28 16:29:48 +01:00
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
/* label relocation processing */
|
|
|
|
|
2014-03-28 20:56:22 +01:00
|
|
|
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
|
2015-02-13 22:39:54 +01:00
|
|
|
TCGLabel *l, intptr_t addend)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2019-04-21 22:34:35 +02:00
|
|
|
TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2019-04-21 22:34:35 +02:00
|
|
|
r->type = type;
|
|
|
|
r->ptr = code_ptr;
|
|
|
|
r->addend = addend;
|
|
|
|
QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2020-10-29 02:55:50 +01:00
|
|
|
/* Bind label @l to the current output position (as an execution address). */
static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
    l->has_value = 1;
}
|
|
|
|
|
2015-02-13 21:51:55 +01:00
|
|
|
TCGLabel *gen_new_label(void)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2017-07-12 23:15:52 +02:00
|
|
|
TCGContext *s = tcg_ctx;
|
2015-02-14 03:51:05 +01:00
|
|
|
TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2019-04-21 22:34:35 +02:00
|
|
|
memset(l, 0, sizeof(TCGLabel));
|
|
|
|
l->id = s->nb_labels++;
|
|
|
|
QSIMPLEQ_INIT(&l->relocs);
|
|
|
|
|
2019-02-07 14:26:40 +01:00
|
|
|
QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);
|
2015-02-13 21:51:55 +01:00
|
|
|
|
|
|
|
return l;
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2019-04-21 22:34:35 +02:00
|
|
|
/*
 * Apply every pending relocation of every label in @s.
 * Returns false as soon as any patch_reloc reports the value is out of
 * range for the instruction being patched; true on full success.
 */
static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        uintptr_t value = l->u.value;
        TCGRelocation *r;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
|
|
|
|
|
2018-06-15 07:57:03 +02:00
|
|
|
/* Record the current code offset as the jump-reset point for exit @which. */
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
|
|
|
|
|
2020-02-04 12:41:01 +01:00
|
|
|
#include "tcg-target.c.inc"
|
2008-02-01 11:05:41 +01:00
|
|
|
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
/* compare a pointer @ptr and a tb_tc @s */
|
|
|
|
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
|
|
|
|
{
|
|
|
|
if (ptr >= s->ptr + s->size) {
|
|
|
|
return 1;
|
|
|
|
} else if (ptr < s->ptr) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * GTree comparator for tb_tc keys.  Handles both real entries (size != 0)
 * and lookup keys (size == 0), which compare a bare pointer against an
 * entry's covered range.
 */
static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr != b->ptr) {
            return a->ptr > b->ptr ? 1 : -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have the .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key.
     * However the docs provide no guarantee, so we just mark this case
     * as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
|
|
|
|
|
|
|
|
static void tcg_region_trees_init(void)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
|
|
|
|
region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
|
|
|
|
for (i = 0; i < region.n; i++) {
|
|
|
|
struct tcg_region_tree *rt = region_trees + i * tree_size;
|
|
|
|
|
|
|
|
qemu_mutex_init(&rt->lock);
|
|
|
|
rt->tree = g_tree_new(tb_tc_cmp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-28 20:05:44 +01:00
|
|
|
static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp)
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
{
|
2020-10-28 20:05:44 +01:00
|
|
|
void *p = tcg_splitwx_to_rw(cp);
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
size_t region_idx;
|
|
|
|
|
|
|
|
if (p < region.start_aligned) {
|
|
|
|
region_idx = 0;
|
|
|
|
} else {
|
|
|
|
ptrdiff_t offset = p - region.start_aligned;
|
|
|
|
|
|
|
|
if (offset > region.stride * (region.n - 1)) {
|
|
|
|
region_idx = region.n - 1;
|
|
|
|
} else {
|
|
|
|
region_idx = offset / region.stride;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return region_trees + region_idx * tree_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Register @tb in the BST of the region that contains tb->tc.ptr.
 * The per-region lock serializes all accesses to that tree.
 */
void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    /* keyed by &tb->tc so lookups by host PC can find the TB */
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}
|
|
|
|
|
|
|
|
/*
 * Remove @tb from the BST of the region that contains tb->tc.ptr.
 * Counterpart of tcg_tb_insert(); takes the same per-region lock.
 */
void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the TB 'tb' such that
|
|
|
|
* tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
|
|
|
|
* Return NULL if not found.
|
|
|
|
*/
|
|
|
|
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
|
|
|
|
{
|
|
|
|
struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
|
|
|
|
TranslationBlock *tb;
|
|
|
|
struct tb_tc s = { .ptr = (void *)tc_ptr };
|
|
|
|
|
|
|
|
qemu_mutex_lock(&rt->lock);
|
|
|
|
tb = g_tree_lookup(rt->tree, &s);
|
|
|
|
qemu_mutex_unlock(&rt->lock);
|
|
|
|
return tb;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcg_region_tree_lock_all(void)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < region.n; i++) {
|
|
|
|
struct tcg_region_tree *rt = region_trees + i * tree_size;
|
|
|
|
|
|
|
|
qemu_mutex_lock(&rt->lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcg_region_tree_unlock_all(void)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < region.n; i++) {
|
|
|
|
struct tcg_region_tree *rt = region_trees + i * tree_size;
|
|
|
|
|
|
|
|
qemu_mutex_unlock(&rt->lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Invoke @func on every tracked TB, passing @user_data through.
 * All region-tree locks are held for the duration, so the traversal
 * observes a consistent snapshot across all regions.
 */
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t idx;

    tcg_region_tree_lock_all();
    for (idx = 0; idx < region.n; idx++) {
        struct tcg_region_tree *rt = region_trees + idx * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}
|
|
|
|
|
|
|
|
size_t tcg_nb_tbs(void)
|
|
|
|
{
|
|
|
|
size_t nb_tbs = 0;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
tcg_region_tree_lock_all();
|
|
|
|
for (i = 0; i < region.n; i++) {
|
|
|
|
struct tcg_region_tree *rt = region_trees + i * tree_size;
|
|
|
|
|
|
|
|
nb_tbs += g_tree_nnodes(rt->tree);
|
|
|
|
}
|
|
|
|
tcg_region_tree_unlock_all();
|
|
|
|
return nb_tbs;
|
|
|
|
}
|
|
|
|
|
2020-06-12 21:02:28 +02:00
|
|
|
/*
 * GTraverseFunc callback: destroy the TB stored as the node's value.
 * Returning FALSE tells g_tree_foreach() to keep traversing.
 */
static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    tb_destroy((TranslationBlock *)v);
    return FALSE;
}
|
|
|
|
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
static void tcg_region_tree_reset_all(void)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
tcg_region_tree_lock_all();
|
|
|
|
for (i = 0; i < region.n; i++) {
|
|
|
|
struct tcg_region_tree *rt = region_trees + i * tree_size;
|
|
|
|
|
2020-06-12 21:02:28 +02:00
|
|
|
g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
/* Increment the refcount first so that destroy acts as a reset */
|
|
|
|
g_tree_ref(rt->tree);
|
|
|
|
g_tree_destroy(rt->tree);
|
|
|
|
}
|
|
|
|
tcg_region_tree_unlock_all();
|
|
|
|
}
|
|
|
|
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
/*
 * Compute the [start, end) bounds of region @curr_region.
 * Interior regions are region.size bytes at a region.stride pitch from
 * region.start_aligned; the first and last regions are special-cased
 * because they may differ in size (they absorb the unaligned head and
 * the tail of the buffer, respectively).
 */
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start = region.start_aligned + curr_region * region.stride;
    void *end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}
|
|
|
|
|
|
|
|
/*
 * Point context @s's code_gen buffer at region @curr_region.
 * The highwater mark is set TCG_HIGHWATER bytes below the end so that
 * translation can detect "region nearly full" before overrunning it.
 */
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
|
|
|
|
|
|
|
|
/*
 * Hand the next unclaimed region to context @s.
 * Caller must hold region.lock.  Returns true when the buffer is
 * exhausted (no regions left), false on success.
 */
static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        /* all regions already claimed */
        return true;
    }
    tcg_region_assign(s, region.current++);
    return false;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Request a new region once the one in use has filled up.
|
|
|
|
* Returns true on error.
|
|
|
|
*/
|
|
|
|
static bool tcg_region_alloc(TCGContext *s)
|
|
|
|
{
|
|
|
|
bool err;
|
|
|
|
/* read the region size now; alloc__locked will overwrite it on success */
|
|
|
|
size_t size_full = s->code_gen_buffer_size;
|
|
|
|
|
|
|
|
qemu_mutex_lock(®ion.lock);
|
|
|
|
err = tcg_region_alloc__locked(s);
|
|
|
|
if (!err) {
|
|
|
|
region.agg_size_full += size_full - TCG_HIGHWATER;
|
|
|
|
}
|
|
|
|
qemu_mutex_unlock(®ion.lock);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform a context's first region allocation.
|
|
|
|
* This function does _not_ increment region.agg_size_full.
|
|
|
|
*/
|
|
|
|
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
|
|
|
|
{
|
|
|
|
return tcg_region_alloc__locked(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Call from a safe-work context */
|
|
|
|
void tcg_region_reset_all(void)
|
|
|
|
{
|
2020-09-23 12:56:46 +02:00
|
|
|
unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
qemu_mutex_lock(®ion.lock);
|
|
|
|
region.current = 0;
|
|
|
|
region.agg_size_full = 0;
|
|
|
|
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
for (i = 0; i < n_ctxs; i++) {
|
2020-09-23 12:56:46 +02:00
|
|
|
TCGContext *s = qatomic_read(&tcg_ctxs[i]);
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
bool err = tcg_region_initial_alloc__locked(s);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
|
|
|
|
g_assert(!err);
|
|
|
|
}
|
|
|
|
qemu_mutex_unlock(®ion.lock);
|
tcg: track TBs with per-region BST's
This paves the way for enabling scalable parallel generation of TCG code.
Instead of tracking TBs with a single binary search tree (BST), use a
BST for each TCG region, protecting it with a lock. This is as scalable
as it gets, since each TCG thread operates on a separate region.
The core of this change is the introduction of struct tcg_region_tree,
which contains a pointer to a GTree and an associated lock to serialize
accesses to it. We then allocate an array of tcg_region_tree's, adding
the appropriate padding to avoid false sharing based on
qemu_dcache_linesize.
Given a tc_ptr, we first find the corresponding region_tree. This
is done by special-casing the first and last regions first, since they
might be of size != region.size; otherwise we just divide the offset
by region.stride. I was worried about this division (several dozen
cycles of latency), but profiling shows that this is not a fast path.
Note that region.stride is not required to be a power of two; it
is only required to be a multiple of the host's page size.
Note that with this design we can also provide consistent snapshots
about all region trees at once; for instance, tcg_tb_foreach
acquires/releases all region_tree locks before/after iterating over them.
For this reason we now drop tb_lock in dump_exec_info().
As an alternative I considered implementing a concurrent BST, but this
can be tricky to get right, offers no consistent snapshots of the BST,
and performance and scalability-wise I don't think it could ever beat
having separate GTrees, given that our workload is insert-mostly (all
concurrent BST designs I've seen focus, understandably, on making
lookups fast, which comes at the expense of convoluted, non-wait-free
insertions/removals).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-26 22:58:05 +02:00
|
|
|
|
|
|
|
tcg_region_tree_reset_all();
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
}
|
|
|
|
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
/*
 * In user-mode we always use a single region.  The number of guest threads
 * (and hence TCG threads) is only bounded by the host OS, so we cannot
 * guarantee one region per thread; see the rationale documented above
 * tcg_region_init.
 */
static size_t tcg_n_regions(void)
{
    return 1;
}
|
|
|
|
#else
|
|
|
|
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 *
 * Returns the number of regions to split code_gen_buffer into.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
#if !defined(CONFIG_USER_ONLY)
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
#endif
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * Start at 8 regions per vCPU thread and shrink until each region is at
     * least 2 MB, so that region exhaustion (which takes a lock) stays rare.
     */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
|
|
|
|
#endif
|
|
|
|
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
/*
|
|
|
|
* Initializes region partitioning.
|
|
|
|
*
|
|
|
|
* Called at init time from the parent thread (i.e. the one calling
|
|
|
|
* tcg_context_init), after the target's TCG globals have been set.
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
*
|
|
|
|
* Region partitioning works by splitting code_gen_buffer into separate regions,
|
|
|
|
* and then assigning regions to TCG threads so that the threads can translate
|
|
|
|
* code in parallel without synchronization.
|
|
|
|
*
|
|
|
|
* In softmmu the number of TCG threads is bounded by max_cpus, so we use at
|
|
|
|
* least max_cpus regions in MTTCG. In !MTTCG we use a single region.
|
|
|
|
* Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
|
|
|
|
* must have been parsed before calling this function, since it calls
|
|
|
|
* qemu_tcg_mttcg_enabled().
|
|
|
|
*
|
|
|
|
* In user-mode we use a single region. Having multiple regions in user-mode
|
|
|
|
* is not supported, because the number of vCPU threads (recall that each thread
|
|
|
|
* spawned by the guest corresponds to a vCPU thread) is only bounded by the
|
|
|
|
* OS, and usually this number is huge (tens of thousands is not uncommon).
|
|
|
|
* Thus, given this large bound on the number of vCPU threads and the fact
|
|
|
|
* that code_gen_buffer is allocated at compile-time, we cannot guarantee
|
|
|
|
 * the availability of at least one region per vCPU thread.
|
|
|
|
*
|
|
|
|
* However, this user-mode limitation is unlikely to be a significant problem
|
|
|
|
* in practice. Multi-threaded guests share most if not all of their translated
|
|
|
|
* code, which makes parallel code generation less appealing than in softmmu.
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
*/
|
|
|
|
/*
 * Split the single code_gen_buffer into page-aligned regions, each
 * terminated by an inaccessible guard page, and initialize the global
 * 'region' descriptor and the per-region TB trees.  Runs once at startup,
 * before any TCG thread begins translating, so no locking is needed here
 * beyond initializing region.lock for later use.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;
    uintptr_t splitwx_diff;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    /* usable bytes per region: the trailing page is reserved for the guard */
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    splitwx_diff = tcg_splitwx_diff;
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
        /*
         * With split RW/RX mappings (splitwx_diff != 0), the executable
         * mirror lives tcg_splitwx_diff bytes away; protect its guard
         * page as well.
         */
        if (splitwx_diff) {
            rc = qemu_mprotect_none(end + splitwx_diff, page_size);
            g_assert(!rc);
        }
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
|
|
|
|
|
2020-10-28 20:05:44 +01:00
|
|
|
#ifdef CONFIG_DEBUG_TCG
|
|
|
|
const void *tcg_splitwx_to_rx(void *rw)
|
|
|
|
{
|
|
|
|
/* Pass NULL pointers unchanged. */
|
|
|
|
if (rw) {
|
|
|
|
g_assert(in_code_gen_buffer(rw));
|
|
|
|
rw += tcg_splitwx_diff;
|
|
|
|
}
|
|
|
|
return rw;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *tcg_splitwx_to_rw(const void *rx)
|
|
|
|
{
|
|
|
|
/* Pass NULL pointers unchanged. */
|
|
|
|
if (rx) {
|
|
|
|
rx -= tcg_splitwx_diff;
|
|
|
|
/* Assert that we end with a pointer in the rw region. */
|
|
|
|
g_assert(in_code_gen_buffer(rx));
|
|
|
|
}
|
|
|
|
return (void *)rx;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_DEBUG_TCG */
|
|
|
|
|
plugin-gen: add module for TCG-related code
We first inject empty instrumentation from translator_loop.
After translation, we go through the plugins to see what
they want to register for, filling in the empty instrumentation.
If it turns out that some instrumentation remains unused, we
remove it.
This approach supports the following features:
- Inlining TCG code for simple operations. Note that we do not
export TCG ops to plugins. Instead, we give them a C API to
insert inlined ops. So far we only support adding an immediate
to a u64, e.g. to count events.
- "Direct" callbacks. These are callbacks that do not go via
a helper. Instead, the helper is defined at run-time, so that
the plugin code is directly called from TCG. This makes direct
callbacks as efficient as possible; they are therefore used
for very frequent events, e.g. memory callbacks.
- Passing the host address to memory callbacks. Most of this
is implemented in a later patch though.
- Instrumentation of memory accesses performed from helpers.
See the corresponding comment, as well as a later patch.
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: add alloc_tcg_plugin_context, use glib, rm hwaddr]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
2018-12-07 21:33:56 +01:00
|
|
|
/*
 * Allocate the per-context TCG plugin state for @s.
 * A no-op when QEMU is built without plugin support.
 */
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    struct qemu_plugin_tb *ptb = g_new0(struct qemu_plugin_tb, 1);

    /* Instruction records own their cleanup via the free func. */
    ptb->insns = g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
    s->plugin_tb = ptb;
#endif
}
|
|
|
|
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
/*
|
|
|
|
* All TCG threads except the parent (i.e. the one that called tcg_context_init
|
|
|
|
* and registered the target's TCG globals) must register with this function
|
|
|
|
* before initiating translation.
|
|
|
|
*
|
|
|
|
* In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
|
|
|
|
* of tcg_region_init() for the reasoning behind this.
|
|
|
|
*
|
|
|
|
* In softmmu each caller registers its context in tcg_ctxs[]. Note that in
|
|
|
|
* softmmu tcg_ctxs[] does not track tcg_ctx_init, since the initial context
|
|
|
|
* is not used anymore for translation once this function is called.
|
|
|
|
*
|
|
|
|
* Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
|
|
|
|
 * over the array (e.g. tcg_code_size()) the same for both softmmu and user-mode.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
/* In user-mode there is a single TCG context; every thread uses tcg_init_ctx. */
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
|
|
|
|
#else
|
|
|
|
void tcg_register_thread(void)
|
|
|
|
{
|
2019-05-18 22:54:21 +02:00
|
|
|
MachineState *ms = MACHINE(qdev_get_machine());
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
TCGContext *s = g_malloc(sizeof(*s));
|
|
|
|
unsigned int i, n;
|
|
|
|
bool err;
|
|
|
|
|
|
|
|
*s = tcg_init_ctx;
|
|
|
|
|
|
|
|
/* Relink mem_base. */
|
|
|
|
for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
|
|
|
|
if (tcg_init_ctx.temps[i].mem_base) {
|
|
|
|
ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
|
|
|
|
tcg_debug_assert(b >= 0 && b < n);
|
|
|
|
s->temps[i].mem_base = &s->temps[b];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Claim an entry in tcg_ctxs */
|
2020-09-23 12:56:46 +02:00
|
|
|
n = qatomic_fetch_inc(&n_tcg_ctxs);
|
2019-05-18 22:54:21 +02:00
|
|
|
g_assert(n < ms->smp.max_cpus);
|
2020-09-23 12:56:46 +02:00
|
|
|
qatomic_set(&tcg_ctxs[n], s);
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
|
plugin-gen: add module for TCG-related code
We first inject empty instrumentation from translator_loop.
After translation, we go through the plugins to see what
they want to register for, filling in the empty instrumentation.
If if turns out that some instrumentation remains unused, we
remove it.
This approach supports the following features:
- Inlining TCG code for simple operations. Note that we do not
export TCG ops to plugins. Instead, we give them a C API to
insert inlined ops. So far we only support adding an immediate
to a u64, e.g. to count events.
- "Direct" callbacks. These are callbacks that do not go via
a helper. Instead, the helper is defined at run-time, so that
the plugin code is directly called from TCG. This makes direct
callbacks as efficient as possible; they are therefore used
for very frequent events, e.g. memory callbacks.
- Passing the host address to memory callbacks. Most of this
is implemented in a later patch though.
- Instrumentation of memory accesses performed from helpers.
See the corresponding comment, as well as a later patch.
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: add alloc_tcg_plugin_context, use glib, rm hwaddr]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
2018-12-07 21:33:56 +01:00
|
|
|
if (n > 0) {
|
|
|
|
alloc_tcg_plugin_context(s);
|
|
|
|
}
|
|
|
|
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
tcg_ctx = s;
|
|
|
|
qemu_mutex_lock(®ion.lock);
|
|
|
|
err = tcg_region_initial_alloc__locked(tcg_ctx);
|
|
|
|
g_assert(!err);
|
|
|
|
qemu_mutex_unlock(®ion.lock);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
}
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
#endif /* !CONFIG_USER_ONLY */
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns the size (in bytes) of all translated code (i.e. from all regions)
|
|
|
|
* currently in the cache.
|
|
|
|
* See also: tcg_code_capacity()
|
|
|
|
* Do not confuse with tcg_current_code_size(); that one applies to a single
|
|
|
|
* TCG context.
|
|
|
|
*/
|
|
|
|
size_t tcg_code_size(void)
|
|
|
|
{
|
2020-09-23 12:56:46 +02:00
|
|
|
unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
unsigned int i;
|
|
|
|
size_t total;
|
|
|
|
|
|
|
|
qemu_mutex_lock(®ion.lock);
|
|
|
|
total = region.agg_size_full;
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
for (i = 0; i < n_ctxs; i++) {
|
2020-09-23 12:56:46 +02:00
|
|
|
const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
size_t size;
|
|
|
|
|
2020-09-23 12:56:46 +02:00
|
|
|
size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
|
tcg: introduce regions to split code_gen_buffer
This is groundwork for supporting multiple TCG contexts.
The naive solution here is to split code_gen_buffer statically
among the TCG threads; this however results in poor utilization
if translation needs are different across TCG threads.
What we do here is to add an extra layer of indirection, assigning
regions that act just like pages do in virtual memory allocation.
(BTW if you are wondering about the chosen naming, I did not want
to use blocks or pages because those are already heavily used in QEMU).
We use a global lock to serialize allocations as well as statistics
reporting (we now export the size of the used code_gen_buffer with
tcg_code_size()). Note that for the allocator we could just use
a counter and atomic_inc; however, that would complicate the gathering
of tcg_code_size()-like stats. So given that the region operations are
not a fast path, a lock seems the most reasonable choice.
The effectiveness of this approach is clear after seeing some numbers.
I used the bootup+shutdown of debian-arm with '-tb-size 80' as a benchmark.
Note that I'm evaluating this after enabling per-thread TCG (which
is done by a subsequent commit).
* -smp 1, 1 region (entire buffer):
qemu: flush code_size=83885014 nb_tbs=154739 avg_tb_size=357
qemu: flush code_size=83884902 nb_tbs=153136 avg_tb_size=363
qemu: flush code_size=83885014 nb_tbs=152777 avg_tb_size=364
qemu: flush code_size=83884950 nb_tbs=150057 avg_tb_size=373
qemu: flush code_size=83884998 nb_tbs=150234 avg_tb_size=373
qemu: flush code_size=83885014 nb_tbs=154009 avg_tb_size=360
qemu: flush code_size=83885014 nb_tbs=151007 avg_tb_size=370
qemu: flush code_size=83885014 nb_tbs=151816 avg_tb_size=367
That is, 8 flushes.
* -smp 8, 32 regions (80/32 MB per region) [i.e. this patch]:
qemu: flush code_size=76328008 nb_tbs=141040 avg_tb_size=356
qemu: flush code_size=75366534 nb_tbs=138000 avg_tb_size=361
qemu: flush code_size=76864546 nb_tbs=140653 avg_tb_size=361
qemu: flush code_size=76309084 nb_tbs=135945 avg_tb_size=375
qemu: flush code_size=74581856 nb_tbs=132909 avg_tb_size=375
qemu: flush code_size=73927256 nb_tbs=135616 avg_tb_size=360
qemu: flush code_size=78629426 nb_tbs=142896 avg_tb_size=365
qemu: flush code_size=76667052 nb_tbs=138508 avg_tb_size=368
Again, 8 flushes. Note how buffer utilization is not 100%, but it
is close. Smaller region sizes would yield higher utilization,
but we want region allocation to be rare (it acquires a lock), so
we do not want to go too small.
* -smp 8, static partitioning of 8 regions (10 MB per region):
qemu: flush code_size=21936504 nb_tbs=40570 avg_tb_size=354
qemu: flush code_size=11472174 nb_tbs=20633 avg_tb_size=370
qemu: flush code_size=11603976 nb_tbs=21059 avg_tb_size=365
qemu: flush code_size=23254872 nb_tbs=41243 avg_tb_size=377
qemu: flush code_size=28289496 nb_tbs=52057 avg_tb_size=358
qemu: flush code_size=43605160 nb_tbs=78896 avg_tb_size=367
qemu: flush code_size=45166552 nb_tbs=82158 avg_tb_size=364
qemu: flush code_size=63289640 nb_tbs=116494 avg_tb_size=358
qemu: flush code_size=51389960 nb_tbs=93937 avg_tb_size=362
qemu: flush code_size=59665928 nb_tbs=107063 avg_tb_size=372
qemu: flush code_size=38380824 nb_tbs=68597 avg_tb_size=374
qemu: flush code_size=44884568 nb_tbs=79901 avg_tb_size=376
qemu: flush code_size=50782632 nb_tbs=90681 avg_tb_size=374
qemu: flush code_size=39848888 nb_tbs=71433 avg_tb_size=372
qemu: flush code_size=64708840 nb_tbs=119052 avg_tb_size=359
qemu: flush code_size=49830008 nb_tbs=90992 avg_tb_size=362
qemu: flush code_size=68372408 nb_tbs=123442 avg_tb_size=368
qemu: flush code_size=33555560 nb_tbs=59514 avg_tb_size=378
qemu: flush code_size=44748344 nb_tbs=80974 avg_tb_size=367
qemu: flush code_size=37104248 nb_tbs=67609 avg_tb_size=364
That is, 20 flushes. Note how a static partitioning approach uses
the code buffer poorly, leading to many unnecessary flushes.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-08 01:24:20 +02:00
|
|
|
g_assert(size <= s->code_gen_buffer_size);
|
|
|
|
total += size;
|
|
|
|
}
|
|
|
|
qemu_mutex_unlock(®ion.lock);
|
|
|
|
return total;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns the code capacity (in bytes) of the entire cache, i.e. including all
|
|
|
|
* regions.
|
|
|
|
* See also: tcg_code_size()
|
|
|
|
*/
|
|
|
|
size_t tcg_code_capacity(void)
|
|
|
|
{
|
|
|
|
size_t guard_size, capacity;
|
|
|
|
|
|
|
|
/* no need for synchronization; these variables are set at init time */
|
|
|
|
guard_size = region.stride - region.size;
|
|
|
|
capacity = region.end + guard_size - region.start;
|
|
|
|
capacity -= region.n * (guard_size + TCG_HIGHWATER);
|
|
|
|
return capacity;
|
|
|
|
}
|
|
|
|
|
2017-08-01 21:11:12 +02:00
|
|
|
size_t tcg_tb_phys_invalidate_count(void)
|
|
|
|
{
|
2020-09-23 12:56:46 +02:00
|
|
|
unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
|
2017-08-01 21:11:12 +02:00
|
|
|
unsigned int i;
|
|
|
|
size_t total = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < n_ctxs; i++) {
|
2020-09-23 12:56:46 +02:00
|
|
|
const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
|
2017-08-01 21:11:12 +02:00
|
|
|
|
2020-09-23 12:56:46 +02:00
|
|
|
total += qatomic_read(&s->tb_phys_invalidate_count);
|
2017-08-01 21:11:12 +02:00
|
|
|
}
|
|
|
|
return total;
|
|
|
|
}
|
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
/* pool based memory allocation */
/*
 * Slow path of pool allocation: called when the request does not fit in the
 * current chunk.  Storage returned here stays valid until tcg_pool_reset(s).
 * Oversized requests get a dedicated one-shot "large" pool entry; ordinary
 * requests advance through a reusable chain of fixed-size chunks.
 */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        /* Prepend to the large list, freed wholesale by tcg_pool_reset. */
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            /* Nothing active yet: start from the first retained chunk. */
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                /* Grow the chunk chain; chunks are kept across resets. */
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                /* Reuse the next already-allocated chunk. */
                p = p->next;
            }
        }
    }
    /* Make this chunk current and hand out its first 'size' bytes. */
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
|
|
|
|
|
|
|
|
/*
 * Release all "large" one-shot allocations and rewind the regular chunk
 * pool; the chunks themselves are retained for reuse.
 */
void tcg_pool_reset(TCGContext *s)
{
    TCGPool *next;

    /* Large entries are never reused: free the whole list. */
    for (TCGPool *p = s->pool_first_large; p; p = next) {
        next = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;

    /* Regular chunks stay allocated; just reset the cursor state. */
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
|
|
|
|
|
2013-09-15 00:57:22 +02:00
|
|
|
/* Descriptor for one TCG helper function, looked up via helper_table. */
typedef struct TCGHelperInfo {
    void *func;          /* host address of the helper */
    const char *name;    /* symbolic name for logging/disassembly */
    unsigned flags;      /* call flags — presumably TCG_CALL_* values; confirm in helper-tcg.h */
    unsigned sizemask;   /* argument/return size bits — TODO confirm encoding */
} TCGHelperInfo;

#include "exec/helper-proto.h"

/* Table of all helpers, expanded from the helper-tcg.h include. */
static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
/* Maps helper func pointer -> &all_helpers[i]; populated in tcg_context_init. */
static GHashTable *helper_table;

/* Allocation order for indirect globals; written once by tcg_context_init. */
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);
|
2015-08-19 08:23:08 +02:00
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
/*
 * One-time initialization of a TCGContext: opcode argument-constraint
 * storage, the helper lookup table, the target backend, the indirect
 * register-allocation order, the per-mode tcg_ctxs[] array, and the
 * global "env" temp.  Runs before any TCG threads are spawned.
 */
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    /* Carve the single allocation into per-opcode constraint slices. */
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers. */
    /* Use g_direct_hash/equal for direct pointer comparisons on func. */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    /* n = number of call-saved regs at the head; mirror them... */
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    /* ...and keep the remaining (call-clobbered) order unchanged. */
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation tcg_region_init() for the
     * reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    /* Reserve TCG_AREG0 and expose it as the global "env" pointer temp. */
    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
|
2008-05-10 12:52:05 +02:00
|
|
|
|
2017-06-07 01:12:25 +02:00
|
|
|
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    /* Place the TB struct at the next cache-line boundary... */
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    /* ...and start the translated code on the line after it. */
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        /* Current region is (nearly) full: switch regions and retry. */
        if (tcg_region_alloc(s)) {
            /* No regions left — NOTE(review): caller presumably flushes. */
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
|
|
|
|
|
2010-05-06 17:50:41 +02:00
|
|
|
/*
 * Emit the target prologue/epilogue at the very start of code_gen_buffer,
 * then shrink the buffer so that translated TBs are generated after it.
 * Also publishes tcg_qemu_tb_exec and registers the buffer with GDB's JIT
 * interface.
 */
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf_start, *buf_end;

    /* The prologue occupies the beginning of code_gen_buffer. */
    buf_start = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf_start;
    s->code_buf = buf_start;
    s->data_gen_ptr = NULL;

    /*
     * The region trees are not yet configured, but tcg_splitwx_to_rx
     * needs the bounds for an assert.
     */
    region.start = buf_start;
    region.end = buf_start + total_size;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(buf_start);
#endif

    /*
     * Compute a high-water mark, at which we voluntarily flush the buffer
     * and start over.  The size here is arbitrary, significantly larger
     * than we expect the code generation for any one opcode to require.
     */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    buf_end = s->code_ptr;
#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(buf_start),
                        (uintptr_t)buf_start,
                        tcg_ptr_byte_diff(buf_end, buf_start));
#endif

    /* Deduct the prologue from the buffer. */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf_end;
    s->code_gen_buffer = buf_end;
    s->code_buf = buf_end;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(tcg_splitwx_to_rx(s->code_gen_buffer), total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            /* Disassemble the code part, then dump the constant pool. */
            size_t code_size = s->data_gen_ptr - buf_start;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf_start, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf_start, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    /* Assert that goto_ptr is implemented completely. */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(tcg_code_gen_epilogue != NULL);
    }
}
|
|
|
|
|
|
|
|
/*
 * Reset per-translation state so that code generation for a new
 * translation block can begin: drop all non-global temps, clear the
 * op and label lists, and rewind the frame allocator.
 */
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);

    /* Keep only the globals; everything above them is per-TB. */
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}
|
|
|
|
|
2013-09-19 17:46:21 +02:00
|
|
|
/*
 * Claim the next slot in the context's temp array and return it
 * zero-initialized.  Aborts (debug builds) if the array is exhausted.
 */
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int slot = s->nb_temps++;

    tcg_debug_assert(slot < TCG_MAX_TEMPS);
    return memset(&s->temps[slot], 0, sizeof(TCGTemp));
}
|
|
|
|
|
|
|
|
/*
 * Allocate a global temp.  Globals must be allocated before any per-TB
 * temps exist, so nb_globals and nb_temps must still coincide here.
 */
static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;

    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;
    return ts;
}
|
|
|
|
|
2017-10-20 09:05:45 +02:00
|
|
|
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
|
|
|
|
TCGReg reg, const char *name)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
|
|
|
TCGTemp *ts;
|
|
|
|
|
2013-09-18 23:12:53 +02:00
|
|
|
if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
|
2008-02-01 11:05:41 +01:00
|
|
|
tcg_abort();
|
2013-09-18 23:12:53 +02:00
|
|
|
}
|
2013-09-19 17:46:21 +02:00
|
|
|
|
|
|
|
ts = tcg_global_alloc(s);
|
2008-02-01 11:05:41 +01:00
|
|
|
ts->base_type = type;
|
|
|
|
ts->type = type;
|
|
|
|
ts->fixed_reg = 1;
|
|
|
|
ts->reg = reg;
|
|
|
|
ts->name = name;
|
|
|
|
tcg_regset_set_reg(s->reserved_regs, reg);
|
2013-09-19 17:46:21 +02:00
|
|
|
|
2017-10-20 09:05:45 +02:00
|
|
|
return ts;
|
2008-11-17 15:43:54 +01:00
|
|
|
}
|
|
|
|
|
2013-09-18 23:54:45 +02:00
|
|
|
/*
 * Describe the spill area: @size bytes starting at @start, addressed
 * via host register @reg.  The register becomes the fixed "_frame" global.
 */
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
|
|
|
|
|
2017-10-20 09:05:45 +02:00
|
|
|
TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
|
|
|
|
intptr_t offset, const char *name)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2017-07-12 23:15:52 +02:00
|
|
|
TCGContext *s = tcg_ctx;
|
2017-10-20 09:30:24 +02:00
|
|
|
TCGTemp *base_ts = tcgv_ptr_temp(base);
|
2013-09-19 17:46:21 +02:00
|
|
|
TCGTemp *ts = tcg_global_alloc(s);
|
2013-09-19 19:36:18 +02:00
|
|
|
int indirect_reg = 0, bigendian = 0;
|
2013-09-19 17:46:21 +02:00
|
|
|
#ifdef HOST_WORDS_BIGENDIAN
|
|
|
|
bigendian = 1;
|
|
|
|
#endif
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2013-09-19 19:36:18 +02:00
|
|
|
if (!base_ts->fixed_reg) {
|
2016-06-24 05:34:33 +02:00
|
|
|
/* We do not support double-indirect registers. */
|
|
|
|
tcg_debug_assert(!base_ts->indirect_reg);
|
2013-09-19 19:36:18 +02:00
|
|
|
base_ts->indirect_base = 1;
|
2016-06-24 05:34:33 +02:00
|
|
|
s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
|
|
|
|
? 2 : 1);
|
|
|
|
indirect_reg = 1;
|
2013-09-19 19:36:18 +02:00
|
|
|
}
|
|
|
|
|
2013-09-19 17:46:21 +02:00
|
|
|
if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
|
|
|
|
TCGTemp *ts2 = tcg_global_alloc(s);
|
2008-02-01 11:05:41 +01:00
|
|
|
char buf[64];
|
2013-09-19 17:46:21 +02:00
|
|
|
|
|
|
|
ts->base_type = TCG_TYPE_I64;
|
2008-02-01 11:05:41 +01:00
|
|
|
ts->type = TCG_TYPE_I32;
|
2013-09-19 19:36:18 +02:00
|
|
|
ts->indirect_reg = indirect_reg;
|
2008-02-01 11:05:41 +01:00
|
|
|
ts->mem_allocated = 1;
|
2013-09-18 23:12:53 +02:00
|
|
|
ts->mem_base = base_ts;
|
2013-09-19 17:46:21 +02:00
|
|
|
ts->mem_offset = offset + bigendian * 4;
|
2008-02-01 11:05:41 +01:00
|
|
|
pstrcpy(buf, sizeof(buf), name);
|
|
|
|
pstrcat(buf, sizeof(buf), "_0");
|
|
|
|
ts->name = strdup(buf);
|
|
|
|
|
2013-09-19 17:46:21 +02:00
|
|
|
tcg_debug_assert(ts2 == ts + 1);
|
|
|
|
ts2->base_type = TCG_TYPE_I64;
|
|
|
|
ts2->type = TCG_TYPE_I32;
|
2013-09-19 19:36:18 +02:00
|
|
|
ts2->indirect_reg = indirect_reg;
|
2013-09-19 17:46:21 +02:00
|
|
|
ts2->mem_allocated = 1;
|
|
|
|
ts2->mem_base = base_ts;
|
|
|
|
ts2->mem_offset = offset + (1 - bigendian) * 4;
|
2008-02-01 11:05:41 +01:00
|
|
|
pstrcpy(buf, sizeof(buf), name);
|
|
|
|
pstrcat(buf, sizeof(buf), "_1");
|
2016-06-18 02:02:20 +02:00
|
|
|
ts2->name = strdup(buf);
|
2013-09-19 17:46:21 +02:00
|
|
|
} else {
|
2008-02-01 11:05:41 +01:00
|
|
|
ts->base_type = type;
|
|
|
|
ts->type = type;
|
2013-09-19 19:36:18 +02:00
|
|
|
ts->indirect_reg = indirect_reg;
|
2008-02-01 11:05:41 +01:00
|
|
|
ts->mem_allocated = 1;
|
2013-09-18 23:12:53 +02:00
|
|
|
ts->mem_base = base_ts;
|
2008-02-01 11:05:41 +01:00
|
|
|
ts->mem_offset = offset;
|
|
|
|
ts->name = name;
|
|
|
|
}
|
2017-10-20 09:05:45 +02:00
|
|
|
return ts;
|
2008-11-17 15:43:54 +01:00
|
|
|
}
|
|
|
|
|
2018-02-23 03:17:57 +01:00
|
|
|
/*
 * Allocate a per-TB temporary of @type.  Freed temps of matching type
 * and locality are recycled via the free_temps bitmap before new slots
 * are claimed.  On 32-bit hosts a 64-bit temp occupies two consecutive
 * 32-bit slots.
 */
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    /* One free-list per (type, locality) combination. */
    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            /* The high half must land in the adjacent slot. */
            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}
|
|
|
|
|
2017-09-14 22:53:46 +02:00
|
|
|
/*
 * Allocate a vector temporary of the given vector @type.  In debug
 * builds, verify the host backend actually supports that vector width.
 */
TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}
|
|
|
|
|
|
|
|
/* Create a new temp of the same type as an existing temp. */
|
|
|
|
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
|
|
|
|
{
|
|
|
|
TCGTemp *t = tcgv_vec_temp(match);
|
|
|
|
|
|
|
|
tcg_debug_assert(t->temp_allocated != 0);
|
|
|
|
|
|
|
|
t = tcg_temp_new_internal(t->base_type, 0);
|
|
|
|
return temp_tcgv_vec(t);
|
|
|
|
}
|
|
|
|
|
2018-02-23 03:17:57 +01:00
|
|
|
/*
 * Return a per-TB temporary to its (type, locality) free list so that
 * a later tcg_temp_new_internal can recycle it.  Globals are never freed.
 */
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
|
|
|
|
|
2008-11-17 15:43:54 +01:00
|
|
|
/* Allocate a fresh i32 temp initialized to the constant @val. */
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_movi_i32(ret, val);
    return ret;
}
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2008-11-17 15:43:54 +01:00
|
|
|
/* Allocate a fresh i64 temp initialized to the constant @val. */
TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    tcg_gen_movi_i64(ret, val);
    return ret;
}
|
|
|
|
|
2008-11-17 15:43:54 +01:00
|
|
|
/* Allocate a fresh local (call-surviving) i32 temp set to @val. */
TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 ret = tcg_temp_local_new_i32();

    tcg_gen_movi_i32(ret, val);
    return ret;
}
|
|
|
|
|
2008-11-17 15:43:54 +01:00
|
|
|
/* Allocate a fresh local (call-surviving) i64 temp set to @val. */
TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 ret = tcg_temp_local_new_i64();

    tcg_gen_movi_i64(ret, val);
    return ret;
}
|
|
|
|
|
2011-03-06 22:39:53 +01:00
|
|
|
#if defined(CONFIG_DEBUG_TCG)
/* Reset the live-temp counter used to detect leaked temporaries. */
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;

    s->temps_in_use = 0;
}

/*
 * Return 1 if any temporaries are still live (i.e. were leaked by the
 * front end), 0 otherwise.  The counter is reset on detection so a
 * single leak does not produce a cascade of warnings.
 */
int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;

    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
|
|
|
|
|
2017-08-17 16:43:20 +02:00
|
|
|
/* Return true if OP may appear in the opcode stream.
|
|
|
|
Test the runtime variable that controls each opcode. */
|
|
|
|
bool tcg_op_supported(TCGOpcode op)
|
|
|
|
{
|
2017-09-14 22:53:46 +02:00
|
|
|
const bool have_vec
|
|
|
|
= TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
|
|
|
|
|
2017-08-17 16:43:20 +02:00
|
|
|
switch (op) {
|
|
|
|
case INDEX_op_discard:
|
|
|
|
case INDEX_op_set_label:
|
|
|
|
case INDEX_op_call:
|
|
|
|
case INDEX_op_br:
|
|
|
|
case INDEX_op_mb:
|
|
|
|
case INDEX_op_insn_start:
|
|
|
|
case INDEX_op_exit_tb:
|
|
|
|
case INDEX_op_goto_tb:
|
|
|
|
case INDEX_op_qemu_ld_i32:
|
|
|
|
case INDEX_op_qemu_st_i32:
|
|
|
|
case INDEX_op_qemu_ld_i64:
|
|
|
|
case INDEX_op_qemu_st_i64:
|
|
|
|
return true;
|
|
|
|
|
2020-12-09 20:58:39 +01:00
|
|
|
case INDEX_op_qemu_st8_i32:
|
|
|
|
return TCG_TARGET_HAS_qemu_st8_i32;
|
|
|
|
|
2017-08-17 16:43:20 +02:00
|
|
|
case INDEX_op_goto_ptr:
|
|
|
|
return TCG_TARGET_HAS_goto_ptr;
|
|
|
|
|
|
|
|
case INDEX_op_mov_i32:
|
|
|
|
case INDEX_op_movi_i32:
|
|
|
|
case INDEX_op_setcond_i32:
|
|
|
|
case INDEX_op_brcond_i32:
|
|
|
|
case INDEX_op_ld8u_i32:
|
|
|
|
case INDEX_op_ld8s_i32:
|
|
|
|
case INDEX_op_ld16u_i32:
|
|
|
|
case INDEX_op_ld16s_i32:
|
|
|
|
case INDEX_op_ld_i32:
|
|
|
|
case INDEX_op_st8_i32:
|
|
|
|
case INDEX_op_st16_i32:
|
|
|
|
case INDEX_op_st_i32:
|
|
|
|
case INDEX_op_add_i32:
|
|
|
|
case INDEX_op_sub_i32:
|
|
|
|
case INDEX_op_mul_i32:
|
|
|
|
case INDEX_op_and_i32:
|
|
|
|
case INDEX_op_or_i32:
|
|
|
|
case INDEX_op_xor_i32:
|
|
|
|
case INDEX_op_shl_i32:
|
|
|
|
case INDEX_op_shr_i32:
|
|
|
|
case INDEX_op_sar_i32:
|
|
|
|
return true;
|
|
|
|
|
|
|
|
case INDEX_op_movcond_i32:
|
|
|
|
return TCG_TARGET_HAS_movcond_i32;
|
|
|
|
case INDEX_op_div_i32:
|
|
|
|
case INDEX_op_divu_i32:
|
|
|
|
return TCG_TARGET_HAS_div_i32;
|
|
|
|
case INDEX_op_rem_i32:
|
|
|
|
case INDEX_op_remu_i32:
|
|
|
|
return TCG_TARGET_HAS_rem_i32;
|
|
|
|
case INDEX_op_div2_i32:
|
|
|
|
case INDEX_op_divu2_i32:
|
|
|
|
return TCG_TARGET_HAS_div2_i32;
|
|
|
|
case INDEX_op_rotl_i32:
|
|
|
|
case INDEX_op_rotr_i32:
|
|
|
|
return TCG_TARGET_HAS_rot_i32;
|
|
|
|
case INDEX_op_deposit_i32:
|
|
|
|
return TCG_TARGET_HAS_deposit_i32;
|
|
|
|
case INDEX_op_extract_i32:
|
|
|
|
return TCG_TARGET_HAS_extract_i32;
|
|
|
|
case INDEX_op_sextract_i32:
|
|
|
|
return TCG_TARGET_HAS_sextract_i32;
|
2019-02-25 19:29:25 +01:00
|
|
|
case INDEX_op_extract2_i32:
|
|
|
|
return TCG_TARGET_HAS_extract2_i32;
|
2017-08-17 16:43:20 +02:00
|
|
|
case INDEX_op_add2_i32:
|
|
|
|
return TCG_TARGET_HAS_add2_i32;
|
|
|
|
case INDEX_op_sub2_i32:
|
|
|
|
return TCG_TARGET_HAS_sub2_i32;
|
|
|
|
case INDEX_op_mulu2_i32:
|
|
|
|
return TCG_TARGET_HAS_mulu2_i32;
|
|
|
|
case INDEX_op_muls2_i32:
|
|
|
|
return TCG_TARGET_HAS_muls2_i32;
|
|
|
|
case INDEX_op_muluh_i32:
|
|
|
|
return TCG_TARGET_HAS_muluh_i32;
|
|
|
|
case INDEX_op_mulsh_i32:
|
|
|
|
return TCG_TARGET_HAS_mulsh_i32;
|
|
|
|
case INDEX_op_ext8s_i32:
|
|
|
|
return TCG_TARGET_HAS_ext8s_i32;
|
|
|
|
case INDEX_op_ext16s_i32:
|
|
|
|
return TCG_TARGET_HAS_ext16s_i32;
|
|
|
|
case INDEX_op_ext8u_i32:
|
|
|
|
return TCG_TARGET_HAS_ext8u_i32;
|
|
|
|
case INDEX_op_ext16u_i32:
|
|
|
|
return TCG_TARGET_HAS_ext16u_i32;
|
|
|
|
case INDEX_op_bswap16_i32:
|
|
|
|
return TCG_TARGET_HAS_bswap16_i32;
|
|
|
|
case INDEX_op_bswap32_i32:
|
|
|
|
return TCG_TARGET_HAS_bswap32_i32;
|
|
|
|
case INDEX_op_not_i32:
|
|
|
|
return TCG_TARGET_HAS_not_i32;
|
|
|
|
case INDEX_op_neg_i32:
|
|
|
|
return TCG_TARGET_HAS_neg_i32;
|
|
|
|
case INDEX_op_andc_i32:
|
|
|
|
return TCG_TARGET_HAS_andc_i32;
|
|
|
|
case INDEX_op_orc_i32:
|
|
|
|
return TCG_TARGET_HAS_orc_i32;
|
|
|
|
case INDEX_op_eqv_i32:
|
|
|
|
return TCG_TARGET_HAS_eqv_i32;
|
|
|
|
case INDEX_op_nand_i32:
|
|
|
|
return TCG_TARGET_HAS_nand_i32;
|
|
|
|
case INDEX_op_nor_i32:
|
|
|
|
return TCG_TARGET_HAS_nor_i32;
|
|
|
|
case INDEX_op_clz_i32:
|
|
|
|
return TCG_TARGET_HAS_clz_i32;
|
|
|
|
case INDEX_op_ctz_i32:
|
|
|
|
return TCG_TARGET_HAS_ctz_i32;
|
|
|
|
case INDEX_op_ctpop_i32:
|
|
|
|
return TCG_TARGET_HAS_ctpop_i32;
|
|
|
|
|
|
|
|
case INDEX_op_brcond2_i32:
|
|
|
|
case INDEX_op_setcond2_i32:
|
|
|
|
return TCG_TARGET_REG_BITS == 32;
|
|
|
|
|
|
|
|
case INDEX_op_mov_i64:
|
|
|
|
case INDEX_op_movi_i64:
|
|
|
|
case INDEX_op_setcond_i64:
|
|
|
|
case INDEX_op_brcond_i64:
|
|
|
|
case INDEX_op_ld8u_i64:
|
|
|
|
case INDEX_op_ld8s_i64:
|
|
|
|
case INDEX_op_ld16u_i64:
|
|
|
|
case INDEX_op_ld16s_i64:
|
|
|
|
case INDEX_op_ld32u_i64:
|
|
|
|
case INDEX_op_ld32s_i64:
|
|
|
|
case INDEX_op_ld_i64:
|
|
|
|
case INDEX_op_st8_i64:
|
|
|
|
case INDEX_op_st16_i64:
|
|
|
|
case INDEX_op_st32_i64:
|
|
|
|
case INDEX_op_st_i64:
|
|
|
|
case INDEX_op_add_i64:
|
|
|
|
case INDEX_op_sub_i64:
|
|
|
|
case INDEX_op_mul_i64:
|
|
|
|
case INDEX_op_and_i64:
|
|
|
|
case INDEX_op_or_i64:
|
|
|
|
case INDEX_op_xor_i64:
|
|
|
|
case INDEX_op_shl_i64:
|
|
|
|
case INDEX_op_shr_i64:
|
|
|
|
case INDEX_op_sar_i64:
|
|
|
|
case INDEX_op_ext_i32_i64:
|
|
|
|
case INDEX_op_extu_i32_i64:
|
|
|
|
return TCG_TARGET_REG_BITS == 64;
|
|
|
|
|
|
|
|
case INDEX_op_movcond_i64:
|
|
|
|
return TCG_TARGET_HAS_movcond_i64;
|
|
|
|
case INDEX_op_div_i64:
|
|
|
|
case INDEX_op_divu_i64:
|
|
|
|
return TCG_TARGET_HAS_div_i64;
|
|
|
|
case INDEX_op_rem_i64:
|
|
|
|
case INDEX_op_remu_i64:
|
|
|
|
return TCG_TARGET_HAS_rem_i64;
|
|
|
|
case INDEX_op_div2_i64:
|
|
|
|
case INDEX_op_divu2_i64:
|
|
|
|
return TCG_TARGET_HAS_div2_i64;
|
|
|
|
case INDEX_op_rotl_i64:
|
|
|
|
case INDEX_op_rotr_i64:
|
|
|
|
return TCG_TARGET_HAS_rot_i64;
|
|
|
|
case INDEX_op_deposit_i64:
|
|
|
|
return TCG_TARGET_HAS_deposit_i64;
|
|
|
|
case INDEX_op_extract_i64:
|
|
|
|
return TCG_TARGET_HAS_extract_i64;
|
|
|
|
case INDEX_op_sextract_i64:
|
|
|
|
return TCG_TARGET_HAS_sextract_i64;
|
2019-02-25 19:29:25 +01:00
|
|
|
case INDEX_op_extract2_i64:
|
|
|
|
return TCG_TARGET_HAS_extract2_i64;
|
2017-08-17 16:43:20 +02:00
|
|
|
case INDEX_op_extrl_i64_i32:
|
|
|
|
return TCG_TARGET_HAS_extrl_i64_i32;
|
|
|
|
case INDEX_op_extrh_i64_i32:
|
|
|
|
return TCG_TARGET_HAS_extrh_i64_i32;
|
|
|
|
case INDEX_op_ext8s_i64:
|
|
|
|
return TCG_TARGET_HAS_ext8s_i64;
|
|
|
|
case INDEX_op_ext16s_i64:
|
|
|
|
return TCG_TARGET_HAS_ext16s_i64;
|
|
|
|
case INDEX_op_ext32s_i64:
|
|
|
|
return TCG_TARGET_HAS_ext32s_i64;
|
|
|
|
case INDEX_op_ext8u_i64:
|
|
|
|
return TCG_TARGET_HAS_ext8u_i64;
|
|
|
|
case INDEX_op_ext16u_i64:
|
|
|
|
return TCG_TARGET_HAS_ext16u_i64;
|
|
|
|
case INDEX_op_ext32u_i64:
|
|
|
|
return TCG_TARGET_HAS_ext32u_i64;
|
|
|
|
case INDEX_op_bswap16_i64:
|
|
|
|
return TCG_TARGET_HAS_bswap16_i64;
|
|
|
|
case INDEX_op_bswap32_i64:
|
|
|
|
return TCG_TARGET_HAS_bswap32_i64;
|
|
|
|
case INDEX_op_bswap64_i64:
|
|
|
|
return TCG_TARGET_HAS_bswap64_i64;
|
|
|
|
case INDEX_op_not_i64:
|
|
|
|
return TCG_TARGET_HAS_not_i64;
|
|
|
|
case INDEX_op_neg_i64:
|
|
|
|
return TCG_TARGET_HAS_neg_i64;
|
|
|
|
case INDEX_op_andc_i64:
|
|
|
|
return TCG_TARGET_HAS_andc_i64;
|
|
|
|
case INDEX_op_orc_i64:
|
|
|
|
return TCG_TARGET_HAS_orc_i64;
|
|
|
|
case INDEX_op_eqv_i64:
|
|
|
|
return TCG_TARGET_HAS_eqv_i64;
|
|
|
|
case INDEX_op_nand_i64:
|
|
|
|
return TCG_TARGET_HAS_nand_i64;
|
|
|
|
case INDEX_op_nor_i64:
|
|
|
|
return TCG_TARGET_HAS_nor_i64;
|
|
|
|
case INDEX_op_clz_i64:
|
|
|
|
return TCG_TARGET_HAS_clz_i64;
|
|
|
|
case INDEX_op_ctz_i64:
|
|
|
|
return TCG_TARGET_HAS_ctz_i64;
|
|
|
|
case INDEX_op_ctpop_i64:
|
|
|
|
return TCG_TARGET_HAS_ctpop_i64;
|
|
|
|
case INDEX_op_add2_i64:
|
|
|
|
return TCG_TARGET_HAS_add2_i64;
|
|
|
|
case INDEX_op_sub2_i64:
|
|
|
|
return TCG_TARGET_HAS_sub2_i64;
|
|
|
|
case INDEX_op_mulu2_i64:
|
|
|
|
return TCG_TARGET_HAS_mulu2_i64;
|
|
|
|
case INDEX_op_muls2_i64:
|
|
|
|
return TCG_TARGET_HAS_muls2_i64;
|
|
|
|
case INDEX_op_muluh_i64:
|
|
|
|
return TCG_TARGET_HAS_muluh_i64;
|
|
|
|
case INDEX_op_mulsh_i64:
|
|
|
|
return TCG_TARGET_HAS_mulsh_i64;
|
|
|
|
|
2017-09-14 22:53:46 +02:00
|
|
|
case INDEX_op_mov_vec:
|
|
|
|
case INDEX_op_dup_vec:
|
|
|
|
case INDEX_op_dupi_vec:
|
tcg: Add INDEX_op_dupm_vec
Allow the backend to expand dup from memory directly, instead of
forcing the value into a temp first. This is especially important
if integer/vector register moves do not exist.
Note that officially tcg_out_dupm_vec is allowed to fail.
If it did, we could fix this up relatively easily:
VECE == 32/64:
Load the value into a vector register, then dup.
Both of these must work.
VECE == 8/16:
If the value happens to be at an offset such that an aligned
load would place the desired value in the least significant
end of the register, go ahead and load w/garbage in high bits.
Load the value w/INDEX_op_ld{8,16}_i32.
Attempt a move directly to vector reg, which may fail.
Store the value into the backing store for OTS.
Load the value into the vector reg w/TCG_TYPE_I32, which must work.
Duplicate from the vector reg into itself, which must work.
All of which is well and good, except that all supported
hosts can support dupm for all vece, so all of the failure
paths would be dead code and untestable.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2019-03-17 02:55:22 +01:00
|
|
|
case INDEX_op_dupm_vec:
|
2017-09-14 22:53:46 +02:00
|
|
|
case INDEX_op_ld_vec:
|
|
|
|
case INDEX_op_st_vec:
|
|
|
|
case INDEX_op_add_vec:
|
|
|
|
case INDEX_op_sub_vec:
|
|
|
|
case INDEX_op_and_vec:
|
|
|
|
case INDEX_op_or_vec:
|
|
|
|
case INDEX_op_xor_vec:
|
2017-11-17 20:47:42 +01:00
|
|
|
case INDEX_op_cmp_vec:
|
2017-09-14 22:53:46 +02:00
|
|
|
return have_vec;
|
|
|
|
case INDEX_op_dup2_vec:
|
|
|
|
return have_vec && TCG_TARGET_REG_BITS == 32;
|
|
|
|
case INDEX_op_not_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_not_vec;
|
|
|
|
case INDEX_op_neg_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_neg_vec;
|
2019-04-18 01:53:02 +02:00
|
|
|
case INDEX_op_abs_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_abs_vec;
|
2017-09-14 22:53:46 +02:00
|
|
|
case INDEX_op_andc_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_andc_vec;
|
|
|
|
case INDEX_op_orc_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_orc_vec;
|
2017-11-21 10:11:14 +01:00
|
|
|
case INDEX_op_mul_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_mul_vec;
|
2017-11-17 14:35:11 +01:00
|
|
|
case INDEX_op_shli_vec:
|
|
|
|
case INDEX_op_shri_vec:
|
|
|
|
case INDEX_op_sari_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_shi_vec;
|
|
|
|
case INDEX_op_shls_vec:
|
|
|
|
case INDEX_op_shrs_vec:
|
|
|
|
case INDEX_op_sars_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_shs_vec;
|
|
|
|
case INDEX_op_shlv_vec:
|
|
|
|
case INDEX_op_shrv_vec:
|
|
|
|
case INDEX_op_sarv_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_shv_vec;
|
2020-04-20 03:01:52 +02:00
|
|
|
case INDEX_op_rotli_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_roti_vec;
|
2020-04-20 17:22:44 +02:00
|
|
|
case INDEX_op_rotls_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_rots_vec;
|
2020-04-20 04:47:59 +02:00
|
|
|
case INDEX_op_rotlv_vec:
|
|
|
|
case INDEX_op_rotrv_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_rotv_vec;
|
2018-12-18 03:01:47 +01:00
|
|
|
case INDEX_op_ssadd_vec:
|
|
|
|
case INDEX_op_usadd_vec:
|
|
|
|
case INDEX_op_sssub_vec:
|
|
|
|
case INDEX_op_ussub_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_sat_vec;
|
2018-12-18 04:35:46 +01:00
|
|
|
case INDEX_op_smin_vec:
|
|
|
|
case INDEX_op_umin_vec:
|
|
|
|
case INDEX_op_smax_vec:
|
|
|
|
case INDEX_op_umax_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_minmax_vec;
|
2019-04-30 20:02:23 +02:00
|
|
|
case INDEX_op_bitsel_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_bitsel_vec;
|
2019-04-30 22:01:12 +02:00
|
|
|
case INDEX_op_cmpsel_vec:
|
|
|
|
return have_vec && TCG_TARGET_HAS_cmpsel_vec;
|
2017-09-14 22:53:46 +02:00
|
|
|
|
2017-09-15 23:11:45 +02:00
|
|
|
default:
|
|
|
|
tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
|
|
|
|
return true;
|
2017-08-17 16:43:20 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-22 16:59:57 +02:00
|
|
|
/* Note: we convert the 64 bit args to 32 bit and do some alignment
|
|
|
|
and endian swap. Maybe it would be better to do the alignment
|
|
|
|
and endian swap in tcg_reg_alloc_call(). */
|
2017-10-15 22:27:56 +02:00
|
|
|
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2016-12-08 19:52:57 +01:00
|
|
|
int i, real_args, nb_rets, pi;
|
2014-04-08 17:39:43 +02:00
|
|
|
unsigned sizemask, flags;
|
2014-04-08 00:10:05 +02:00
|
|
|
TCGHelperInfo *info;
|
2016-12-08 19:52:57 +01:00
|
|
|
TCGOp *op;
|
2014-04-08 00:10:05 +02:00
|
|
|
|
2017-07-06 00:41:23 +02:00
|
|
|
info = g_hash_table_lookup(helper_table, (gpointer)func);
|
2014-04-08 17:39:43 +02:00
|
|
|
flags = info->flags;
|
|
|
|
sizemask = info->sizemask;
|
2010-06-15 02:35:27 +02:00
|
|
|
|
plugin-gen: add module for TCG-related code
We first inject empty instrumentation from translator_loop.
After translation, we go through the plugins to see what
they want to register for, filling in the empty instrumentation.
If if turns out that some instrumentation remains unused, we
remove it.
This approach supports the following features:
- Inlining TCG code for simple operations. Note that we do not
export TCG ops to plugins. Instead, we give them a C API to
insert inlined ops. So far we only support adding an immediate
to a u64, e.g. to count events.
- "Direct" callbacks. These are callbacks that do not go via
a helper. Instead, the helper is defined at run-time, so that
the plugin code is directly called from TCG. This makes direct
callbacks as efficient as possible; they are therefore used
for very frequent events, e.g. memory callbacks.
- Passing the host address to memory callbacks. Most of this
is implemented in a later patch though.
- Instrumentation of memory accesses performed from helpers.
See the corresponding comment, as well as a later patch.
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: add alloc_tcg_plugin_context, use glib, rm hwaddr]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
2018-12-07 21:33:56 +01:00
|
|
|
#ifdef CONFIG_PLUGIN
|
|
|
|
/* detect non-plugin helpers */
|
|
|
|
if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
|
|
|
|
tcg_ctx->plugin_insn->calls_helpers = true;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-03-04 22:39:48 +01:00
|
|
|
#if defined(__sparc__) && !defined(__arch64__) \
|
|
|
|
&& !defined(CONFIG_TCG_INTERPRETER)
|
|
|
|
/* We have 64-bit values in one register, but need to pass as two
|
|
|
|
separate parameters. Split them. */
|
|
|
|
int orig_sizemask = sizemask;
|
|
|
|
int orig_nargs = nargs;
|
|
|
|
TCGv_i64 retl, reth;
|
2017-10-15 22:27:56 +02:00
|
|
|
TCGTemp *split_args[MAX_OPC_PARAM];
|
2014-03-04 22:39:48 +01:00
|
|
|
|
2017-11-02 12:47:37 +01:00
|
|
|
retl = NULL;
|
|
|
|
reth = NULL;
|
2014-03-04 22:39:48 +01:00
|
|
|
if (sizemask != 0) {
|
|
|
|
for (i = real_args = 0; i < nargs; ++i) {
|
|
|
|
int is_64bit = sizemask & (1 << (i+1)*2);
|
|
|
|
if (is_64bit) {
|
2017-10-20 09:05:45 +02:00
|
|
|
TCGv_i64 orig = temp_tcgv_i64(args[i]);
|
2014-03-04 22:39:48 +01:00
|
|
|
TCGv_i32 h = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 l = tcg_temp_new_i32();
|
|
|
|
tcg_gen_extr_i64_i32(l, h, orig);
|
2017-10-15 22:27:56 +02:00
|
|
|
split_args[real_args++] = tcgv_i32_temp(h);
|
|
|
|
split_args[real_args++] = tcgv_i32_temp(l);
|
2014-03-04 22:39:48 +01:00
|
|
|
} else {
|
|
|
|
split_args[real_args++] = args[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nargs = real_args;
|
|
|
|
args = split_args;
|
|
|
|
sizemask = 0;
|
|
|
|
}
|
|
|
|
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
|
2010-06-15 02:35:27 +02:00
|
|
|
for (i = 0; i < nargs; ++i) {
|
|
|
|
int is_64bit = sizemask & (1 << (i+1)*2);
|
|
|
|
int is_signed = sizemask & (2 << (i+1)*2);
|
|
|
|
if (!is_64bit) {
|
|
|
|
TCGv_i64 temp = tcg_temp_new_i64();
|
2017-10-20 09:05:45 +02:00
|
|
|
TCGv_i64 orig = temp_tcgv_i64(args[i]);
|
2010-06-15 02:35:27 +02:00
|
|
|
if (is_signed) {
|
|
|
|
tcg_gen_ext32s_i64(temp, orig);
|
|
|
|
} else {
|
|
|
|
tcg_gen_ext32u_i64(temp, orig);
|
|
|
|
}
|
2017-10-15 22:27:56 +02:00
|
|
|
args[i] = tcgv_i64_temp(temp);
|
2010-06-15 02:35:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* TCG_TARGET_EXTEND_ARGS */
|
|
|
|
|
2017-11-02 15:19:14 +01:00
|
|
|
op = tcg_emit_op(INDEX_op_call);
|
2016-12-08 19:52:57 +01:00
|
|
|
|
|
|
|
pi = 0;
|
2017-10-15 22:27:56 +02:00
|
|
|
if (ret != NULL) {
|
2014-03-04 22:39:48 +01:00
|
|
|
#if defined(__sparc__) && !defined(__arch64__) \
|
|
|
|
&& !defined(CONFIG_TCG_INTERPRETER)
|
|
|
|
if (orig_sizemask & 1) {
|
|
|
|
/* The 32-bit ABI is going to return the 64-bit value in
|
|
|
|
the %o0/%o1 register pair. Prepare for this by using
|
|
|
|
two return temporaries, and reassemble below. */
|
|
|
|
retl = tcg_temp_new_i64();
|
|
|
|
reth = tcg_temp_new_i64();
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = tcgv_i64_arg(reth);
|
|
|
|
op->args[pi++] = tcgv_i64_arg(retl);
|
2014-03-04 22:39:48 +01:00
|
|
|
nb_rets = 2;
|
|
|
|
} else {
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(ret);
|
2014-03-04 22:39:48 +01:00
|
|
|
nb_rets = 1;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
|
2014-03-31 23:09:13 +02:00
|
|
|
#ifdef HOST_WORDS_BIGENDIAN
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(ret + 1);
|
|
|
|
op->args[pi++] = temp_arg(ret);
|
2008-05-22 16:59:57 +02:00
|
|
|
#else
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(ret);
|
|
|
|
op->args[pi++] = temp_arg(ret + 1);
|
2008-05-22 16:59:57 +02:00
|
|
|
#endif
|
2008-11-17 15:43:54 +01:00
|
|
|
nb_rets = 2;
|
2014-03-04 22:39:48 +01:00
|
|
|
} else {
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(ret);
|
2008-11-17 15:43:54 +01:00
|
|
|
nb_rets = 1;
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2014-03-04 22:39:48 +01:00
|
|
|
#endif
|
2008-11-17 15:43:54 +01:00
|
|
|
} else {
|
|
|
|
nb_rets = 0;
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2017-11-14 13:02:51 +01:00
|
|
|
TCGOP_CALLO(op) = nb_rets;
|
2016-12-08 19:52:57 +01:00
|
|
|
|
2008-11-17 15:43:54 +01:00
|
|
|
real_args = 0;
|
|
|
|
for (i = 0; i < nargs; i++) {
|
2010-06-15 02:35:27 +02:00
|
|
|
int is_64bit = sizemask & (1 << (i+1)*2);
|
2014-04-08 17:39:43 +02:00
|
|
|
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
|
2008-05-22 16:59:57 +02:00
|
|
|
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
|
|
|
|
/* some targets want aligned 64 bit args */
|
2008-11-29 20:55:15 +01:00
|
|
|
if (real_args & 1) {
|
2016-12-08 19:52:57 +01:00
|
|
|
op->args[pi++] = TCG_CALL_DUMMY_ARG;
|
2008-11-29 20:55:15 +01:00
|
|
|
real_args++;
|
2008-05-22 16:59:57 +02:00
|
|
|
}
|
|
|
|
#endif
|
2016-06-24 05:34:22 +02:00
|
|
|
/* If stack grows up, then we will be placing successive
|
|
|
|
arguments at lower addresses, which means we need to
|
|
|
|
reverse the order compared to how we would normally
|
|
|
|
treat either big or little-endian. For those arguments
|
|
|
|
that will wind up in registers, this still works for
|
|
|
|
HPPA (the only current STACK_GROWSUP target) since the
|
|
|
|
argument registers are *also* allocated in decreasing
|
|
|
|
order. If another such target is added, this logic may
|
|
|
|
have to get more complicated to differentiate between
|
|
|
|
stack arguments and register arguments. */
|
2014-03-31 23:09:13 +02:00
|
|
|
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(args[i] + 1);
|
|
|
|
op->args[pi++] = temp_arg(args[i]);
|
2008-02-01 11:05:41 +01:00
|
|
|
#else
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(args[i]);
|
|
|
|
op->args[pi++] = temp_arg(args[i] + 1);
|
2008-02-01 11:05:41 +01:00
|
|
|
#endif
|
2008-11-17 15:43:54 +01:00
|
|
|
real_args += 2;
|
2010-06-15 02:35:27 +02:00
|
|
|
continue;
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2010-06-15 02:35:27 +02:00
|
|
|
|
2017-10-15 22:27:56 +02:00
|
|
|
op->args[pi++] = temp_arg(args[i]);
|
2010-06-15 02:35:27 +02:00
|
|
|
real_args++;
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2016-12-08 19:52:57 +01:00
|
|
|
op->args[pi++] = (uintptr_t)func;
|
|
|
|
op->args[pi++] = flags;
|
2017-11-14 13:02:51 +01:00
|
|
|
TCGOP_CALLI(op) = real_args;
|
2008-11-17 15:43:54 +01:00
|
|
|
|
2016-12-08 19:52:57 +01:00
|
|
|
/* Make sure the fields didn't overflow. */
|
2017-11-14 13:02:51 +01:00
|
|
|
tcg_debug_assert(TCGOP_CALLI(op) == real_args);
|
2016-12-08 19:52:57 +01:00
|
|
|
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
|
2010-06-15 02:35:27 +02:00
|
|
|
|
2014-03-04 22:39:48 +01:00
|
|
|
#if defined(__sparc__) && !defined(__arch64__) \
|
|
|
|
&& !defined(CONFIG_TCG_INTERPRETER)
|
|
|
|
/* Free all of the parts we allocated above. */
|
|
|
|
for (i = real_args = 0; i < orig_nargs; ++i) {
|
|
|
|
int is_64bit = orig_sizemask & (1 << (i+1)*2);
|
|
|
|
if (is_64bit) {
|
2017-10-20 09:05:45 +02:00
|
|
|
tcg_temp_free_internal(args[real_args++]);
|
|
|
|
tcg_temp_free_internal(args[real_args++]);
|
2014-03-04 22:39:48 +01:00
|
|
|
} else {
|
|
|
|
real_args++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (orig_sizemask & 1) {
|
|
|
|
/* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
|
|
|
|
Note that describing these as TCGv_i64 eliminates an unnecessary
|
|
|
|
zero-extension that tcg_gen_concat_i32_i64 would create. */
|
2017-10-20 09:05:45 +02:00
|
|
|
tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
|
2014-03-04 22:39:48 +01:00
|
|
|
tcg_temp_free_i64(retl);
|
|
|
|
tcg_temp_free_i64(reth);
|
|
|
|
}
|
|
|
|
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
|
2010-06-15 02:35:27 +02:00
|
|
|
for (i = 0; i < nargs; ++i) {
|
|
|
|
int is_64bit = sizemask & (1 << (i+1)*2);
|
|
|
|
if (!is_64bit) {
|
2017-10-20 09:05:45 +02:00
|
|
|
tcg_temp_free_internal(args[i]);
|
2010-06-15 02:35:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* TCG_TARGET_EXTEND_ARGS */
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2008-08-17 22:26:25 +02:00
|
|
|
/*
 * Reset register-allocation state at the start of a translation block.
 * Globals begin in their fixed register or in memory; other temps begin
 * dead (or in memory for local temps), and no host register holds a temp.
 */
static void tcg_reg_alloc_start(TCGContext *s)
{
    int idx;
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;

    /* Globals: live in their reserved register, or resident in memory. */
    for (idx = 0; idx < nb_globals; idx++) {
        TCGTemp *ts = &s->temps[idx];
        ts->val_type = ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM;
    }
    /* Non-global temps: locals start in memory, plain temps start dead. */
    for (idx = nb_globals; idx < nb_temps; idx++) {
        TCGTemp *ts = &s->temps[idx];
        ts->val_type = ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    /* No host register currently maps to any temp. */
    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
|
|
|
|
|
2013-09-19 00:21:56 +02:00
|
|
|
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
|
|
|
|
TCGTemp *ts)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2017-06-20 21:24:57 +02:00
|
|
|
int idx = temp_idx(ts);
|
2008-02-03 20:56:33 +01:00
|
|
|
|
2016-11-02 18:20:15 +01:00
|
|
|
if (ts->temp_global) {
|
2008-02-03 20:56:33 +01:00
|
|
|
pstrcpy(buf, buf_size, ts->name);
|
2013-09-19 00:21:56 +02:00
|
|
|
} else if (ts->temp_local) {
|
|
|
|
snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
|
2008-02-01 11:05:41 +01:00
|
|
|
} else {
|
2013-09-19 00:21:56 +02:00
|
|
|
snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2017-06-20 08:18:10 +02:00
|
|
|
/* Format the temp behind TCGArg ARG into BUF; convenience wrapper
   around tcg_get_arg_str_ptr. */
static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    TCGTemp *ts = arg_temp(arg);

    return tcg_get_arg_str_ptr(s, buf, buf_size, ts);
}
|
|
|
|
|
2013-09-14 23:37:06 +02:00
|
|
|
/* Find helper name. */
|
|
|
|
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
|
2008-05-22 18:08:32 +02:00
|
|
|
{
|
2013-09-14 23:37:06 +02:00
|
|
|
const char *ret = NULL;
|
2017-07-06 00:41:23 +02:00
|
|
|
if (helper_table) {
|
|
|
|
TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
|
2014-04-08 09:17:53 +02:00
|
|
|
if (info) {
|
|
|
|
ret = info->name;
|
|
|
|
}
|
2008-05-22 18:08:32 +02:00
|
|
|
}
|
2013-09-14 23:37:06 +02:00
|
|
|
return ret;
|
2008-05-22 18:08:32 +02:00
|
|
|
}
|
|
|
|
|
2008-09-14 09:45:17 +02:00
|
|
|
/*
 * Printable names for TCGCond values, indexed by condition code.
 * Used by tcg_dump_ops; unlisted entries are NULL and fall back to
 * a numeric dump of the raw condition value.
 */
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
|
|
|
|
|
2013-09-04 17:11:05 +02:00
|
|
|
/*
 * Printable names for memory-operation size/sign/endianness, indexed by
 * the (MO_BSWAP | MO_SSIZE) bits of a MemOp.  Used when dumping
 * qemu_ld/qemu_st ops.
 */
static const char * const ldst_name[] =
{
    [MO_UB] = "ub",
    [MO_SB] = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ] = "beq",
};
|
|
|
|
|
2016-06-23 20:16:46 +02:00
|
|
|
/*
 * Printable names for the alignment field of a MemOp, indexed by
 * (MO_AMASK >> MO_ASHIFT).  The label for the target's default
 * alignment is empty, so only the non-default case is annotated:
 * with TARGET_ALIGNED_ONLY, aligned is the default and "un+" marks
 * unaligned accesses; otherwise "al+" marks explicitly aligned ones.
 */
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef TARGET_ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
|
|
|
|
|
2018-11-27 16:16:21 +01:00
|
|
|
/* True if at most one register is set in D (including the empty set). */
static inline bool tcg_regset_single(TCGRegSet d)
{
    /* Clearing the lowest set bit leaves zero iff <= 1 bit was set. */
    TCGRegSet without_lowest = d & (d - 1);

    return without_lowest == 0;
}
|
|
|
|
|
|
|
|
/* Return the lowest-numbered register present in D.  The register set
   is held in 32 or 64 bits depending on how many registers the target
   has, so pick the matching count-trailing-zeros primitive. */
static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    return TCG_TARGET_NB_REGS <= 32 ? ctz32(d) : ctz64(d);
}
|
|
|
|
|
2018-11-27 21:46:00 +01:00
|
|
|
/*
 * Dump the current op list of S to the qemu log, one op per line.
 * If HAVE_PREFS is set, also print the output-register preference set
 * for each output argument; liveness annotations (sync/dead) are printed
 * whenever op->life is populated.
 */
static void tcg_dump_ops(TCGContext *s, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;   /* column count, used to align the annotations */

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += qemu_log("\n ----");

            /* Print the guest PC (and other unwind words) for this insn. */
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                /* Each word is stored as a 32-bit low/high pair. */
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                /* Dummy args pad 64-bit values on 32-bit hosts. */
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                /* Vector ops carry total width and element size bits. */
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            /* k walks op->args across outputs, inputs and constants. */
            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            /* First constant arg of compares is a condition code:
               print it symbolically when possible. */
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st8_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    /* Decode the combined memop+mmu-index constant. */
                    TCGMemOpIdx oi = op->args[k++];
                    /* NOTE(review): this 'op' shadows the outer TCGOp *op;
                       harmless here but worth renaming. */
                    MemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        /* Unexpected bits set: dump numerically. */
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            /* Branch-family ops carry a label as a constant arg. */
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            /* Remaining constant args are printed numerically. */
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            QemuLogFile *logfile;

            /* Pad to column 40 so annotations line up. */
            rcu_read_lock();
            logfile = qatomic_rcu_read(&qemu_logfile);
            if (logfile) {
                for (; col < 40; ++col) {
                    putc(' ', logfile->fd);
                }
            }
            rcu_read_unlock();
        }

        if (op->life) {
            unsigned life = op->life;

            /* Args that must be synced to memory before this op. */
            if (life & (SYNC_ARG * 3)) {
                qemu_log(" sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            /* Args that die at this op. */
            life /= DEAD_ARG;
            if (life) {
                qemu_log(" dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            /* Register preference set for each output argument. */
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = op->output_pref[i];

                if (i == 0) {
                    qemu_log(" pref=");
                } else {
                    qemu_log(",");
                }
                if (set == 0) {
                    qemu_log("none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    qemu_log("all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    qemu_log("%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    qemu_log("%#x", (uint32_t)set);
                } else {
                    qemu_log("%#" PRIx64, (uint64_t)set);
                }
            }
        }

        qemu_log("\n");
    }
}
|
|
|
|
|
|
|
|
/* we give more priority to constraints with less registers */
|
|
|
|
static int get_constraint_priority(const TCGOpDef *def, int k)
|
|
|
|
{
|
2020-09-04 00:56:24 +02:00
|
|
|
const TCGArgConstraint *arg_ct = &def->args_ct[k];
|
|
|
|
int n;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2019-04-05 04:34:19 +02:00
|
|
|
if (arg_ct->oalias) {
|
2008-02-01 11:05:41 +01:00
|
|
|
/* an alias is equivalent to a single register */
|
|
|
|
n = 1;
|
|
|
|
} else {
|
2020-09-04 00:56:24 +02:00
|
|
|
n = ctpop64(arg_ct->regs);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
return TCG_TARGET_NB_REGS - n + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* sort from highest priority to lowest */
|
|
|
|
static void sort_constraints(TCGOpDef *def, int start, int n)
|
|
|
|
{
|
2019-04-04 04:37:38 +02:00
|
|
|
int i, j;
|
|
|
|
TCGArgConstraint *a = def->args_ct;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2019-04-04 04:37:38 +02:00
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
a[start + i].sort_index = start + i;
|
|
|
|
}
|
|
|
|
if (n <= 1) {
|
2008-02-01 11:05:41 +01:00
|
|
|
return;
|
2019-04-04 04:37:38 +02:00
|
|
|
}
|
|
|
|
for (i = 0; i < n - 1; i++) {
|
|
|
|
for (j = i + 1; j < n; j++) {
|
|
|
|
int p1 = get_constraint_priority(def, a[start + i].sort_index);
|
|
|
|
int p2 = get_constraint_priority(def, a[start + j].sort_index);
|
2008-02-01 11:05:41 +01:00
|
|
|
if (p1 < p2) {
|
2019-04-04 04:37:38 +02:00
|
|
|
int tmp = a[start + i].sort_index;
|
|
|
|
a[start + i].sort_index = a[start + j].sort_index;
|
|
|
|
a[start + j].sort_index = tmp;
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-18 09:31:40 +01:00
|
|
|
/*
 * Parse the target backend's constraint strings into the args_ct array
 * of every TCGOpDef.  For each op the backend supplies one constraint
 * string per argument (outputs first, then inputs); this routine decodes
 * the generic prefixes ('0'-'9' alias, '&' new register, 'i' constant)
 * and defers everything else to target_parse_constraint().  Finally the
 * constraints are sorted by allocation priority.
 */
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        /* Op not implemented by this backend configuration. */
        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        /* Register width the backend should assume for 'r'-style
           constraints of this op. */
        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        /* Input aliased to output register <digit>.
                           Must be the whole constraint string, and the
                           referenced output must already be parsed. */
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].regs != 0);
                        def->args_ct[i] = def->args_ct[oarg];
                        /* The output sets oalias. */
                        def->args_ct[oarg].oalias = true;
                        def->args_ct[oarg].alias_index = i;
                        /* The input sets ialias. */
                        def->args_ct[i].ialias = true;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    /* Output must not overlap any input register. */
                    def->args_ct[i].newreg = true;
                    ct_str++;
                    break;
                case 'i':
                    /* Argument may be an immediate constant. */
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    /* Backend-specific constraint letter. */
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
|
|
|
|
|
2014-03-31 01:51:54 +02:00
|
|
|
/*
 * Remove OP from the op list of S and put it on the free list for
 * reuse.  Branch ops reference a TCGLabel, whose reference count is
 * kept in sync here so reachable_code_pass can trust label->refs.
 */
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    int label_arg;

    /* Which argument slot, if any, holds the branch target label. */
    switch (op->opc) {
    case INDEX_op_br:
        label_arg = 0;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label_arg = 3;
        break;
    case INDEX_op_brcond2_i32:
        label_arg = 5;
        break;
    default:
        label_arg = -1;
        break;
    }
    if (label_arg >= 0) {
        arg_label(op->args[label_arg])->refs--;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
|
|
|
|
|
2017-11-02 15:19:14 +01:00
|
|
|
/*
 * Obtain a zero-initialized TCGOp with opcode OPC, recycling a
 * previously removed op when one is available.  The op is counted in
 * nb_ops but not yet linked into the op list.
 */
static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *ret;

    if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
        /* Reuse an op released by tcg_op_remove. */
        ret = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, ret, link);
    } else {
        ret = tcg_malloc(sizeof(TCGOp));
    }
    /* Clear everything except the list linkage. */
    memset(ret, 0, offsetof(TCGOp, link));
    ret->opc = opc;
    s->nb_ops++;

    return ret;
}
|
|
|
|
|
|
|
|
/* Allocate a new OPC op and append it to the end of the current
   op list; returns the new op for argument fill-in. */
TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *ret;

    ret = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, ret, link);
    return ret;
}
|
2016-06-24 05:34:33 +02:00
|
|
|
|
2018-12-09 20:37:19 +01:00
|
|
|
/* Allocate a new OPC op and link it immediately before OLD_OP. */
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);

    QTAILQ_INSERT_BEFORE(old_op, op, link);
    return op;
}
|
|
|
|
|
2018-12-09 20:37:19 +01:00
|
|
|
/* Allocate a new OPC op and link it immediately after OLD_OP. */
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);

    QTAILQ_INSERT_AFTER(&s->ops, old_op, op, link);
    return op;
}
|
|
|
|
|
2018-11-26 23:28:28 +01:00
|
|
|
/* Reachable analysis : remove unreachable code. */
/*
 * Walk the op list forward tracking a "dead" flag: it is set after any
 * unconditional control transfer (br/exit_tb/goto_ptr or a noreturn
 * helper call) and cleared at any referenced label.  Ops seen while
 * dead are removed, as are unreferenced labels and br/set_label pairs
 * that became adjacent after dead code removal.
 */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;
        int call_flags;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again. */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both. This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    /* NOTE(review): assumes a referenced set_label is never
                       the first op in the list — confirm. */
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead. */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions. */
            call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
            if (call_flags & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind. */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}
|
|
|
|
|
2016-06-24 05:34:22 +02:00
|
|
|
/* Liveness state bits kept in TCGTemp.state during the liveness passes:
   TS_DEAD - the temp's value is not needed by any later op;
   TS_MEM  - the temp's value must be resident (synced) in memory. */
#define TS_DEAD 1
#define TS_MEM 2

/* Test the per-op life data for argument N; both macros expect a local
   'arg_life' holding op->life to be in scope at the use site. */
#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
|
2018-11-27 21:45:26 +01:00
|
|
|
/* For liveness_pass_1, the register preferences for a given temp. */
|
|
|
|
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
|
|
|
|
{
|
|
|
|
return ts->state_ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* For liveness_pass_1, reset the preferences for a given temp to the
|
|
|
|
* maximal regset for its type.
|
|
|
|
*/
|
|
|
|
static inline void la_reset_pref(TCGTemp *ts)
|
|
|
|
{
|
|
|
|
*la_temp_pref(ts)
|
|
|
|
= (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
|
|
|
|
}
|
|
|
|
|
2012-10-09 21:53:07 +02:00
|
|
|
/* liveness analysis: end of function: all temps are dead, and globals
|
|
|
|
should be in memory. */
|
2018-11-27 22:37:24 +01:00
|
|
|
static void la_func_end(TCGContext *s, int ng, int nt)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2016-11-01 22:56:04 +01:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ng; ++i) {
|
|
|
|
s->temps[i].state = TS_DEAD | TS_MEM;
|
2018-11-27 21:45:26 +01:00
|
|
|
la_reset_pref(&s->temps[i]);
|
2016-11-01 22:56:04 +01:00
|
|
|
}
|
|
|
|
for (i = ng; i < nt; ++i) {
|
|
|
|
s->temps[i].state = TS_DEAD;
|
2018-11-27 21:45:26 +01:00
|
|
|
la_reset_pref(&s->temps[i]);
|
2016-11-01 22:56:04 +01:00
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2012-10-09 21:53:07 +02:00
|
|
|
/* liveness analysis: end of basic block: all temps are dead, globals
|
|
|
|
and local temps should be in memory. */
|
2018-11-27 22:37:24 +01:00
|
|
|
static void la_bb_end(TCGContext *s, int ng, int nt)
|
2008-05-25 19:24:00 +02:00
|
|
|
{
|
2016-11-01 22:56:04 +01:00
|
|
|
int i;
|
2008-05-25 19:24:00 +02:00
|
|
|
|
2016-11-01 22:56:04 +01:00
|
|
|
for (i = 0; i < ng; ++i) {
|
|
|
|
s->temps[i].state = TS_DEAD | TS_MEM;
|
2018-11-27 21:45:26 +01:00
|
|
|
la_reset_pref(&s->temps[i]);
|
2016-11-01 22:56:04 +01:00
|
|
|
}
|
|
|
|
for (i = ng; i < nt; ++i) {
|
|
|
|
s->temps[i].state = (s->temps[i].temp_local
|
|
|
|
? TS_DEAD | TS_MEM
|
|
|
|
: TS_DEAD);
|
2018-11-27 21:45:26 +01:00
|
|
|
la_reset_pref(&s->temps[i]);
|
2008-05-25 19:24:00 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-27 23:00:35 +01:00
|
|
|
/* liveness analysis: sync globals back to memory. */
|
|
|
|
static void la_global_sync(TCGContext *s, int ng)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ng; ++i) {
|
2018-11-27 21:45:26 +01:00
|
|
|
int state = s->temps[i].state;
|
|
|
|
s->temps[i].state = state | TS_MEM;
|
|
|
|
if (state == TS_DEAD) {
|
|
|
|
/* If the global was previously dead, reset prefs. */
|
|
|
|
la_reset_pref(&s->temps[i]);
|
|
|
|
}
|
2018-11-27 23:00:35 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-08 22:21:43 +02:00
|
|
|
/*
|
|
|
|
* liveness analysis: conditional branch: all temps are dead,
|
|
|
|
* globals and local temps should be synced.
|
|
|
|
*/
|
|
|
|
static void la_bb_sync(TCGContext *s, int ng, int nt)
|
|
|
|
{
|
|
|
|
la_global_sync(s, ng);
|
|
|
|
|
|
|
|
for (int i = ng; i < nt; ++i) {
|
|
|
|
if (s->temps[i].temp_local) {
|
|
|
|
int state = s->temps[i].state;
|
|
|
|
s->temps[i].state = state | TS_MEM;
|
|
|
|
if (state != TS_DEAD) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
s->temps[i].state = TS_DEAD;
|
|
|
|
}
|
|
|
|
la_reset_pref(&s->temps[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-27 23:00:35 +01:00
|
|
|
/* liveness analysis: sync globals back to memory and kill. */
|
|
|
|
static void la_global_kill(TCGContext *s, int ng)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ng; i++) {
|
|
|
|
s->temps[i].state = TS_DEAD | TS_MEM;
|
2018-11-27 21:45:26 +01:00
|
|
|
la_reset_pref(&s->temps[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* liveness analysis: note live globals crossing calls. */
|
|
|
|
static void la_cross_call(TCGContext *s, int nt)
|
|
|
|
{
|
|
|
|
TCGRegSet mask = ~tcg_target_call_clobber_regs;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < nt; i++) {
|
|
|
|
TCGTemp *ts = &s->temps[i];
|
|
|
|
if (!(ts->state & TS_DEAD)) {
|
|
|
|
TCGRegSet *pset = la_temp_pref(ts);
|
|
|
|
TCGRegSet set = *pset;
|
|
|
|
|
|
|
|
set &= mask;
|
|
|
|
/* If the combination is not possible, restart. */
|
|
|
|
if (set == 0) {
|
|
|
|
set = tcg_target_available_regs[ts->type] & mask;
|
|
|
|
}
|
|
|
|
*pset = set;
|
|
|
|
}
|
2018-11-27 23:00:35 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-23 00:46:09 +02:00
|
|
|
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input arguments is dead. Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    /* One preference set per temp; temps point into this array via
       state_ptr for the duration of the pass. */
    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    /* Walk the op list backwards: liveness flows from uses to defs. */
    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;
                int nb_call_regs;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);

                    /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
                    op->output_pref[i] = 0;
                }

                /* The call's effect on globals depends on its flags:
                   writes kill them, reads only require a sync. */
                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts && ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);

                /* Input arguments are live for preceding opcodes.  */
                for (i = 0; i < nb_iargs; i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts && ts->state & TS_DEAD) {
                        /* For those arguments that die, and will be allocated
                         * in registers, clear the register set for that arg,
                         * to be filled in below.  For args that will be on
                         * the stack, reset to any available reg.
                         */
                        *la_temp_pref(ts)
                            = (i < nb_call_regs ? 0 :
                               tcg_target_available_regs[ts->type]);
                        ts->state &= ~TS_DEAD;
                    }
                }

                /* For each input argument, add its input register to prefs.
                   If a temp is used once, this produces a single set bit.  */
                for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts) {
                        tcg_regset_set_reg(*la_temp_pref(ts),
                                           tcg_target_call_iarg_regs[i]);
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                op->output_pref[i] = *la_temp_pref(ts);

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_COND_BRANCH) {
                la_bb_sync(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->regs;
                    if (ct->ialias) {
                        set &= op->output_pref[ct->alias_index];
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}
|
|
|
|
|
2016-06-24 05:34:33 +02:00
|
|
|
/* Liveness analysis: Convert indirect regs to direct temporaries.
   Returns true if any op was changed, so the caller knows whether the
   op constraints need to be recomputed. */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            /* state_ptr links the indirect global to its shadow temp. */
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    /* Non-global temps (including the shadows just allocated) have no
       indirection of their own and also begin dead. */
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_COND_BRANCH) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  Insert a load
           before the op for any indirect global that is currently dead. */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
            arg_ts = arg_temp(op->args[0]);
            dir_ts = arg_ts->state_ptr;
            if (dir_ts) {
                op->args[0] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                if (NEED_SYNC_ARG(0)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc);
                    TCGTemp *out_ts = dir_ts;

                    if (IS_DEAD_ARG(0)) {
                        /* The mov is dead after the sync: store the source
                           directly and drop the mov itself. */
                        out_ts = arg_temp(op->args[1]);
                        arg_ts->state = TS_DEAD;
                        tcg_op_remove(s, op);
                    } else {
                        arg_ts->state = TS_MEM;
                    }

                    sop->args[0] = temp_arg(out_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;
                } else {
                    tcg_debug_assert(!IS_DEAD_ARG(0));
                }
            }
        } else {
            for (i = 0; i < nb_oargs; i++) {
                arg_ts = arg_temp(op->args[i]);
                dir_ts = arg_ts->state_ptr;
                if (!dir_ts) {
                    continue;
                }
                op->args[i] = temp_arg(dir_ts);
                changes = true;

                /* The output is now live and modified.  */
                arg_ts->state = 0;

                /* Sync outputs upon their last write.  */
                if (NEED_SYNC_ARG(i)) {
                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_st_i32
                                      : INDEX_op_st_i64);
                    TCGOp *sop = tcg_op_insert_after(s, op, sopc);

                    sop->args[0] = temp_arg(dir_ts);
                    sop->args[1] = temp_arg(arg_ts->mem_base);
                    sop->args[2] = arg_ts->mem_offset;

                    arg_ts->state = TS_MEM;
                }
                /* Drop outputs that are dead.  */
                if (IS_DEAD_ARG(i)) {
                    arg_ts->state = TS_DEAD;
                }
            }
        }
    }

    return changes;
}
|
|
|
|
|
2016-04-21 10:48:50 +02:00
|
|
|
#ifdef CONFIG_DEBUG_TCG
/* Debug helper: print every temp's location (register, stack slot,
   constant, or dead) followed by the register-to-temp mapping. */
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            /* Printed as offset(base-register), like assembler syntax. */
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}
|
|
|
|
|
|
|
|
/* Debug helper: verify that the reg_to_temp[] map and the per-temp
   val_type/reg fields are mutually consistent; abort with a register
   dump on any mismatch. */
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    /* Every register claimed by a temp must point back to that temp. */
    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    /* Every register-resident temp must be recorded in reg_to_temp[]
       (fixed regs are exempt from the mapping). */
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
|
|
|
|
|
2016-11-09 15:25:09 +01:00
|
|
|
/* Assign a stack-frame slot to 'ts'.  Advances the current frame
   offset by one tcg_target_long; aborts if the frame is exhausted. */
static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    /* Round the offset up to natural tcg_target_long alignment. */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    /* Out of frame space: no recovery possible at this point. */
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
|
|
|
|
|
2018-11-27 16:48:06 +01:00
|
|
|
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
|
2013-09-19 19:36:18 +02:00
|
|
|
|
2016-06-20 07:59:13 +02:00
|
|
|
/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
|
|
|
|
mark it free; otherwise mark it dead. */
|
|
|
|
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
|
2012-10-09 21:53:06 +02:00
|
|
|
{
|
2016-06-20 07:59:13 +02:00
|
|
|
if (ts->fixed_reg) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (ts->val_type == TEMP_VAL_REG) {
|
|
|
|
s->reg_to_temp[ts->reg] = NULL;
|
|
|
|
}
|
|
|
|
ts->val_type = (free_or_dead < 0
|
|
|
|
|| ts->temp_local
|
2016-11-02 18:20:15 +01:00
|
|
|
|| ts->temp_global
|
2016-06-20 07:59:13 +02:00
|
|
|
? TEMP_VAL_MEM : TEMP_VAL_DEAD);
|
|
|
|
}
|
2012-10-09 21:53:06 +02:00
|
|
|
|
2016-06-20 07:59:13 +02:00
|
|
|
/* Mark a temporary as dead.  Convenience wrapper around
   temp_free_or_dead() with a positive (dead) disposition. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
|
|
|
|
|
|
|
|
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   registers needs to be allocated to store a constant. If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    /* Fixed registers have no backing memory to sync. */
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            /* Otherwise materialize the constant in a register and fall
               through to the register-store path below. */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            /* Already in memory; nothing to emit. */
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
|
|
|
|
|
2008-02-01 11:05:41 +01:00
|
|
|
/* free register 'reg' by spilling the corresponding temporary if necessary */
|
2013-09-19 19:36:18 +02:00
|
|
|
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2013-09-19 00:21:56 +02:00
|
|
|
TCGTemp *ts = s->reg_to_temp[reg];
|
|
|
|
if (ts != NULL) {
|
2018-11-28 00:35:04 +01:00
|
|
|
temp_sync(s, ts, allocated_regs, 0, -1);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-27 16:16:21 +01:00
|
|
|
/**
|
|
|
|
* tcg_reg_alloc:
|
|
|
|
* @required_regs: Set of registers in which we must allocate.
|
|
|
|
* @allocated_regs: Set of registers which must be avoided.
|
|
|
|
* @preferred_regs: Set of registers we should prefer.
|
|
|
|
* @rev: True if we search the registers in "indirect" order.
|
|
|
|
*
|
|
|
|
* The allocated register must be in @required_regs & ~@allocated_regs,
|
|
|
|
* but if we can put it in @preferred_regs we may save a move later.
|
|
|
|
*/
|
|
|
|
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
|
|
|
|
TCGRegSet allocated_regs,
|
|
|
|
TCGRegSet preferred_regs, bool rev)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2018-11-27 16:16:21 +01:00
|
|
|
int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
|
|
|
|
TCGRegSet reg_ct[2];
|
2015-08-19 08:23:08 +02:00
|
|
|
const int *order;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2018-11-27 16:16:21 +01:00
|
|
|
reg_ct[1] = required_regs & ~allocated_regs;
|
|
|
|
tcg_debug_assert(reg_ct[1] != 0);
|
|
|
|
reg_ct[0] = reg_ct[1] & preferred_regs;
|
|
|
|
|
|
|
|
/* Skip the preferred_regs option if it cannot be satisfied,
|
|
|
|
or if the preference made no difference. */
|
|
|
|
f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
|
|
|
|
|
2015-08-19 08:23:08 +02:00
|
|
|
order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2018-11-27 16:16:21 +01:00
|
|
|
/* Try free registers, preferences first. */
|
|
|
|
for (j = f; j < 2; j++) {
|
|
|
|
TCGRegSet set = reg_ct[j];
|
|
|
|
|
|
|
|
if (tcg_regset_single(set)) {
|
|
|
|
/* One register in the set. */
|
|
|
|
TCGReg reg = tcg_regset_first(set);
|
|
|
|
if (s->reg_to_temp[reg] == NULL) {
|
|
|
|
return reg;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
TCGReg reg = order[i];
|
|
|
|
if (s->reg_to_temp[reg] == NULL &&
|
|
|
|
tcg_regset_test_reg(set, reg)) {
|
|
|
|
return reg;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2018-11-27 16:16:21 +01:00
|
|
|
/* We must spill something. */
|
|
|
|
for (j = f; j < 2; j++) {
|
|
|
|
TCGRegSet set = reg_ct[j];
|
|
|
|
|
|
|
|
if (tcg_regset_single(set)) {
|
|
|
|
/* One register in the set. */
|
|
|
|
TCGReg reg = tcg_regset_first(set);
|
2013-09-19 19:36:18 +02:00
|
|
|
tcg_reg_free(s, reg, allocated_regs);
|
2008-02-01 11:05:41 +01:00
|
|
|
return reg;
|
2018-11-27 16:16:21 +01:00
|
|
|
} else {
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
TCGReg reg = order[i];
|
|
|
|
if (tcg_regset_test_reg(set, reg)) {
|
|
|
|
tcg_reg_free(s, reg, allocated_regs);
|
|
|
|
return reg;
|
|
|
|
}
|
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_abort();
|
|
|
|
}
|
|
|
|
|
2013-09-19 17:02:05 +02:00
|
|
|
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  PREFERRED_REGS is only a hint
   passed through to the allocator; it does not constrain the result. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        /* Already resident in a register; nothing to do. */
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        /* Materialize the constant; the register now differs from
           whatever (if anything) is in the backing slot. */
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        /* Freshly loaded, so register and memory slot agree. */
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    /* Record the new assignment in both directions (temp -> reg and
       reg -> temp). */
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
|
|
|
|
|
2016-06-20 07:59:13 +02:00
|
|
|
/* Save a temporary to memory.  'allocated_regs' is kept for interface
   symmetry with the other save/sync helpers; it is unused here because
   liveness analysis guarantees no store is actually needed. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory.  Keep a tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}
|
|
|
|
|
2011-11-22 11:06:22 +01:00
|
|
|
/* save globals to their canonical location and assume they can be
|
2008-05-23 19:33:39 +02:00
|
|
|
modified be the following code. 'allocated_regs' is used in case a
|
|
|
|
temporary registers needs to be allocated to store a constant. */
|
|
|
|
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2016-11-02 18:21:44 +01:00
|
|
|
int i, n;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2016-11-02 18:21:44 +01:00
|
|
|
for (i = 0, n = s->nb_globals; i < n; i++) {
|
2013-09-19 00:35:32 +02:00
|
|
|
temp_save(s, &s->temps[i], allocated_regs);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2008-05-21 18:24:20 +02:00
|
|
|
}
|
|
|
|
|
2012-10-09 21:53:08 +02:00
|
|
|
/* sync globals to their canonical location and assume they can be
|
|
|
|
read by the following code. 'allocated_regs' is used in case a
|
|
|
|
temporary registers needs to be allocated to store a constant. */
|
|
|
|
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
|
|
|
|
{
|
2016-11-02 18:21:44 +01:00
|
|
|
int i, n;
|
2012-10-09 21:53:08 +02:00
|
|
|
|
2016-11-02 18:21:44 +01:00
|
|
|
for (i = 0, n = s->nb_globals; i < n; i++) {
|
2013-09-19 00:33:00 +02:00
|
|
|
TCGTemp *ts = &s->temps[i];
|
2016-06-24 05:34:33 +02:00
|
|
|
tcg_debug_assert(ts->val_type != TEMP_VAL_REG
|
|
|
|
|| ts->fixed_reg
|
|
|
|
|| ts->mem_coherent);
|
2012-10-09 21:53:08 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-21 18:24:20 +02:00
|
|
|
/* at the end of a basic block, we assume all temporaries are dead and
|
2008-05-23 19:33:39 +02:00
|
|
|
all globals are stored at their canonical location. */
|
|
|
|
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
|
2008-05-21 18:24:20 +02:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2013-09-19 00:35:32 +02:00
|
|
|
for (i = s->nb_globals; i < s->nb_temps; i++) {
|
|
|
|
TCGTemp *ts = &s->temps[i];
|
2008-05-25 19:24:00 +02:00
|
|
|
if (ts->temp_local) {
|
2013-09-19 00:35:32 +02:00
|
|
|
temp_save(s, ts, allocated_regs);
|
2008-05-25 19:24:00 +02:00
|
|
|
} else {
|
2016-06-24 05:34:33 +02:00
|
|
|
/* The liveness analysis already ensures that temps are dead.
|
|
|
|
Keep an tcg_debug_assert for safety. */
|
|
|
|
tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
}
|
2008-05-23 19:33:39 +02:00
|
|
|
|
|
|
|
save_globals(s, allocated_regs);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2020-10-08 22:21:43 +02:00
|
|
|
/*
 * At a conditional branch, we assume all temporaries are dead and
 * all globals and local temps are synced to their location.
 */
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
{
    sync_globals(s, allocated_regs);

    for (int i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        /*
         * The liveness analysis already ensures that temps are dead.
         * Keep tcg_debug_asserts for safety.
         */
        if (ts->temp_local) {
            /* A local temp may remain live in a register across the
               branch, but only if its memory slot is up to date. */
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
        } else {
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }
}
|
|
|
|
|
2019-03-18 19:20:27 +01:00
|
|
|
/*
 * Specialized code generation for INDEX_op_movi_*.
 *
 * Record that OTS now holds the constant VAL.  No movi instruction is
 * emitted here; the constant is materialized lazily by temp_load or
 * temp_sync when (and if) a register or memory copy is required.
 * ARG_LIFE supplies the NEED_SYNC_ARG/IS_DEAD_ARG bits for output 0;
 * PREFERRED_REGS is the allocation hint used if a sync is needed.
 */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified. */
    tcg_debug_assert(!ots->fixed_reg);

    /* The movi is not explicitly generated here. */
    if (ots->val_type == TEMP_VAL_REG) {
        /* Drop the stale register mapping for the overwritten value. */
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
|
|
|
|
|
2016-12-08 22:42:08 +01:00
|
|
|
static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    /* Output temp and immediate value come straight from the opcode;
       delegate all state tracking to the shared movi helper. */
    tcg_reg_alloc_do_movi(s, arg_temp(op->args[0]), op->args[1],
                          op->life, op->output_pref[0]);
}
|
|
|
|
|
2019-03-18 19:20:27 +01:00
|
|
|
/*
|
|
|
|
* Specialized code generation for INDEX_op_mov_*.
|
|
|
|
*/
|
2016-12-08 22:42:08 +01:00
|
|
|
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2016-12-08 22:42:08 +01:00
|
|
|
const TCGLifeData arg_life = op->life;
|
2018-11-27 16:44:51 +01:00
|
|
|
TCGRegSet allocated_regs, preferred_regs;
|
2008-02-01 11:05:41 +01:00
|
|
|
TCGTemp *ts, *ots;
|
2014-05-13 23:50:18 +02:00
|
|
|
TCGType otype, itype;
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2017-09-11 20:58:44 +02:00
|
|
|
allocated_regs = s->reserved_regs;
|
2018-11-27 16:44:51 +01:00
|
|
|
preferred_regs = op->output_pref[0];
|
2017-06-20 08:18:10 +02:00
|
|
|
ots = arg_temp(op->args[0]);
|
|
|
|
ts = arg_temp(op->args[1]);
|
2014-05-13 23:50:18 +02:00
|
|
|
|
2019-03-16 18:48:02 +01:00
|
|
|
/* ENV should not be modified. */
|
|
|
|
tcg_debug_assert(!ots->fixed_reg);
|
|
|
|
|
2014-05-13 23:50:18 +02:00
|
|
|
/* Note that otype != itype for no-op truncation. */
|
|
|
|
otype = ots->type;
|
|
|
|
itype = ts->type;
|
2012-10-09 21:53:07 +02:00
|
|
|
|
2016-09-15 15:16:00 +02:00
|
|
|
if (ts->val_type == TEMP_VAL_CONST) {
|
|
|
|
/* propagate constant or generate sti */
|
|
|
|
tcg_target_ulong val = ts->val;
|
|
|
|
if (IS_DEAD_ARG(1)) {
|
|
|
|
temp_dead(s, ts);
|
|
|
|
}
|
2018-11-27 16:44:51 +01:00
|
|
|
tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
|
2016-09-15 15:16:00 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the source value is in memory we're going to be forced
|
|
|
|
to have it in a register in order to perform the copy. Copy
|
|
|
|
the SOURCE value into its own register first, that way we
|
|
|
|
don't have to reload SOURCE the next time it is used. */
|
|
|
|
if (ts->val_type == TEMP_VAL_MEM) {
|
2018-11-27 16:44:51 +01:00
|
|
|
temp_load(s, ts, tcg_target_available_regs[itype],
|
|
|
|
allocated_regs, preferred_regs);
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2016-09-15 15:16:00 +02:00
|
|
|
tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
|
2019-03-16 18:48:02 +01:00
|
|
|
if (IS_DEAD_ARG(0)) {
|
2012-10-09 21:53:07 +02:00
|
|
|
/* mov to a non-saved dead register makes no sense (even with
|
|
|
|
liveness analysis disabled). */
|
2016-04-21 10:48:49 +02:00
|
|
|
tcg_debug_assert(NEED_SYNC_ARG(0));
|
2012-10-09 21:53:07 +02:00
|
|
|
if (!ots->mem_allocated) {
|
2016-11-09 15:25:09 +01:00
|
|
|
temp_allocate_frame(s, ots);
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
2013-09-18 23:12:53 +02:00
|
|
|
tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
|
2012-10-09 21:53:07 +02:00
|
|
|
if (IS_DEAD_ARG(1)) {
|
2013-09-19 00:29:18 +02:00
|
|
|
temp_dead(s, ts);
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
2013-09-19 00:29:18 +02:00
|
|
|
temp_dead(s, ots);
|
2012-10-09 21:53:07 +02:00
|
|
|
} else {
|
2019-03-16 18:48:02 +01:00
|
|
|
if (IS_DEAD_ARG(1) && !ts->fixed_reg) {
|
2008-02-01 11:05:41 +01:00
|
|
|
/* the mov can be suppressed */
|
2012-10-09 21:53:07 +02:00
|
|
|
if (ots->val_type == TEMP_VAL_REG) {
|
2013-09-19 00:21:56 +02:00
|
|
|
s->reg_to_temp[ots->reg] = NULL;
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
|
|
|
ots->reg = ts->reg;
|
2013-09-19 00:29:18 +02:00
|
|
|
temp_dead(s, ts);
|
2008-02-01 11:05:41 +01:00
|
|
|
} else {
|
2012-10-09 21:53:07 +02:00
|
|
|
if (ots->val_type != TEMP_VAL_REG) {
|
|
|
|
/* When allocating a new register, make sure to not spill the
|
|
|
|
input one. */
|
|
|
|
tcg_regset_set_reg(allocated_regs, ts->reg);
|
2014-05-13 23:50:18 +02:00
|
|
|
ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
|
2018-11-27 16:44:51 +01:00
|
|
|
allocated_regs, preferred_regs,
|
2018-11-27 16:16:21 +01:00
|
|
|
ots->indirect_base);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2019-03-16 18:48:18 +01:00
|
|
|
if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
|
2019-03-16 18:48:32 +01:00
|
|
|
/*
|
|
|
|
* Cross register class move not supported.
|
|
|
|
* Store the source register into the destination slot
|
|
|
|
* and leave the destination temp as TEMP_VAL_MEM.
|
|
|
|
*/
|
|
|
|
assert(!ots->fixed_reg);
|
|
|
|
if (!ts->mem_allocated) {
|
|
|
|
temp_allocate_frame(s, ots);
|
|
|
|
}
|
|
|
|
tcg_out_st(s, ts->type, ts->reg,
|
|
|
|
ots->mem_base->reg, ots->mem_offset);
|
|
|
|
ots->mem_coherent = 1;
|
|
|
|
temp_free_or_dead(s, ots, -1);
|
|
|
|
return;
|
2019-03-16 18:48:18 +01:00
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2012-10-09 21:53:07 +02:00
|
|
|
ots->val_type = TEMP_VAL_REG;
|
|
|
|
ots->mem_coherent = 0;
|
2013-09-19 00:21:56 +02:00
|
|
|
s->reg_to_temp[ots->reg] = ots;
|
2012-10-09 21:53:07 +02:00
|
|
|
if (NEED_SYNC_ARG(0)) {
|
2018-11-28 00:35:04 +01:00
|
|
|
temp_sync(s, ots, allocated_regs, 0, 0);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
2012-10-09 21:53:07 +02:00
|
|
|
}
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
|
2019-03-18 19:20:27 +01:00
|
|
|
/*
 * Specialized code generation for INDEX_op_dup_vec.
 *
 * Broadcast scalar element op->args[1] (size given by VECE) into vector
 * temp op->args[0].  Tries, in order: direct register dup, an
 * integer->vector move, dup-from-memory, and finally a scalar load
 * followed by a vector->vector dup.
 */
static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet dup_out_regs, dup_in_regs;
    TCGTemp *its, *ots;
    TCGType itype, vtype;
    intptr_t endian_fixup;
    unsigned vece;
    bool ok;

    ots = arg_temp(op->args[0]);
    its = arg_temp(op->args[1]);

    /* ENV should not be modified. */
    tcg_debug_assert(!ots->fixed_reg);

    itype = its->type;
    vece = TCGOP_VECE(op);
    vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    if (its->val_type == TEMP_VAL_CONST) {
        /* Propagate constant via movi -> dupi. */
        tcg_target_ulong val = its->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, its);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
        return;
    }

    dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
    dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;

        if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
            /* Make sure to not spill the input register. */
            tcg_regset_set_reg(allocated_regs, its->reg);
        }
        ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                                 op->output_pref[0], ots->indirect_base);
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
    }

    switch (its->val_type) {
    case TEMP_VAL_REG:
        /*
         * The dup constraints must be broad, covering all possible VECE.
         * However, tcg_op_dup_vec() gets to see the VECE and we allow it
         * to fail, indicating that extra moves are required for that case.
         */
        if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
            if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
                goto done;
            }
            /* Try again from memory or a vector input register.  */
        }
        if (!its->mem_coherent) {
            /*
             * The input register is not synced, and so an extra store
             * would be required to use memory.  Attempt an integer-vector
             * register move first.  We do not have a TCGRegSet for this.
             */
            if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
                break;
            }
            /* Sync the temp back to its slot and load from there.  */
            temp_sync(s, its, s->reserved_regs, 0, 0);
        }
        /* fall through */

    case TEMP_VAL_MEM:
        /* On big-endian hosts the desired element sits at the far end
           of the slot; adjust the offset for a single-element access. */
#ifdef HOST_WORDS_BIGENDIAN
        endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
        endian_fixup -= 1 << vece;
#else
        endian_fixup = 0;
#endif
        if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
                             its->mem_offset + endian_fixup)) {
            goto done;
        }
        /* No dup-from-memory insn available: load one element into the
           output register and dup it in-place below. */
        tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
        break;

    default:
        g_assert_not_reached();
    }

    /* We now have a vector input register, so dup must succeed. */
    ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
    tcg_debug_assert(ok);

 done:
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, its);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, 0);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
|
|
|
|
|
2016-12-08 22:42:08 +01:00
|
|
|
/*
 * Generic register allocation and emission for a non-call, non-mov op:
 * satisfy input constraints (loading temps into registers as needed),
 * honor BB-end / conditional-branch / call-clobber flags, allocate
 * output registers, emit the host instruction, then sync or kill
 * outputs per the liveness data.
 */
static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    const TCGOpDef * const def = &tcg_op_defs[op->opc];
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           op->args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, o_preferred_regs;

        /* Process inputs in the backend's preferred (sorted) order. */
        i = def->args_ct[nb_oargs + k].sort_index;
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        i_preferred_regs = o_preferred_regs = 0;
        if (arg_ct->ialias) {
            o_preferred_regs = op->output_pref[arg_ct->alias_index];
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index]) {
                    goto allocate_in_reg;
                }
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

                /* check if the current register has already been allocated
                   for another input aliased to an output.  For multi-output
                   ops (add2/sub2/div2), the same dead temp used for both
                   aliased inputs must not be handed to both outputs. */
                if (ts->val_type == TEMP_VAL_REG) {
                    int k2, i2;
                    reg = ts->reg;
                    for (k2 = 0 ; k2 < k ; k2++) {
                        i2 = def->args_ct[nb_oargs + k2].sort_index;
                        if (def->args_ct[i2].ialias && reg == new_args[i2]) {
                            goto allocate_in_reg;
                        }
                    }
                }
                i_preferred_regs = o_preferred_regs;
            }
        }

        temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs);
        reg = ts->reg;

        if (tcg_regset_test_reg(arg_ct->regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      i_allocated_regs, 0);
            reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs,
                                o_preferred_regs, ts->indirect_base);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!ts->fixed_reg);

            if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                /* Aliased output reuses the register of its input. */
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->newreg) {
                /* 'newreg' outputs must not overlap any input register. */
                reg = tcg_reg_alloc(s, arg_ct->regs,
                                    i_allocated_regs | o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            } else {
                reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            /*
             * Temp value is modified, so the value kept in memory is
             * potentially not the same.
             */
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!ts->fixed_reg);

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
|
|
|
|
|
2008-05-10 12:52:05 +02:00
|
|
|
/* STACK_DIR(x): negate a stack-slot delta when the host stack grows
   upward, so call-argument offsets advance in the right direction. */
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
|
|
|
|
|
2016-12-08 22:42:08 +01:00
|
|
|
/*
 * Register allocation and emission for a helper call: place excess
 * arguments on the stack, load the rest into the ABI argument
 * registers, free all call-clobbered registers, save/sync globals per
 * the call flags, emit the call, and bind the ABI return registers to
 * the output temps.
 */
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    /* Function pointer and call flags trail the in/out arguments. */
    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs, 0);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    /* Evict whatever occupies the ABI register, then
                       move the argument into place. */
                    tcg_reg_free(s, reg, allocated_regs);
                    if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                        /*
                         * Cross register class move not supported.  Sync the
                         * temp back to its slot and load from there.
                         */
                        temp_sync(s, ts, allocated_regs, 0, 0);
                        tcg_out_ld(s, ts->type, reg,
                                   ts->mem_base->reg, ts->mem_offset);
                    }
                }
            } else {
                TCGRegSet arg_set = 0;

                /* Load the argument directly into the ABI register. */
                tcg_reg_free(s, reg, allocated_regs);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs, 0);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);

        /* ENV should not be modified. */
        tcg_debug_assert(!ts->fixed_reg);

        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);
        if (ts->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ts->reg] = NULL;
        }
        /* Bind the ABI return register to the output temp. */
        ts->val_type = TEMP_VAL_REG;
        ts->reg = reg;
        ts->mem_coherent = 0;
        s->reg_to_temp[reg] = ts;
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_PROFILER
|
|
|
|
|
2017-07-06 01:35:06 +02:00
|
|
|
/* avoid copy/paste errors */
|
|
|
|
/* Accumulate @from->field (read atomically) into @to->field. */
#define PROF_ADD(to, from, field)                                       \
    do {                                                                \
        (to)->field += qatomic_read(&((from)->field));                  \
    } while (0)
|
|
|
|
|
|
|
|
/* Fold @from->field (read atomically) into @to->field as a running max. */
#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)
|
|
|
|
|
|
|
|
/* Pass in a zero'ed @prof */
|
|
|
|
static inline
|
|
|
|
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
|
|
|
|
{
|
2020-09-23 12:56:46 +02:00
|
|
|
unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
|
2017-07-06 01:35:06 +02:00
|
|
|
unsigned int i;
|
|
|
|
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
for (i = 0; i < n_ctxs; i++) {
|
2020-09-23 12:56:46 +02:00
|
|
|
TCGContext *s = qatomic_read(&tcg_ctxs[i]);
|
tcg: enable multiple TCG contexts in softmmu
This enables parallel TCG code generation. However, we do not take
advantage of it yet since tb_lock is still held during tb_gen_code.
In user-mode we use a single TCG context; see the documentation
added to tcg_region_init for the rationale.
Note that targets do not need any conversion: targets initialize a
TCGContext (e.g. defining TCG globals), and after this initialization
has finished, the context is cloned by the vCPU threads, each of
them keeping a separate copy.
TCG threads claim one entry in tcg_ctxs[] by atomically increasing
n_tcg_ctxs. Do not be too annoyed by the subsequent atomic_read's
of that variable and tcg_ctxs; they are there just to play nice with
analysis tools such as thread sanitizer.
Note that we do not allocate an array of contexts (we allocate
an array of pointers instead) because when tcg_context_init
is called, we do not know yet how many contexts we'll use since
the bool behind qemu_tcg_mttcg_enabled() isn't set yet.
Previous patches folded some TCG globals into TCGContext. The non-const
globals remaining are only set at init time, i.e. before the TCG
threads are spawned. Here is a list of these set-at-init-time globals
under tcg/:
Only written by tcg_context_init:
- indirect_reg_alloc_order
- tcg_op_defs
Only written by tcg_target_init (called from tcg_context_init):
- tcg_target_available_regs
- tcg_target_call_clobber_regs
- arm: arm_arch, use_idiv_instructions
- i386: have_cmov, have_bmi1, have_bmi2, have_lzcnt,
have_movbe, have_popcnt
- mips: use_movnz_instructions, use_mips32_instructions,
use_mips32r2_instructions, got_sigill (tcg_target_detect_isa)
- ppc: have_isa_2_06, have_isa_3_00, tb_ret_addr
- s390: tb_ret_addr, s390_facilities
- sparc: qemu_ld_trampoline, qemu_st_trampoline (build_trampolines),
use_vis3_instructions
Only written by tcg_prologue_init:
- 'struct jit_code_entry one_entry'
- aarch64: tb_ret_addr
- arm: tb_ret_addr
- i386: tb_ret_addr, guest_base_flags
- ia64: tb_ret_addr
- mips: tb_ret_addr, bswap32_addr, bswap32u_addr, bswap64_addr
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2017-07-20 00:57:58 +02:00
|
|
|
const TCGProfile *orig = &s->prof;
|
2017-07-06 01:35:06 +02:00
|
|
|
|
|
|
|
if (counters) {
|
2018-10-10 16:48:53 +02:00
|
|
|
PROF_ADD(prof, orig, cpu_exec_time);
|
2017-07-06 01:35:06 +02:00
|
|
|
PROF_ADD(prof, orig, tb_count1);
|
|
|
|
PROF_ADD(prof, orig, tb_count);
|
|
|
|
PROF_ADD(prof, orig, op_count);
|
|
|
|
PROF_MAX(prof, orig, op_count_max);
|
|
|
|
PROF_ADD(prof, orig, temp_count);
|
|
|
|
PROF_MAX(prof, orig, temp_count_max);
|
|
|
|
PROF_ADD(prof, orig, del_op_count);
|
|
|
|
PROF_ADD(prof, orig, code_in_len);
|
|
|
|
PROF_ADD(prof, orig, code_out_len);
|
|
|
|
PROF_ADD(prof, orig, search_out_len);
|
|
|
|
PROF_ADD(prof, orig, interm_time);
|
|
|
|
PROF_ADD(prof, orig, code_time);
|
|
|
|
PROF_ADD(prof, orig, la_time);
|
|
|
|
PROF_ADD(prof, orig, opt_time);
|
|
|
|
PROF_ADD(prof, orig, restore_count);
|
|
|
|
PROF_ADD(prof, orig, restore_time);
|
|
|
|
}
|
|
|
|
if (table) {
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NB_OPS; i++) {
|
|
|
|
PROF_ADD(prof, orig, table_op_count[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#undef PROF_ADD
|
|
|
|
#undef PROF_MAX
|
|
|
|
|
|
|
|
/* Snapshot only the scalar counters into a zero'ed @prof. */
static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}
|
|
|
|
|
|
|
|
/* Snapshot only the per-opcode counts into a zero'ed @prof. */
static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}
|
2008-02-01 11:05:41 +01:00
|
|
|
|
2019-04-17 21:17:51 +02:00
|
|
|
void tcg_dump_op_count(void)
|
2008-02-01 11:05:41 +01:00
|
|
|
{
|
2017-07-06 01:35:06 +02:00
|
|
|
TCGProfile prof = {};
|
2008-02-01 11:05:41 +01:00
|
|
|
int i;
|
2014-08-18 09:58:08 +02:00
|
|
|
|
2017-07-06 01:35:06 +02:00
|
|
|
tcg_profile_snapshot_table(&prof);
|
2014-03-31 05:40:35 +02:00
|
|
|
for (i = 0; i < NB_OPS; i++) {
|
2019-04-17 21:17:51 +02:00
|
|
|
qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
|
2017-07-06 01:35:06 +02:00
|
|
|
prof.table_op_count[i]);
|
2008-02-01 11:05:41 +01:00
|
|
|
}
|
|
|
|
}
|
2018-10-10 16:48:53 +02:00
|
|
|
|
|
|
|
int64_t tcg_cpu_exec_time(void)
|
|
|
|
{
|
2020-09-23 12:56:46 +02:00
|
|
|
unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
|
2018-10-10 16:48:53 +02:00
|
|
|
unsigned int i;
|
|
|
|
int64_t ret = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < n_ctxs; i++) {
|
2020-09-23 12:56:46 +02:00
|
|
|
const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
|
2018-10-10 16:48:53 +02:00
|
|
|
const TCGProfile *prof = &s->prof;
|
|
|
|
|
2020-09-23 12:56:46 +02:00
|
|
|
ret += qatomic_read(&prof->cpu_exec_time);
|
2018-10-10 16:48:53 +02:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2014-11-02 09:04:18 +01:00
|
|
|
#else
|
2019-04-17 21:17:51 +02:00
|
|
|
/* Per-opcode statistics are only gathered with CONFIG_PROFILER. */
void tcg_dump_op_count(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
|
2018-10-10 16:48:53 +02:00
|
|
|
|
|
|
|
/* Without CONFIG_PROFILER there is no exec-time data; asking for it
   is a usage error, so report and abort rather than return garbage. */
int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
|
2008-02-01 11:05:41 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2016-03-15 15:30:16 +01:00
|
|
|
/*
 * Optimize, register-allocate and emit host code for the op list in
 * @s->ops, writing into the code buffer for @tb.
 *
 * Returns the number of generated code bytes on success.  Returns -1
 * when the (pending) code buffer overflow test trips, -2 when the TB
 * grows past what gen_insn_end_off can hold (UINT16_MAX) or relocations
 * cannot be resolved; ldst/pool finalization may return their own
 * negative codes.
 */
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    /* Record op/temp counts for this TB before any passes run. */
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        qatomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            qatomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        qatomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            qatomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    /* Dump the raw op list before any optimization. */
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s, false);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    /* opt_time accumulates (end - start); subtract the start now. */
    qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            FILE *logfile = qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s, false);
            qemu_log("\n");
            qemu_log_unlock(logfile);
        }
#endif
        /* Replace indirect temps with direct temps. */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness. */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s, true);
        qemu_log("\n");
        qemu_log_unlock(logfile);
    }
#endif

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Main emission loop: one pass over the (optimized) op list. */
    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            /* Close out the previous guest insn's end offset, then
               record the start data for the new one. */
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                /* Each start word is split across two 32-bit args. */
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
|
|
|
|
|
2008-05-23 11:52:20 +02:00
|
|
|
#ifdef CONFIG_PROFILER
|
2019-04-17 21:17:52 +02:00
|
|
|
/*
 * Print a human-readable summary of the aggregated TCG profiler
 * counters (TB counts, op/temp averages, cycle breakdowns).
 */
void tcg_dump_info(void)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;   /* tb_count clamped to >= 1 for safe division */
    int64_t tot;            /* total translation cycles (interm + code) */

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    qemu_printf("JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    qemu_printf("translated TBs %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    qemu_printf("avg ops/TB %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    qemu_printf("deleted ops/TB %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    qemu_printf("avg temps/TB %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    qemu_printf("avg host code/TB %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    qemu_printf("avg search data/TB %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    qemu_printf("cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    qemu_printf("cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    qemu_printf("cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    qemu_printf("cycles/search byte %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    /* Avoid division by zero in the percentage lines below. */
    if (tot == 0) {
        tot = 1;
    }
    qemu_printf(" gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    qemu_printf(" gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    qemu_printf("optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    qemu_printf("cpu_restore count %" PRId64 "\n",
                s->restore_count);
    qemu_printf(" avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
|
|
|
|
#else
|
2019-04-17 21:17:52 +02:00
|
|
|
/* Profiler counters are only gathered with CONFIG_PROFILER. */
void tcg_dump_info(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
|
|
|
|
#endif
|
2012-03-19 20:25:11 +01:00
|
|
|
|
|
|
|
#ifdef ELF_HOST_MACHINE
|
2012-03-24 18:47:36 +01:00
|
|
|
/* In order to use this feature, the backend needs to do three things:
|
|
|
|
|
|
|
|
(1) Define ELF_HOST_MACHINE to indicate both what value to
|
|
|
|
put into the ELF image and to indicate support for the feature.
|
|
|
|
|
|
|
|
(2) Define tcg_register_jit. This should create a buffer containing
|
|
|
|
the contents of a .debug_frame section that describes the post-
|
|
|
|
prologue unwind info for the tcg machine.
|
|
|
|
|
|
|
|
(3) Call tcg_register_jit_int, with the constructed .debug_frame.
|
|
|
|
*/
|
2012-03-19 20:25:11 +01:00
|
|
|
|
|
|
|
/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */
/* Action communicated to GDB via __jit_debug_descriptor.action_flag. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

/* One registered in-memory symbol file (node of a doubly-linked list). */
struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;   /* start of the in-memory ELF image */
    uint64_t symfile_size;      /* size of that image in bytes */
};

/* Global descriptor GDB reads to discover registered JIT code. */
struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

/* Per the GDB JIT interface, the debugger traps calls to this function;
   noinline plus the empty asm keep the call site from being removed. */
void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
|
|
|
|
|
|
|
|
/* End GDB interface. */
|
|
|
|
|
|
|
|
/*
 * Return the offset of @str within the string table @strtab.
 * The table starts with an empty entry at offset 0, so the scan
 * begins at offset 1.  @str must be present in the table; there is
 * no terminating sentinel check.
 */
static int find_string(const char *strtab, const char *str)
{
    const char *p;

    for (p = strtab + 1; strcmp(p, str) != 0; p += strlen(p) + 1) {
        continue;
    }
    return p - strtab;
}
|
|
|
|
|
2020-10-29 17:17:30 +01:00
|
|
|
/*
 * Build a minimal in-memory ELF image describing the code buffer at
 * @buf_ptr/@buf_size and register it with GDB via the JIT interface
 * above.  The image holds a fake .text (SHT_NOBITS, contents live
 * elsewhere), a tiny DWARF .debug_info/.debug_abbrev pair naming
 * "code_gen_buffer", and the caller-supplied .debug_frame unwind data,
 * which is copied after the fixed part of the image and patched with
 * the buffer's address and length.
 */
static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    /* Fixed-layout DWARF2 compile-unit + subprogram DIEs, matching the
       abbreviation table 'da' below. */
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            /* NOTE(review): this uses sizeof(ElfW(Shdr)); presumably
               sizeof(ElfW(Ehdr)) was intended.  GDB evidently tolerates
               it — confirm before changing. */
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    /* Patch the template with the actual buffer address and size. */
    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    /* The unwind data lives immediately after the fixed-layout image. */
    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    /* Hand the finished image to GDB. */
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
|
|
|
|
#else
|
2012-03-24 18:47:36 +01:00
|
|
|
/* No support for the feature. Provide the entry point expected by exec.c,
|
|
|
|
and implement the internal function we declared earlier. */
|
2012-03-19 20:25:11 +01:00
|
|
|
|
2020-10-29 17:17:30 +01:00
|
|
|
static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    /* No-op: without ELF_HOST_MACHINE there is no ELF image to build. */
}
|
|
|
|
|
2020-10-29 17:17:30 +01:00
|
|
|
void tcg_register_jit(const void *buf, size_t buf_size)
{
    /* No-op: GDB JIT registration is unavailable without ELF_HOST_MACHINE. */
}
|
|
|
|
#endif /* ELF_HOST_MACHINE */
|
2017-09-15 23:11:45 +02:00
|
|
|
|
|
|
|
#if !TCG_TARGET_MAYBE_vec
|
|
|
|
/* Stub linked only when the backend has no vector support
   (TCG_TARGET_MAYBE_vec == 0); it must never actually be called. */
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
|
|
|
|
#endif
|