Do away with TB retranslation

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJWFOjwAAoJEK0ScMxN0CebJb0IAJT1JFDVkxsdkOAB6ZsM7xlj
 8INCI00ayO/nK4U93haO/i4d5kCwYdPNtv98f1NDsSoUsmw+DJTzDiZQ0qvXd+bD
 byF8XYqNyIbft9C1MdUch2v4jT29QZPOO0Fpcfy6/yOHTs9SyYyiC9dSddIiXdd/
 MOTvbOENWYwnf+8U57kfQQfaJffLdcyOPQJseMo8S81bmhg7ZUqRw7r7L1GC6vih
 2hBPAmv9uo+c9qAzOyquNeVfktfhJmO+DAMpedJ/fu1VQxwP9/HYnA/ijBWVrjBJ
 2D1EmfzFtTMdPVl1q/K1dpryMM7XAb2QztFGZAYEMWjC1ackKJP289YqC+7V2ug=
 =PYK3
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20151007' into staging

Do away with TB retranslation

# gpg: Signature made Wed 07 Oct 2015 10:42:08 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-tcg-20151007: (26 commits)
  tcg: Adjust CODE_GEN_AVG_BLOCK_SIZE
  tcg: Check for overflow via highwater mark
  tcg: Allocate a guard page after code_gen_buffer
  tcg: Emit prologue to the beginning of code_gen_buffer
  tcg: Remove tcg_gen_code_search_pc
  tcg: Remove gen_intermediate_code_pc
  tcg: Save insn data and use it in cpu_restore_state_from_tb
  tcg: Pass data argument to restore_state_to_opc
  tcg: Add TCG_MAX_INSNS
  target-*: Drop cpu_gen_code define
  tcg: Merge cpu_gen_code into tb_gen_code
  target-sparc: Add npc state to insn_start
  target-sparc: Remove gen_opc_jump_pc
  target-sparc: Split out gen_branch_n
  target-sparc: Tidy gen_branch_a interface
  target-cris: Mirror gen_opc_pc into insn_start
  target-sh4: Add flags state to insn_start
  target-s390x: Add cc_op state to insn_start
  target-mips: Add delayed branch state to insn_start
  target-i386: Add cc_op state to insn_start
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2015-10-08 15:33:56 +01:00
commit 31c9bd164d
45 changed files with 974 additions and 1502 deletions
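In outline, the series replaces retranslation with recorded per-insn data: each front end now calls tcg_gen_insn_start() once per guest instruction, the words it passes are stored compactly next to the generated code, and cpu_restore_state_from_tb() decodes them back into a flat array for the target's restore_state_to_opc(). The old scheme re-ran the whole translator with search_pc = true just to rebuild the gen_opc_pc[] / gen_opc_instr_start[] side arrays. The toy program below models the new lookup; it is a standalone illustration with a hypothetical record layout, not QEMU code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One record per guest insn: the PC plus any extra state words, tagged
   with the host-code offset where that insn's output begins.  Restoring
   state at a fault is then a lookup, not a second translation pass. */
typedef struct {
    uint64_t data[2];   /* data[0] = guest pc, data[1] = e.g. cc_op */
    size_t host_off;    /* offset of this insn's host code in the TB */
} InsnStart;

static const InsnStart *find_insn(const InsnStart *s, int n, size_t host_off)
{
    const InsnStart *best = &s[0];
    for (int i = 1; i < n; i++) {
        if (s[i].host_off <= host_off) {   /* last insn starting at or before */
            best = &s[i];
        }
    }
    return best;
}

int main(void)
{
    InsnStart search[] = {
        { { 0x1000, 1 },  0 },
        { { 0x1004, 2 }, 24 },
        { { 0x1008, 2 }, 40 },
    };
    /* Fault at host-code offset 30 lands inside the second insn. */
    const InsnStart *hit = find_insn(search, 3, 30);
    printf("restore pc=0x%llx extra=%llu\n",
           (unsigned long long)hit->data[0],
           (unsigned long long)hit->data[1]);
    return 0;
}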

View File

@ -62,24 +62,15 @@ typedef struct TranslationBlock TranslationBlock;
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
/* Maximum size a TCG op can expand to. This is complicated because a
single op may require several host instructions and register reloads.
For now take a wild guess at 192 bytes, which should allow at least
a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#include "qemu/log.h"
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
int pc_pos);
target_ulong *data);
void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
int *gen_code_size_ptr);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void);
@ -170,13 +161,14 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
according to the host CPU */
/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
Better would seem to be some sort of dynamically sized TB array,
adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif
#if defined(__arm__) || defined(_ARCH_PPC) \
@ -201,6 +193,7 @@ struct TranslationBlock {
#define CF_USE_ICOUNT 0x20000
void *tc_ptr; /* pointer to the translated code */
uint8_t *tc_search; /* pointer to search data */
/* next matching tb for physical address. */
struct TranslationBlock *phys_hash_next;
/* original tb when cflags has CF_NOCACHE */
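Two details in the hunk above are worth unpacking. The new tc_search pointer is where each TB's per-insn records live; a compact signed-LEB128 delta stream ("pc advanced by 4, host offset by 17") is the natural encoding here, and my recollection is that this series implements exactly that, though the hunk itself does not show it. The CODE_GEN_AVG_BLOCK_SIZE bump also matters more than it looks: the constant divides the code buffer to size the TB array, so a 32 MB buffer now budgets roughly 84k TBs at 400 bytes per block rather than roughly 262k at the old 128. A standalone sketch of the delta encoding (assumes arithmetic right shift on signed values, as all mainstream compilers provide):

#include <stdint.h>
#include <stdio.h>

/* Signed-LEB128 encode/decode: small deltas cost one byte each.
   Standalone demo, not QEMU's actual code. */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more;
    do {
        uint8_t byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && !(byte & 0x40)) ||
                 (val == -1 && (byte & 0x40)));
        if (more) {
            byte |= 0x80;   /* continuation bit */
        }
        *p++ = byte;
    } while (more);
    return p;
}

static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int shift = 0;
    uint8_t byte;
    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -((int64_t)1 << shift);   /* sign-extend */
    }
    *pp = p;
    return val;
}

int main(void)
{
    uint8_t buf[16];
    uint8_t *end = encode_sleb128(buf, -4);   /* e.g. a pc delta */
    const uint8_t *p = buf;
    printf("%d byte(s), decoded %lld\n", (int)(end - buf),
           (long long)decode_sleb128(&p));
    return 0;
}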

View File

@ -721,6 +721,7 @@ void cpu_single_step(CPUState *cpu, int enabled);
/* 0x08 currently unused */
#define BP_GDB 0x10
#define BP_CPU 0x20
#define BP_ANY (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
@ -731,6 +732,21 @@ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == pc && (bp->flags & mask)) {
return true;
}
}
}
return false;
}
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
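cpu_breakpoint_test() centralizes the scan that every translator previously open-coded, and the mask argument lets call sites filter: most pass BP_ANY, while target-i386 (further down) passes BP_GDB when eflags.RF must suppress CPU breakpoints. A standalone model of the matching rule, using a plain array in place of the QTAILQ:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* BP_* values as in the hunk above; a breakpoint fires when the PC
   matches and its flags intersect the caller's mask. */
#define BP_GDB 0x10
#define BP_CPU 0x20
#define BP_ANY (BP_GDB | BP_CPU)

typedef struct { uint64_t pc; int flags; } Breakpoint;

static bool breakpoint_test(const Breakpoint *bps, int n,
                            uint64_t pc, int mask)
{
    for (int i = 0; i < n; i++) {
        if (bps[i].pc == pc && (bps[i].flags & mask)) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    Breakpoint bps[] = { { 0x1000, BP_CPU } };
    /* With RF set, target-i386 passes BP_GDB, so a BP_CPU breakpoint
       at the same pc is suppressed: */
    printf("BP_ANY hit=%d, BP_GDB hit=%d\n",
           breakpoint_test(bps, 1, 0x1000, BP_ANY),
           breakpoint_test(bps, 1, 0x1000, BP_GDB));
    return 0;
}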

View File

@ -287,7 +287,6 @@ struct CPUAlphaState {
#define cpu_list alpha_cpu_list
#define cpu_exec cpu_alpha_exec
#define cpu_gen_code cpu_alpha_gen_code
#define cpu_signal_handler cpu_alpha_signal_handler
#include "exec/cpu-all.h"

View File

@ -2858,18 +2858,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
return ret;
}
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUAlphaState *env = &cpu->env;
DisasContext ctx, *ctxp = &ctx;
target_ulong pc_start;
target_ulong pc_mask;
uint32_t insn;
CPUBreakpoint *bp;
int j, lj = -1;
ExitStatus ret;
int num_insns;
int max_insns;
@ -2904,6 +2900,9 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
if (in_superpage(&ctx, pc_start)) {
pc_mask = (1ULL << 41) - 1;
@ -2913,35 +2912,17 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
gen_tb_start(tb);
do {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
}
}
tcg_gen_insn_start(ctx.pc);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = ctx.pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
insn = cpu_ldl_code(env, ctx.pc);
num_insns++;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(ctx.pc);
}
TCGV_UNUSED_I64(ctx.zero);
TCGV_UNUSED_I64(ctx.sink);
@ -2997,16 +2978,8 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
}
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@ -3017,17 +2990,8 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
#endif
}
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}
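A side effect visible throughout these translator rewrites: num_insns is now incremented when an instruction starts, next to tcg_gen_insn_start(), rather than after it is translated. That is why the last-insn I/O checks read num_insns == max_insns instead of num_insns + 1 == max_insns, and why the ARM single-step asserts below change from 0 to 1. A schematic, standalone stub of the reworked loop:

#include <stdio.h>

/* Not QEMU code: just the counting pattern shared by the new loops. */
int main(void)
{
    int num_insns = 0, max_insns = 3;

    do {
        /* tcg_gen_insn_start(pc, ...); records the insn being started */
        num_insns++;
        if (num_insns == max_insns) {
            printf("insn %d is the last one: gen_io_start() here\n",
                   num_insns);
        }
        /* ...decode and translate one guest insn... */
    } while (num_insns < max_insns);
    return 0;
}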

View File

@ -95,6 +95,7 @@
struct arm_boot_info;
#define NB_MMU_MODES 7
#define TARGET_INSN_START_EXTRA_WORDS 1
/* We currently assume float and double are IEEE single and double
precision respectively.
@ -1600,7 +1601,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

View File

@ -11000,15 +11000,11 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
free_tmp_a64(s);
}
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
{
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
int num_insns;
@ -11067,19 +11063,25 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
init_tmp_a64_array(dc);
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
tcg_clear_temp_count();
do {
tcg_gen_insn_start(dc->pc, 0);
num_insns++;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
@ -11091,27 +11093,10 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
}
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
if (dc->ss_active && !dc->pstate_ss) {
/* Singlestep state is Active-pending.
* If we're in this state at the start of a TB then either
@ -11123,7 +11108,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
* "did not step an insn" case, and so the syndrome ISV and EX
* bits should be zero.
*/
assert(num_insns == 0);
assert(num_insns == 1);
gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
default_exception_el(dc));
dc->is_jmp = DISAS_EXC;
@ -11142,7 +11127,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place.
*/
num_insns++;
} while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
@ -11221,14 +11205,6 @@ done_generating:
qemu_log("\n");
}
#endif
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}

View File

@ -52,7 +52,6 @@
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
#include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
@ -11168,17 +11167,12 @@ undef:
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(ARMCPU *cpu,
TranslationBlock *tb,
bool search_pc)
basic block 'tb'. */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
int num_insns;
@ -11190,7 +11184,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* the A32/T32 complexity to do with conditional execution/IT blocks/etc.
*/
if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
gen_intermediate_code_internal_a64(cpu, tb, search_pc);
gen_intermediate_code_a64(cpu, tb);
return;
}
@ -11256,11 +11250,14 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
cpu_M0 = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
@ -11286,10 +11283,9 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* (3) if we leave the TB unexpectedly (eg a data abort on a load)
* then the CPUARMState will be wrong and we need to reset it.
* This is handled in the same way as restoration of the
* PC in these situations: we will be called again with search_pc=1
* and generate a mapping of the condexec bits for each PC in
* gen_opc_condexec_bits[]. restore_state_to_opc() then uses
* this to restore the condexec bits.
* PC in these situations; we save the value of the condexec bits
* for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
* then uses this to restore them after an exception.
*
* Note that there are no instructions which can read the condexec
* bits, and none which can write non-static values to them, so
@ -11306,6 +11302,10 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
store_cpu_field(tmp, condexec_bits);
}
do {
tcg_gen_insn_start(dc->pc,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
num_insns++;
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
if (dc->pc >= 0xffff0000) {
@ -11326,6 +11326,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
#endif
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
@ -11336,24 +11337,9 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
}
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
if (dc->ss_active && !dc->pstate_ss) {
@ -11367,7 +11353,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* "did not step an insn" case, and so the syndrome ISV and EX
* bits should be zero.
*/
assert(num_insns == 0);
assert(num_insns == 1);
gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
default_exception_el(dc));
goto done_generating;
@ -11403,7 +11389,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
} while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
@ -11533,25 +11518,8 @@ done_generating:
qemu_log("\n");
}
#endif
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
}
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
static const char *cpu_mode_names[16] = {
@ -11608,13 +11576,14 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
}
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
target_ulong *data)
{
if (is_a64(env)) {
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
env->condexec_bits = 0;
} else {
env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
env->condexec_bits = gen_opc_condexec_bits[pc_pos];
env->regs[15] = data[0];
env->condexec_bits = data[1];
}
}
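The two insn_start words emitted in the ARM hunk above are the PC and the packed conditional-execution state, and the packing is chosen to match the CPUARMState condexec_bits layout so the restore side can assign data[1] directly. A standalone round trip, assuming I recall the DisasContext split correctly:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t condexec_bits = 0xe5;               /* example IT state in env */
    uint32_t cond = condexec_bits >> 4;          /* dc->condexec_cond */
    uint32_t mask = (condexec_bits & 0xf) << 1;  /* dc->condexec_mask */

    /* Word 1 of tcg_gen_insn_start(), as built in the hunk above: */
    uint32_t word1 = (cond << 4) | (mask >> 1);

    assert(word1 == condexec_bits);              /* round-trips exactly */
    return 0;
}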

View File

@ -122,9 +122,7 @@ static inline int default_exception_el(DisasContext *s)
#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
TranslationBlock *tb,
bool search_pc);
void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb);
void gen_a64_set_pc_im(uint64_t val);
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags);
@ -133,9 +131,7 @@ static inline void a64_translate_init(void)
{
}
static inline void gen_intermediate_code_internal_a64(ARMCPU *cpu,
TranslationBlock *tb,
bool search_pc)
static inline void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
{
}

View File

@ -223,7 +223,6 @@ enum {
#define cpu_init(cpu_model) CPU(cpu_cris_init(cpu_model))
#define cpu_exec cpu_cris_exec
#define cpu_gen_code cpu_cris_gen_code
#define cpu_signal_handler cpu_cris_signal_handler
/* MMU modes definitions */

View File

@ -2994,10 +2994,6 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
int insn_len = 2;
int i;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
/* Load a halfword onto the instruction register. */
dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
@ -3034,23 +3030,6 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
return insn_len;
}
static void check_breakpoint(CPUCRISState *env, DisasContext *dc)
{
CPUState *cs = CPU(cris_env_get_cpu(env));
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc);
t_gen_raise_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
}
}
}
}
#include "translate_v10.c"
/*
@ -3088,15 +3067,12 @@ static void check_breakpoint(CPUCRISState *env, DisasContext *dc)
*/
/* generate intermediate code for basic block 'tb'. */
static inline void
gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUCRISState *env, struct TranslationBlock *tb)
{
CRISCPU *cpu = cris_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUCRISState *env = &cpu->env;
uint32_t pc_start;
unsigned int insn_len;
int j, lj;
struct DisasContext ctx;
struct DisasContext *dc = &ctx;
uint32_t next_page_start;
@ -3148,13 +3124,13 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log(
"srch=%d pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
"pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
"pid=%x usp=%x\n"
"%x.%x.%x.%x\n"
"%x.%x.%x.%x\n"
"%x.%x.%x.%x\n"
"%x.%x.%x.%x\n",
search_pc, dc->pc, dc->ppc,
dc->pc, dc->ppc,
(uint64_t)tb->flags,
env->btarget, (unsigned)tb->flags & 7,
env->pregs[PR_CCS],
@ -3170,38 +3146,33 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
check_breakpoint(env, dc);
tcg_gen_insn_start(dc->delayed_branch == 1
? dc->ppc | 1 : dc->pc);
num_insns++;
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
if (dc->delayed_branch == 1) {
tcg_ctx.gen_opc_pc[lj] = dc->ppc | 1;
} else {
tcg_ctx.gen_opc_pc[lj] = dc->pc;
}
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc);
t_gen_raise_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
break;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
dc->clear_x = 1;
@ -3213,7 +3184,6 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
cris_clear_x_flag(dc);
}
num_insns++;
/* Check for delayed branches here. If we do it before
actually generating any host code, the simulator will just
loop doing nothing for on this program location. */
@ -3318,16 +3288,8 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
}
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
#if !DISAS_CRIS
@ -3341,16 +3303,6 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
#endif
}
void gen_intermediate_code (CPUCRISState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(cris_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUCRISState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(cris_env_get_cpu(env), tb, true);
}
void cris_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
@ -3443,7 +3395,8 @@ void cris_initialize_tcg(void)
}
}
void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb,
target_ulong *data)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}

View File

@ -1199,9 +1199,6 @@ static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
{
unsigned int insn_len = 2;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
tcg_gen_debug_insn_start(dc->pc);
/* Load a halfword onto the instruction register. */
dc->ir = cpu_lduw_code(env, dc->pc);

View File

@ -794,6 +794,7 @@ typedef struct {
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1
#define NB_OPMASK_REGS 8
@ -1188,7 +1189,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup

View File

@ -75,8 +75,6 @@ static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
#include "exec/gen-icount.h"
#ifdef TARGET_X86_64
@ -4401,9 +4399,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
target_ulong next_eip, tval;
int rex_w, rex_r;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(pc_start);
}
s->pc = pc_start;
prefixes = 0;
s->override = -1;
@ -7842,18 +7837,13 @@ void optimize_flags_init(void)
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(X86CPU *cpu,
TranslationBlock *tb,
bool search_pc)
basic block 'tb'. */
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
CPUBreakpoint *bp;
int j, lj;
uint64_t flags;
target_ulong pc_start;
target_ulong cs_base;
@ -7933,40 +7923,32 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
for(;;) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == pc_ptr &&
!((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
gen_debug(dc, pc_ptr - dc->cs_base);
goto done_generating;
}
}
tcg_gen_insn_start(pc_ptr, dc->cc_op);
num_insns++;
/* If RF is set, suppress an internally generated breakpoint. */
if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
tb->flags & HF_RF_MASK
? BP_GDB : BP_ANY))) {
gen_debug(dc, pc_ptr - dc->cs_base);
goto done_generating;
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = pc_ptr;
gen_opc_cc_op[lj] = dc->cc_op;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
pc_ptr = disas_insn(env, dc, pc_ptr);
num_insns++;
/* stop translation if indicated */
if (dc->is_jmp)
break;
@ -8014,14 +7996,6 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
done_generating:
gen_tb_end(tb, num_insns);
/* we don't forget to fill the last values */
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
int disas_flags;
@ -8038,42 +8012,16 @@ done_generating:
}
#endif
if (!search_pc) {
tb->size = pc_ptr - pc_start;
tb->icount = num_insns;
}
tb->size = pc_ptr - pc_start;
tb->icount = num_insns;
}
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
int cc_op;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
int i;
qemu_log("RESTORE:\n");
for(i = 0;i <= pc_pos; i++) {
if (tcg_ctx.gen_opc_instr_start[i]) {
qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
tcg_ctx.gen_opc_pc[i]);
}
}
qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
(uint32_t)tb->cs_base);
}
#endif
env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
cc_op = gen_opc_cc_op[pc_pos];
if (cc_op != CC_OP_DYNAMIC)
int cc_op = data[1];
env->eip = data[0] - tb->cs_base;
if (cc_op != CC_OP_DYNAMIC) {
env->cc_op = cc_op;
}
}
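target-i386 records its lazy condition-code state as insn_start word 1 and, as the restore above shows, skips the assignment when the recorded value is CC_OP_DYNAMIC, since dynamic means the live cc_* registers already hold the truth; note also that word 0 stores the linear pc, so eip is recovered as data[0] - tb->cs_base. s390x (further down) follows the same pattern, additionally excluding CC_OP_STATIC. A standalone model with stand-in enum values:

#include <stdint.h>
#include <stdio.h>

/* The enum values are placeholders, not QEMU's. */
enum { CC_OP_DYNAMIC = 0, CC_OP_SUB = 7 };

int main(void)
{
    uint64_t data[2] = { 0x401000, CC_OP_SUB };   /* (pc, cc_op) record */
    int env_cc_op = CC_OP_DYNAMIC;

    if ((int)data[1] != CC_OP_DYNAMIC) {          /* only restore a real op */
        env_cc_op = (int)data[1];
    }
    printf("pc=0x%llx cc_op=%d\n", (unsigned long long)data[0], env_cc_op);
    return 0;
}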

View File

@ -219,7 +219,6 @@ bool lm32_cpu_do_semihosting(CPUState *cs);
#define cpu_list lm32_cpu_list
#define cpu_exec cpu_lm32_exec
#define cpu_gen_code cpu_lm32_gen_code
#define cpu_signal_handler cpu_lm32_signal_handler
int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,

View File

@ -1005,10 +1005,6 @@ static const DecoderInfo decinfo[] = {
static inline void decode(DisasContext *dc, uint32_t ir)
{
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
dc->ir = ir;
LOG_DIS("%8.8x\t", dc->ir);
@ -1036,32 +1032,13 @@ static inline void decode(DisasContext *dc, uint32_t ir)
decinfo[dc->opcode](dc);
}
static void check_breakpoint(CPULM32State *env, DisasContext *dc)
{
CPUState *cs = CPU(lm32_env_get_cpu(env));
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
t_gen_raise_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
}
}
}
}
/* generate intermediate code for basic block 'tb'. */
static inline
void gen_intermediate_code_internal(LM32CPU *cpu,
TranslationBlock *tb, bool search_pc)
void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
LM32CPU *cpu = lm32_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPULM32State *env = &cpu->env;
struct DisasContext ctx, *dc = &ctx;
uint32_t pc_start;
int j, lj;
uint32_t next_page_start;
int num_insns;
int max_insns;
@ -1083,41 +1060,36 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
check_breakpoint(env, dc);
tcg_gen_insn_start(dc->pc);
num_insns++;
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
t_gen_raise_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
break;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
decode(dc, cpu_ldl_code(env, dc->pc));
dc->pc += 4;
num_insns++;
} while (!dc->is_jmp
&& !tcg_op_buf_full()
&& !cs->singlestep_enabled
@ -1154,16 +1126,8 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@ -1175,16 +1139,6 @@ void gen_intermediate_code_internal(LM32CPU *cpu,
#endif
}
void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPULM32State *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, true);
}
void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
@ -1219,9 +1173,10 @@ void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf(f, "\n\n");
}
void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
target_ulong *data)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}
void lm32_translate_init(void)

View File

@ -213,7 +213,6 @@ void register_m68k_insns (CPUM68KState *env);
#define cpu_init(cpu_model) CPU(cpu_m68k_init(cpu_model))
#define cpu_exec cpu_m68k_exec
#define cpu_gen_code cpu_m68k_gen_code
#define cpu_signal_handler cpu_m68k_signal_handler
#define cpu_list m68k_cpu_list

View File

@ -2955,10 +2955,6 @@ static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
uint16_t insn;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(s->pc);
}
insn = cpu_lduw_code(env, s->pc);
s->pc += 2;
@ -2966,15 +2962,11 @@ static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
}
/* generate intermediate code for basic block 'tb'. */
static inline void
gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
M68kCPU *cpu = m68k_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUM68KState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
int j, lj;
target_ulong pc_start;
int pc_offset;
int num_insns;
@ -2993,43 +2985,34 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
dc->fpcr = env->fpcr;
dc->user = (env->sr & SR_S) == 0;
dc->done_mac = 0;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
break;
}
}
if (dc->is_jmp)
break;
tcg_gen_insn_start(dc->pc);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
break;
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
dc->insn_pc = dc->pc;
disas_m68k_insn(env, dc);
num_insns++;
} while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
@ -3073,28 +3056,8 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
qemu_log("\n");
}
#endif
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
//optimize_flags();
//expand_target_qops();
}
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, true);
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
@ -3120,7 +3083,8 @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
}
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
target_ulong *data)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}

View File

@ -295,7 +295,6 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
#define cpu_init(cpu_model) CPU(cpu_mb_init(cpu_model))
#define cpu_exec cpu_mb_exec
#define cpu_gen_code cpu_mb_gen_code
#define cpu_signal_handler cpu_mb_signal_handler
/* MMU modes definitions */

View File

@ -1588,10 +1588,6 @@ static inline void decode(DisasContext *dc, uint32_t ir)
{
int i;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
dc->ir = ir;
LOG_DIS("%8.8x\t", dc->ir);
@ -1630,30 +1626,12 @@ static inline void decode(DisasContext *dc, uint32_t ir)
}
}
static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
CPUState *cs = CPU(mb_env_get_cpu(env));
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
t_gen_raise_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
}
}
}
}
/* generate intermediate code for basic block 'tb'. */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUMBState *env = &cpu->env;
uint32_t pc_start;
int j, lj;
struct DisasContext ctx;
struct DisasContext *dc = &ctx;
uint32_t next_page_start, org_flags;
@ -1690,47 +1668,46 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do
{
tcg_gen_insn_start(dc->pc);
num_insns++;
#if SIM_COMPAT
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
gen_helper_debug();
}
#endif
check_breakpoint(env, dc);
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
t_gen_raise_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
break;
}
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
dc->clear_imm = 1;
decode(dc, cpu_ldl_code(env, dc->pc));
if (dc->clear_imm)
dc->tb_flags &= ~IMM_FLAG;
dc->pc += 4;
num_insns++;
if (dc->delayed_branch) {
dc->delayed_branch--;
@ -1821,15 +1798,8 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
}
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
#if !SIM_COMPAT
@ -1846,16 +1816,6 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
assert(!dc->abort_at_next_insn);
}
void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
@ -1936,7 +1896,8 @@ void mb_tcg_init(void)
}
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
target_ulong *data)
{
env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
env->sregs[SR_PC] = data[0];
}

View File

@ -130,6 +130,7 @@ struct CPUMIPSFPUContext {
};
#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 2
typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
struct CPUMIPSMVPContext {
@ -619,7 +620,6 @@ void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
#define cpu_exec cpu_mips_exec
#define cpu_gen_code cpu_mips_gen_code
#define cpu_signal_handler cpu_mips_signal_handler
#define cpu_list mips_cpu_list

View File

@ -1359,9 +1359,6 @@ static TCGv_i32 fpu_fcr0, fpu_fcr31;
static TCGv_i64 fpu_f64[32];
static TCGv_i64 msa_wr_d[64];
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
static target_ulong gen_opc_btarget[OPC_BUF_SIZE];
#include "exec/gen-icount.h"
#define gen_helper_0e0i(name, arg) do { \
@ -18904,10 +18901,6 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
gen_set_label(l1);
}
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(ctx->pc);
}
op = MASK_OP_MAJOR(ctx->opcode);
rs = (ctx->opcode >> 21) & 0x1f;
rt = (ctx->opcode >> 16) & 0x1f;
@ -19539,25 +19532,18 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
}
}
static inline void
gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUMIPSState *env, struct TranslationBlock *tb)
{
MIPSCPU *cpu = mips_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUMIPSState *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
target_ulong next_page_start;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
int max_insns;
int insn_bytes;
int is_slot;
if (search_pc)
qemu_log("search pc %d\n", search_pc);
pc_start = tb->pc;
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
ctx.pc = pc_start;
@ -19567,6 +19553,7 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
ctx.CP0_Config1 = env->CP0_Config1;
ctx.tb = tb;
ctx.bstate = BS_NONE;
ctx.btarget = 0;
ctx.kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
ctx.rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
ctx.ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
@ -19590,40 +19577,32 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
MO_UNALN : MO_ALIGN;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags);
gen_tb_start(tb);
while (ctx.bstate == BS_NONE) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
gen_helper_raise_exception_debug(cpu_env);
/* Include the breakpoint location or the tb won't
* be flushed when it must be. */
ctx.pc += 4;
goto done_generating;
}
}
tcg_gen_insn_start(ctx.pc, ctx.hflags & MIPS_HFLAG_BMASK, ctx.btarget);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
gen_helper_raise_exception_debug(cpu_env);
/* Include the breakpoint location or the tb won't
* be flushed when it must be. */
ctx.pc += 4;
goto done_generating;
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = ctx.pc;
gen_opc_hflags[lj] = ctx.hflags & MIPS_HFLAG_BMASK;
gen_opc_btarget[lj] = ctx.btarget;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
is_slot = ctx.hflags & MIPS_HFLAG_BMASK;
if (!(ctx.hflags & MIPS_HFLAG_M16)) {
@ -19660,8 +19639,6 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
}
ctx.pc += insn_bytes;
num_insns++;
/* Execute a branch and its delay slot as a single instruction.
This is what GDB expects and is consistent with what the
hardware does (e.g. if a delay slot instruction faults, the
@ -19710,15 +19687,9 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
done_generating:
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
}
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
LOG_DISAS("\n");
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@ -19729,16 +19700,6 @@ done_generating:
#endif
}
void gen_intermediate_code (CPUMIPSState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(mips_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUMIPSState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(mips_env_get_cpu(env), tb, true);
}
static void fpu_dump_state(CPUMIPSState *env, FILE *f, fprintf_function fpu_fprintf,
int flags)
{
@ -20062,18 +20023,19 @@ void cpu_state_reset(CPUMIPSState *env)
}
}
void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb,
target_ulong *data)
{
env->active_tc.PC = tcg_ctx.gen_opc_pc[pc_pos];
env->active_tc.PC = data[0];
env->hflags &= ~MIPS_HFLAG_BMASK;
env->hflags |= gen_opc_hflags[pc_pos];
env->hflags |= data[1];
switch (env->hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_BR:
break;
case MIPS_HFLAG_BC:
case MIPS_HFLAG_BL:
case MIPS_HFLAG_B:
env->btarget = gen_opc_btarget[pc_pos];
env->btarget = data[2];
break;
}
}
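For reference, the insn_start payload shapes collected from the hunks across this page; word 0 is always the guest PC, and extra words follow TARGET_INSN_START_EXTRA_WORDS for the target:

/* alpha, lm32, m68k, microblaze, moxie, openrisc, ppc (nip):
 *          tcg_gen_insn_start(pc);
 * aarch64: tcg_gen_insn_start(pc, 0);                  word 1 unused in A64
 * arm:     tcg_gen_insn_start(pc, (condexec_cond << 4) | (condexec_mask >> 1));
 * i386:    tcg_gen_insn_start(pc, cc_op);
 * s390x:   tcg_gen_insn_start(pc, cc_op);
 * mips:    tcg_gen_insn_start(pc, hflags & MIPS_HFLAG_BMASK, btarget);
 * cris:    tcg_gen_insn_start(delayed_branch == 1 ? ppc | 1 : pc);
 */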

View File

@ -122,7 +122,6 @@ int cpu_moxie_signal_handler(int host_signum, void *pinfo,
#define cpu_init(cpu_model) CPU(cpu_moxie_init(cpu_model))
#define cpu_exec cpu_moxie_exec
#define cpu_gen_code cpu_moxie_gen_code
#define cpu_signal_handler cpu_moxie_signal_handler
static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)

View File

@ -153,10 +153,6 @@ static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
/* Set the default instruction length. */
int length = 2;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(ctx->pc);
}
/* Examine the 16-bit opcode. */
opcode = ctx->opcode;
@ -819,17 +815,13 @@ static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
}
/* generate intermediate code for basic block 'tb'. */
static inline void
gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUMoxieState *env, struct TranslationBlock *tb)
{
MoxieCPU *cpu = moxie_env_get_cpu(env);
CPUState *cs = CPU(cpu);
DisasContext ctx;
target_ulong pc_start;
CPUBreakpoint *bp;
int j, lj = -1;
CPUMoxieState *env = &cpu->env;
int num_insns;
int num_insns, max_insns;
pc_start = tb->pc;
ctx.pc = pc_start;
@ -839,40 +831,35 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
ctx.singlestep_enabled = 0;
ctx.bstate = BS_NONE;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (ctx.pc == bp->pc) {
tcg_gen_movi_i32(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
ctx.bstate = BS_EXCP;
goto done_generating;
}
}
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = ctx.pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
ctx.opcode = cpu_lduw_code(env, ctx.pc);
ctx.pc += decode_opc(cpu, &ctx);
tcg_gen_insn_start(ctx.pc);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
tcg_gen_movi_i32(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
ctx.bstate = BS_EXCP;
goto done_generating;
}
ctx.opcode = cpu_lduw_code(env, ctx.pc);
ctx.pc += decode_opc(cpu, &ctx);
if (num_insns >= max_insns) {
break;
}
if (cs->singlestep_enabled) {
break;
}
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) {
break;
}
@ -898,29 +885,12 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
done_generating:
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
}
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
}
void gen_intermediate_code(CPUMoxieState *env, struct TranslationBlock *tb)
void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(moxie_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUMoxieState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(moxie_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb, int pc_pos)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}

View File

@ -360,7 +360,6 @@ int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
#define cpu_list cpu_openrisc_list
#define cpu_exec cpu_openrisc_exec
#define cpu_gen_code cpu_openrisc_gen_code
#define cpu_signal_handler cpu_openrisc_signal_handler
#ifndef CONFIG_USER_ONLY

View File

@ -1618,30 +1618,12 @@ static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
}
}
static void check_breakpoint(OpenRISCCPU *cpu, DisasContext *dc)
{
CPUState *cs = CPU(cpu);
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
gen_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
}
}
}
}
static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
TranslationBlock *tb,
int search_pc)
void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
{
OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
struct DisasContext ctx, *dc = &ctx;
uint32_t pc_start;
int j, k;
uint32_t next_page_start;
int num_insns;
int max_insns;
@ -1663,36 +1645,30 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
k = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
check_breakpoint(cpu, dc);
if (search_pc) {
j = tcg_op_buf_count();
if (k < j) {
k++;
while (k < j) {
tcg_ctx.gen_opc_instr_start[k++] = 0;
}
}
tcg_ctx.gen_opc_pc[k] = dc->pc;
tcg_ctx.gen_opc_instr_start[k] = 1;
tcg_ctx.gen_opc_icount[k] = num_insns;
tcg_gen_insn_start(dc->pc);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
gen_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
break;
}
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
dc->ppc = dc->pc - 4;
@ -1701,7 +1677,6 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
tcg_gen_movi_tl(cpu_npc, dc->npc);
disas_openrisc_insn(dc, cpu);
dc->pc = dc->npc;
num_insns++;
/* delay slot */
if (dc->delayed_branch) {
dc->delayed_branch--;
@ -1756,16 +1731,8 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
k++;
while (k <= j) {
tcg_ctx.gen_opc_instr_start[k++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@ -1777,17 +1744,6 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
#endif
}
void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(openrisc_env_get_cpu(env), tb, 0);
}
void gen_intermediate_code_pc(CPUOpenRISCState *env,
struct TranslationBlock *tb)
{
gen_intermediate_code_internal(openrisc_env_get_cpu(env), tb, 1);
}
void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf,
int flags)
@ -1804,7 +1760,7 @@ void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
}
void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
int pc_pos)
target_ulong *data)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}

View File

@ -1241,7 +1241,6 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
#define cpu_init(cpu_model) CPU(cpu_ppc_init(cpu_model))
#define cpu_exec cpu_ppc_exec
#define cpu_gen_code cpu_ppc_gen_code
#define cpu_signal_handler cpu_ppc_signal_handler
#define cpu_list ppc_cpu_list

View File

@ -11409,17 +11409,13 @@ void ppc_cpu_dump_statistics(CPUState *cs, FILE*f,
}
/*****************************************************************************/
static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
target_ulong pc_start;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
int max_insns;
@ -11476,36 +11472,29 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
#endif
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
tcg_clear_temp_count();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && !tcg_op_buf_full()) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.nip) {
gen_debug_exception(ctxp);
break;
}
}
}
if (unlikely(search_pc)) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = ctx.nip;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
tcg_gen_insn_start(ctx.nip);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, ctx.nip, BP_ANY))) {
gen_debug_exception(ctxp);
break;
}
LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx.nip, ctx.mem_idx, (int)msr_ir);
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
if (unlikely(need_byteswap(&ctx))) {
ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
@ -11515,12 +11504,8 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
LOG_DISAS("translate opcode %08x (%02x %02x %02x) (%s)\n",
ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.le_mode ? "little" : "big");
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(ctx.nip);
}
ctx.nip += 4;
table = env->opcodes;
num_insns++;
handler = table[opc1(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
@ -11599,15 +11584,9 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
}
gen_tb_end(tb, num_insns);
if (unlikely(search_pc)) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = ctx.nip - pc_start;
tb->icount = num_insns;
}
tb->size = ctx.nip - pc_start;
tb->icount = num_insns;
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
int flags;
@ -11620,17 +11599,8 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
#endif
}
void gen_intermediate_code (CPUPPCState *env, struct TranslationBlock *tb)
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(ppc_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUPPCState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(ppc_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb, int pc_pos)
{
env->nip = tcg_ctx.gen_opc_pc[pc_pos];
env->nip = data[0];
}

View File

@ -42,6 +42,7 @@
#include "fpu/softfloat.h"
#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1
#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
@ -597,7 +598,6 @@ bool css_present(uint8_t cssid);
#define cpu_init(model) CPU(cpu_s390x_init(model))
#define cpu_exec cpu_s390x_exec
#define cpu_gen_code cpu_s390x_gen_code
#define cpu_signal_handler cpu_s390x_signal_handler
void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);

View File

@ -161,8 +161,6 @@ static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
void s390x_translate_init(void)
{
int i;
@ -5319,18 +5317,14 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
return ret;
}
static inline void gen_intermediate_code_internal(S390CPU *cpu,
TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
S390CPU *cpu = s390_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUS390XState *env = &cpu->env;
DisasContext dc;
target_ulong pc_start;
uint64_t next_page_start;
int j, lj = -1;
int num_insns, max_insns;
CPUBreakpoint *bp;
ExitStatus status;
bool do_debug;
@ -5353,41 +5347,27 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc.pc;
gen_opc_cc_op[lj] = dc.cc_op;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
tcg_gen_insn_start(dc.pc, dc.cc_op);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
status = EXIT_PC_STALE;
do_debug = true;
break;
}
if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc.pc);
}
status = NO_EXIT;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc.pc) {
status = EXIT_PC_STALE;
do_debug = true;
break;
}
}
}
if (status == NO_EXIT) {
status = translate_one(env, &dc);
}
@ -5432,16 +5412,8 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc.pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc.pc - pc_start;
tb->icount = num_insns;
#if defined(S390X_DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@ -5452,21 +5424,11 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
#endif
}
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
int cc_op;
env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
cc_op = gen_opc_cc_op[pc_pos];
int cc_op = data[1];
env->psw.addr = data[0];
if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
env->cc_op = cc_op;
}

target-sh4/cpu.h

@ -120,6 +120,7 @@ typedef struct tlb_t {
#define ITLB_SIZE 4
#define NB_MMU_MODES 2
#define TARGET_INSN_START_EXTRA_WORDS 1
enum sh_features {
SH_FEATURE_SH4A = 1,
@ -225,7 +226,6 @@ void cpu_load_tlb(CPUSH4State * env);
#define cpu_init(cpu_model) CPU(cpu_sh4_init(cpu_model))
#define cpu_exec cpu_sh4_exec
#define cpu_gen_code cpu_sh4_gen_code
#define cpu_signal_handler cpu_sh4_signal_handler
#define cpu_list sh4_cpu_list

target-sh4/translate.c

@ -70,8 +70,6 @@ static TCGv cpu_fregs[32];
/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
#include "exec/gen-icount.h"
void sh4_translate_init(void)
@ -1790,10 +1788,6 @@ static void decode_opc(DisasContext * ctx)
{
uint32_t old_flags = ctx->flags;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(ctx->pc);
}
_decode_opc(ctx);
if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
@ -1820,16 +1814,12 @@ static void decode_opc(DisasContext * ctx)
gen_store_flags(ctx->flags);
}
static inline void
gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
SuperHCPU *cpu = sh_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUSH4State *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
CPUBreakpoint *bp;
int i, ii;
int num_insns;
int max_insns;
@ -1846,45 +1836,34 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
ctx.features = env->features;
ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
ii = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
ctx.bstate = BS_BRANCH;
break;
}
}
}
if (search_pc) {
i = tcg_op_buf_count();
if (ii < i) {
ii++;
while (ii < i)
tcg_ctx.gen_opc_instr_start[ii++] = 0;
}
tcg_ctx.gen_opc_pc[ii] = ctx.pc;
gen_opc_hflags[ii] = ctx.flags;
tcg_ctx.gen_opc_instr_start[ii] = 1;
tcg_ctx.gen_opc_icount[ii] = num_insns;
tcg_gen_insn_start(ctx.pc, ctx.flags);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
ctx.bstate = BS_BRANCH;
break;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
#if 0
fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
fflush(stderr);
#endif
}
ctx.opcode = cpu_lduw_code(env, ctx.pc);
decode_opc(&ctx);
num_insns++;
ctx.pc += 2;
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
break;
@ -1924,15 +1903,8 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
gen_tb_end(tb, num_insns);
if (search_pc) {
i = tcg_op_buf_count();
ii++;
while (ii <= i)
tcg_ctx.gen_opc_instr_start[ii++] = 0;
} else {
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
}
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@ -1943,18 +1915,9 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
#endif
}
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->flags = gen_opc_hflags[pc_pos];
env->pc = data[0];
env->flags = data[1];
}

target-sparc/cpu.h

@ -230,6 +230,7 @@ typedef struct trap_state {
uint32_t tt;
} trap_state;
#endif
#define TARGET_INSN_START_EXTRA_WORDS 1
typedef struct sparc_def_t {
const char *name;
@ -592,7 +593,6 @@ int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
#endif
#define cpu_exec cpu_sparc_exec
#define cpu_gen_code cpu_sparc_gen_code
#define cpu_signal_handler cpu_sparc_signal_handler
#define cpu_list sparc_cpu_list

target-sparc/translate.c

@ -64,9 +64,6 @@ static TCGv cpu_wim;
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];
#include "exec/gen-icount.h"
typedef struct DisasContext {
@ -955,17 +952,44 @@ static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
target_ulong pc2, TCGv r_cond)
static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
TCGLabel *l1 = gen_new_label();
target_ulong npc = dc->npc;
tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
gen_goto_tb(dc, 0, pc2, pc1);
gen_goto_tb(dc, 0, npc, pc1);
gen_set_label(l1);
gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
gen_goto_tb(dc, 1, npc + 4, npc + 8);
dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
target_ulong npc = dc->npc;
if (likely(npc != DYNAMIC_PC)) {
dc->pc = npc;
dc->jump_pc[0] = pc1;
dc->jump_pc[1] = npc + 4;
dc->npc = JUMP_PC;
} else {
TCGv t, z;
tcg_gen_mov_tl(cpu_pc, cpu_npc);
tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
t = tcg_const_tl(pc1);
z = tcg_const_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
tcg_temp_free(t);
tcg_temp_free(z);
dc->pc = DYNAMIC_PC;
}
}
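In the DYNAMIC_PC case above, tcg_gen_movcond_tl(cond, ret, c1, c2, v1, v2) stores v1 into ret when cond(c1, c2) holds and v2 otherwise, so the generated code selects between the taken target and the already-advanced fall-through. A sketch of what the movcond computes at run time (illustrative C, not part of the patch):

    /* cpu_npc was already advanced to npc + 4 by the addi above. */
    cpu_npc = (cpu_cond != 0) ? pc1 : cpu_npc;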
static inline void gen_generic_branch(DisasContext *dc)
@ -1398,18 +1422,9 @@ static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
flush_cond(dc);
gen_cond(cpu_cond, cc, cond, dc);
if (a) {
gen_branch_a(dc, target, dc->npc, cpu_cond);
dc->is_br = 1;
gen_branch_a(dc, target);
} else {
dc->pc = dc->npc;
dc->jump_pc[0] = target;
if (unlikely(dc->npc == DYNAMIC_PC)) {
dc->jump_pc[1] = DYNAMIC_PC;
tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
} else {
dc->jump_pc[1] = dc->npc + 4;
dc->npc = JUMP_PC;
}
gen_branch_n(dc, target);
}
}
}
@ -1447,18 +1462,9 @@ static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
flush_cond(dc);
gen_fcond(cpu_cond, cc, cond);
if (a) {
gen_branch_a(dc, target, dc->npc, cpu_cond);
dc->is_br = 1;
gen_branch_a(dc, target);
} else {
dc->pc = dc->npc;
dc->jump_pc[0] = target;
if (unlikely(dc->npc == DYNAMIC_PC)) {
dc->jump_pc[1] = DYNAMIC_PC;
tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
} else {
dc->jump_pc[1] = dc->npc + 4;
dc->npc = JUMP_PC;
}
gen_branch_n(dc, target);
}
}
}
@ -1476,18 +1482,9 @@ static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
flush_cond(dc);
gen_cond_reg(cpu_cond, cond, r_reg);
if (a) {
gen_branch_a(dc, target, dc->npc, cpu_cond);
dc->is_br = 1;
gen_branch_a(dc, target);
} else {
dc->pc = dc->npc;
dc->jump_pc[0] = target;
if (unlikely(dc->npc == DYNAMIC_PC)) {
dc->jump_pc[1] = DYNAMIC_PC;
tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
} else {
dc->jump_pc[1] = dc->npc + 4;
dc->npc = JUMP_PC;
}
gen_branch_n(dc, target);
}
}
@ -2482,10 +2479,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
target_long simm;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
opc = GET_FIELD(insn, 0, 1);
rd = GET_FIELD(insn, 2, 6);
@ -5213,16 +5206,12 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
}
}
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
TranslationBlock *tb,
bool spc)
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
{
SPARCCPU *cpu = sparc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUSPARCState *env = &cpu->env;
target_ulong pc_start, last_pc;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
int max_insns;
unsigned int insn;
@ -5242,42 +5231,41 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
do {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (dc->pc != pc_start)
save_state(dc);
gen_helper_debug(cpu_env);
tcg_gen_exit_tb(0);
dc->is_br = 1;
goto exit_gen_loop;
}
}
if (dc->npc & JUMP_PC) {
assert(dc->jump_pc[1] == dc->pc + 4);
tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
} else {
tcg_gen_insn_start(dc->pc, dc->npc);
}
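Since npc values are word-aligned, the low two bits are free to serve as tags (DYNAMIC_PC is 1 and JUMP_PC is 2, as the old numeric comparisons removed below confirm), so one extra insn_start word can carry all three npc states. An illustrative decode of data[1], mirroring the restore_state_to_opc hunk later in this patch (sketch, not part of the patch):

    /* data[1] == DYNAMIC_PC: npc was already stored by generated code.
       data[1] & JUMP_PC:     npc = env->cond ? (data[1] & ~3) : pc + 4;
       otherwise:             npc = data[1]; */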
if (spc) {
qemu_log("Search PC...\n");
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
tcg_ctx.gen_opc_pc[lj] = dc->pc;
gen_opc_npc[lj] = dc->npc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
if (dc->pc != pc_start) {
save_state(dc);
}
gen_helper_debug(cpu_env);
tcg_gen_exit_tb(0);
dc->is_br = 1;
goto exit_gen_loop;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
last_pc = dc->pc;
insn = cpu_ldl_code(env, dc->pc);
disas_sparc_insn(dc, insn);
num_insns++;
if (dc->is_br)
break;
@ -5316,20 +5304,9 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
}
gen_tb_end(tb, num_insns);
if (spc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
#if 0
log_page_dump();
#endif
gen_opc_jump_pc[0] = dc->jump_pc[0];
gen_opc_jump_pc[1] = dc->jump_pc[1];
} else {
tb->size = last_pc + 4 - pc_start;
tb->icount = num_insns;
}
tb->size = last_pc + 4 - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("--------------\n");
@ -5340,16 +5317,6 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
#endif
}
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
{
gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
{
gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}
void gen_intermediate_code_init(CPUSPARCState *env)
{
unsigned int i;
@ -5451,19 +5418,21 @@ void gen_intermediate_code_init(CPUSPARCState *env)
}
}
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
target_ulong *data)
{
target_ulong npc;
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
npc = gen_opc_npc[pc_pos];
if (npc == 1) {
target_ulong pc = data[0];
target_ulong npc = data[1];
env->pc = pc;
if (npc == DYNAMIC_PC) {
/* dynamic NPC: already stored */
} else if (npc == 2) {
} else if (npc & JUMP_PC) {
/* jump PC: use 'cond' and the jump targets of the translation */
if (env->cond) {
env->npc = gen_opc_jump_pc[0];
env->npc = npc & ~3;
} else {
env->npc = gen_opc_jump_pc[1];
env->npc = pc + 4;
}
} else {
env->npc = npc;

target-tilegx/cpu.h

@ -167,7 +167,6 @@ TileGXCPU *cpu_tilegx_init(const char *cpu_model);
#define cpu_init(cpu_model) CPU(cpu_tilegx_init(cpu_model))
#define cpu_exec cpu_tilegx_exec
#define cpu_gen_code cpu_tilegx_gen_code
#define cpu_signal_handler cpu_tilegx_signal_handler
static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,

target-tilegx/translate.c

@ -2320,10 +2320,6 @@ static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
}
dc->num_wb = 0;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
if (get_Mode(bundle)) {
notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
@ -2365,17 +2361,14 @@ static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
}
}
static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
TranslationBlock *tb,
bool search_pc)
void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
{
TileGXCPU *cpu = tilegx_env_get_cpu(env);
DisasContext ctx;
DisasContext *dc = &ctx;
CPUState *cs = CPU(cpu);
CPUTLGState *env = &cpu->env;
uint64_t pc_start = tb->pc;
uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
int j, lj = -1;
int num_insns = 0;
int max_insns = tb->cflags & CF_COUNT_MASK;
@ -2397,21 +2390,15 @@ static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
if (cs->singlestep_enabled || singlestep) {
max_insns = 1;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
gen_tb_start(tb);
while (1) {
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
tcg_gen_insn_start(dc->pc);
num_insns++;
translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));
if (dc->exit_tb) {
@ -2419,7 +2406,7 @@ static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
break;
}
dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
if (++num_insns >= max_insns
if (num_insns >= max_insns
|| dc->pc >= next_page_start
|| tcg_op_buf_full()) {
/* Ending the TB due to TB size or page boundary. Set PC. */
@ -2430,33 +2417,16 @@ static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
}
gen_tb_end(tb, num_insns);
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
}
void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}
void tilegx_tcg_init(void)

target-tricore/translate.c

@ -8266,21 +8266,26 @@ static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
}
}
static inline void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
int search_pc)
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
TriCoreCPU *cpu = tricore_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUTriCoreState *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
int num_insns;
if (search_pc) {
qemu_log("search pc %d\n", search_pc);
}
int num_insns, max_insns;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (singlestep) {
max_insns = 1;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
pc_start = tb->pc;
ctx.pc = pc_start;
ctx.saved_pc = -1;
@ -8292,17 +8297,13 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
tcg_clear_temp_count();
gen_tb_start(tb);
while (ctx.bstate == BS_NONE) {
tcg_gen_insn_start(ctx.pc);
num_insns++;
ctx.opcode = cpu_ldl_code(env, ctx.pc);
decode_opc(env, &ctx, 0);
num_insns++;
if (tcg_op_buf_full()) {
gen_save_pc(ctx.next_pc);
tcg_gen_exit_tb(0);
break;
}
if (singlestep) {
if (num_insns >= max_insns || tcg_op_buf_full()) {
gen_save_pc(ctx.next_pc);
tcg_gen_exit_tb(0);
break;
@ -8311,12 +8312,9 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
}
gen_tb_end(tb, num_insns);
if (search_pc) {
printf("done_generating search pc\n");
} else {
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
}
tb->size = ctx.pc - pc_start;
tb->icount = num_insns;
if (tcg_check_temp_count()) {
printf("LEAK at %08x\n", env->PC);
}
@ -8331,21 +8329,10 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
}
void
gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
target_ulong *data)
{
gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}
void
gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}
void
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
env->PC = tcg_ctx.gen_opc_pc[pc_pos];
env->PC = data[0];
}
/*
*

target-unicore32/translate.c

@ -1794,10 +1794,6 @@ static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
UniCore32CPU *cpu = uc32_env_get_cpu(env);
unsigned int insn;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(s->pc);
}
insn = cpu_ldl_code(env, s->pc);
s->pc += 4;
@ -1868,16 +1864,12 @@ static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
TranslationBlock *tb, bool search_pc)
basic block 'tb'. */
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
UniCore32CPU *cpu = uc32_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUUniCore32State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
int j, lj;
target_ulong pc_start;
uint32_t next_page_start;
int num_insns;
@ -1899,12 +1891,14 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
cpu_F0d = tcg_temp_new_i64();
cpu_F1d = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
#ifndef CONFIG_USER_ONLY
if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
@ -1916,33 +1910,20 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
gen_tb_start(tb);
do {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_set_pc_im(dc->pc);
gen_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
/* Advance PC so that clearing the breakpoint will
invalidate this TB. */
dc->pc += 2; /* FIXME */
goto done_generating;
}
}
}
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
tcg_gen_insn_start(dc->pc);
num_insns++;
if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
gen_set_pc_im(dc->pc);
gen_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
/* Advance PC so that clearing the breakpoint will
invalidate this TB. */
dc->pc += 2; /* FIXME */
goto done_generating;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
@ -1961,7 +1942,6 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns++;
} while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
@ -2043,26 +2023,8 @@ done_generating:
qemu_log("\n");
}
#endif
if (search_pc) {
j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
}
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, true);
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
static const char *cpu_mode_names[16] = {
@ -2133,7 +2095,8 @@ void uc32_cpu_dump_state(CPUState *cs, FILE *f,
cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
}
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
target_ulong *data)
{
env->regs[31] = tcg_ctx.gen_opc_pc[pc_pos];
env->regs[31] = data[0];
}

target-xtensa/cpu.h

@ -382,7 +382,6 @@ typedef struct CPUXtensaState {
#include "cpu-qom.h"
#define cpu_exec cpu_xtensa_exec
#define cpu_gen_code cpu_xtensa_gen_code
#define cpu_signal_handler cpu_xtensa_signal_handler
#define cpu_list xtensa_cpu_list

target-xtensa/translate.c

@ -2984,22 +2984,6 @@ static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
return xtensa_op0_insn_len(OP0);
}
static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));
CPUBreakpoint *bp;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
tcg_gen_movi_i32(cpu_pc, dc->pc);
gen_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
}
}
}
}
static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
{
unsigned i;
@ -3013,15 +2997,12 @@ static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
}
}
static inline
void gen_intermediate_code_internal(XtensaCPU *cpu,
TranslationBlock *tb, bool search_pc)
void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
{
XtensaCPU *cpu = xtensa_env_get_cpu(env);
CPUState *cs = CPU(cpu);
CPUXtensaState *env = &cpu->env;
DisasContext dc;
int insn_count = 0;
int j, lj = -1;
int max_insns = tb->cflags & CF_COUNT_MASK;
uint32_t pc_start = tb->pc;
uint32_t next_page_start =
@ -3030,6 +3011,9 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}
dc.config = env->config;
dc.singlestep_enabled = cs->singlestep_enabled;
@ -3062,28 +3046,19 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
}
do {
check_breakpoint(env, &dc);
if (search_pc) {
j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
tcg_ctx.gen_opc_pc[lj] = dc.pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = insn_count;
}
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc.pc);
}
tcg_gen_insn_start(dc.pc);
++insn_count;
++dc.ccount_delta;
if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
tcg_gen_movi_i32(cpu_pc, dc.pc);
gen_exception(&dc, EXCP_DEBUG);
dc.is_jmp = DISAS_UPDATE;
break;
}
if (insn_count == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
@ -3104,7 +3079,6 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
}
disas_xtensa_insn(env, &dc);
++insn_count;
if (dc.icount) {
tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
}
@ -3142,24 +3116,8 @@ void gen_intermediate_code_internal(XtensaCPU *cpu,
qemu_log("\n");
}
#endif
if (search_pc) {
j = tcg_op_buf_count();
memset(tcg_ctx.gen_opc_instr_start + lj + 1, 0,
(j - lj) * sizeof(tcg_ctx.gen_opc_instr_start[0]));
} else {
tb->size = dc.pc - pc_start;
tb->icount = insn_count;
}
}
void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, true);
tb->size = dc.pc - pc_start;
tb->icount = insn_count;
}
void xtensa_cpu_dump_state(CPUState *cs, FILE *f,
@ -3213,7 +3171,8 @@ void xtensa_cpu_dump_state(CPUState *cs, FILE *f,
}
}
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos)
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
target_ulong *data)
{
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
env->pc = data[0];
}

tcg/tcg-op.h

@ -700,17 +700,53 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
#error must include QEMU headers
#endif
/* debug info: write the PC of the corresponding QEMU CPU instruction */
static inline void tcg_gen_debug_insn_start(uint64_t pc)
#if TARGET_INSN_START_WORDS == 1
# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
static inline void tcg_gen_insn_start(target_ulong pc)
{
/* XXX: must really use a 32 bit size for TCGArg in all cases */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
tcg_gen_op2ii(INDEX_op_debug_insn_start,
(uint32_t)(pc), (uint32_t)(pc >> 32));
#else
tcg_gen_op1i(INDEX_op_debug_insn_start, pc);
#endif
tcg_gen_op1(&tcg_ctx, INDEX_op_insn_start, pc);
}
# else
static inline void tcg_gen_insn_start(target_ulong pc)
{
tcg_gen_op2(&tcg_ctx, INDEX_op_insn_start,
(uint32_t)pc, (uint32_t)(pc >> 32));
}
# endif
#elif TARGET_INSN_START_WORDS == 2
# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{
tcg_gen_op2(&tcg_ctx, INDEX_op_insn_start, pc, a1);
}
# else
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{
tcg_gen_op4(&tcg_ctx, INDEX_op_insn_start,
(uint32_t)pc, (uint32_t)(pc >> 32),
(uint32_t)a1, (uint32_t)(a1 >> 32));
}
# endif
#elif TARGET_INSN_START_WORDS == 3
# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2)
{
tcg_gen_op3(&tcg_ctx, INDEX_op_insn_start, pc, a1, a2);
}
# else
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2)
{
tcg_gen_op6(&tcg_ctx, INDEX_op_insn_start,
(uint32_t)pc, (uint32_t)(pc >> 32),
(uint32_t)a1, (uint32_t)(a1 >> 32),
(uint32_t)a2, (uint32_t)(a2 >> 32));
}
# endif
#else
# error "Unhandled number of operands to insn_start"
#endif
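A target opts into extra per-insn words by defining TARGET_INSN_START_EXTRA_WORDS in its cpu.h (see the s390x, sh4 and sparc hunks above) and then passes exactly TARGET_INSN_START_WORDS arguments at each instruction boundary. For example, with one extra word, as in the s390x translator earlier in this patch:

    tcg_gen_insn_start(dc.pc, dc.cc_op);   /* word 0: pc, word 1: cc state */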
static inline void tcg_gen_exit_tb(uintptr_t val)
{

tcg/tcg-opc.h

@ -175,9 +175,9 @@ DEF(mulsh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i64))
/* QEMU specific */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
DEF(debug_insn_start, 0, 0, 2, TCG_OPF_NOT_PRESENT)
DEF(insn_start, 0, 0, 2 * TARGET_INSN_START_WORDS, TCG_OPF_NOT_PRESENT)
#else
DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(insn_start, 0, 0, TARGET_INSN_START_WORDS, TCG_OPF_NOT_PRESENT)
#endif
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)

tcg/tcg.c

@ -363,17 +363,39 @@ void tcg_context_init(TCGContext *s)
void tcg_prologue_init(TCGContext *s)
{
/* init global prologue and epilogue */
s->code_buf = s->code_gen_prologue;
s->code_ptr = s->code_buf;
size_t prologue_size, total_size;
void *buf0, *buf1;
/* Put the prologue at the beginning of code_gen_buffer. */
buf0 = s->code_gen_buffer;
s->code_ptr = buf0;
s->code_buf = buf0;
s->code_gen_prologue = buf0;
/* Generate the prologue. */
tcg_target_qemu_prologue(s);
flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
buf1 = s->code_ptr;
flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
/* Deduct the prologue from the buffer. */
prologue_size = tcg_current_code_size(s);
s->code_gen_ptr = buf1;
s->code_gen_buffer = buf1;
s->code_buf = buf1;
total_size = s->code_gen_buffer_size - prologue_size;
s->code_gen_buffer_size = total_size;
/* Compute a high-water mark, at which we voluntarily flush the buffer
and start over. The size here is arbitrary, significantly larger
than we expect the code generation for any one opcode to require. */
s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
tcg_register_jit(s->code_gen_buffer, total_size);
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
size_t size = tcg_current_code_size(s);
qemu_log("PROLOGUE: [size=%zu]\n", size);
log_disas(s->code_buf, size);
qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
log_disas(buf0, prologue_size);
qemu_log("\n");
qemu_log_flush();
}
@ -990,17 +1012,18 @@ void tcg_dump_ops(TCGContext *s)
def = &tcg_op_defs[c];
args = &s->gen_opparam_buf[op->args];
if (c == INDEX_op_debug_insn_start) {
uint64_t pc;
if (c == INDEX_op_insn_start) {
qemu_log("%s ----", oi != s->gen_first_op_idx ? "\n" : "");
for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
pc = ((uint64_t)args[1] << 32) | args[0];
a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
pc = args[0];
a = args[i];
#endif
if (oi != s->gen_first_op_idx) {
qemu_log("\n");
qemu_log(" " TARGET_FMT_lx, a);
}
qemu_log(" ---- 0x%" PRIx64, pc);
} else if (c == INDEX_op_call) {
/* variable number of arguments */
nb_oargs = op->callo;
@ -1400,7 +1423,7 @@ static void tcg_liveness_analysis(TCGContext *s)
}
}
break;
case INDEX_op_debug_insn_start:
case INDEX_op_insn_start:
break;
case INDEX_op_discard:
/* mark the temporary as dead */
@ -2289,11 +2312,27 @@ void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
#endif
static inline int tcg_gen_code_common(TCGContext *s,
tcg_insn_unit *gen_code_buf,
long search_pc)
int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
{
int oi, oi_next;
int i, oi, oi_next, num_insns;
#ifdef CONFIG_PROFILER
{
int n;
n = s->gen_last_op_idx + 1;
s->op_count += n;
if (n > s->op_count_max) {
s->op_count_max = n;
}
n = s->nb_temps;
s->temp_count += n;
if (n > s->temp_count_max) {
s->temp_count_max = n;
}
}
#endif
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
@ -2337,6 +2376,7 @@ static inline int tcg_gen_code_common(TCGContext *s,
tcg_out_tb_init(s);
num_insns = -1;
for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
TCGOp * const op = &s->gen_op_buf[oi];
TCGArg * const args = &s->gen_opparam_buf[op->args];
@ -2359,7 +2399,20 @@ static inline int tcg_gen_code_common(TCGContext *s,
case INDEX_op_movi_i64:
tcg_reg_alloc_movi(s, args, dead_args, sync_args);
break;
case INDEX_op_debug_insn_start:
case INDEX_op_insn_start:
if (num_insns >= 0) {
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
}
num_insns++;
for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
a = args[i];
#endif
s->gen_insn_data[num_insns][i] = a;
}
break;
case INDEX_op_discard:
temp_dead(s, args[0]);
@ -2383,40 +2436,22 @@ static inline int tcg_gen_code_common(TCGContext *s,
tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
break;
}
if (search_pc >= 0 && search_pc < tcg_current_code_size(s)) {
return oi;
}
#ifndef NDEBUG
check_regs(s);
#endif
/* Test for (pending) buffer overflow. The assumption is that any
one operation beginning below the high water mark cannot overrun
the buffer completely. Thus we can test for overflow after
generating code without having to check during generation. */
if (unlikely(s->code_gen_ptr > s->code_gen_highwater)) {
return -1;
}
}
tcg_debug_assert(num_insns >= 0);
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
/* Generate TB finalization at the end of block */
tcg_out_tb_finalize(s);
return -1;
}
int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
{
#ifdef CONFIG_PROFILER
{
int n;
n = s->gen_last_op_idx + 1;
s->op_count += n;
if (n > s->op_count_max) {
s->op_count_max = n;
}
n = s->nb_temps;
s->temp_count += n;
if (n > s->temp_count_max) {
s->temp_count_max = n;
}
}
#endif
tcg_gen_code_common(s, gen_code_buf, -1);
/* flush instruction cache */
flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
@ -2424,38 +2459,30 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
return tcg_current_code_size(s);
}
/* Return the index of the micro operation such as the pc after is <
offset bytes from the start of the TB. The contents of gen_code_buf must
not be changed, though writing the same values is ok.
Return -1 if not found. */
int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf,
long offset)
{
return tcg_gen_code_common(s, gen_code_buf, offset);
}
#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
TCGContext *s = &tcg_ctx;
int64_t tot;
int64_t tb_count = s->tb_count;
int64_t tb_div_count = tb_count ? tb_count : 1;
int64_t tot = s->interm_time + s->code_time;
tot = s->interm_time + s->code_time;
cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
tot, tot / 2.4e9);
cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
s->tb_count,
s->tb_count1 - s->tb_count,
s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
tb_count, s->tb_count1 - tb_count,
(double)(s->tb_count1 - s->tb_count)
/ (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
(double)s->op_count / tb_div_count, s->op_count_max);
cpu_fprintf(f, "deleted ops/TB %0.2f\n",
s->tb_count ?
(double)s->del_op_count / s->tb_count : 0);
(double)s->del_op_count / tb_div_count);
cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
s->tb_count ?
(double)s->temp_count / s->tb_count : 0,
s->temp_count_max);
(double)s->temp_count / tb_div_count, s->temp_count_max);
cpu_fprintf(f, "avg host code/TB %0.1f\n",
(double)s->code_out_len / tb_div_count);
cpu_fprintf(f, "avg search data/TB %0.1f\n",
(double)s->search_out_len / tb_div_count);
cpu_fprintf(f, "cycles/op %0.1f\n",
s->op_count ? (double)tot / s->op_count : 0);
@ -2463,8 +2490,11 @@ void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
s->code_in_len ? (double)tot / s->code_in_len : 0);
cpu_fprintf(f, "cycles/out byte %0.1f\n",
s->code_out_len ? (double)tot / s->code_out_len : 0);
if (tot == 0)
cpu_fprintf(f, "cycles/search byte %0.1f\n",
s->search_out_len ? (double)tot / s->search_out_len : 0);
if (tot == 0) {
tot = 1;
}
cpu_fprintf(f, " gen_interm time %0.1f%%\n",
(double)s->interm_time / tot * 100.0);
cpu_fprintf(f, " gen_code time %0.1f%%\n",

tcg/tcg.h

@ -129,6 +129,12 @@ typedef uint64_t TCGRegSet;
# error "Missing unsigned widening multiply"
#endif
#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
@ -188,6 +194,7 @@ typedef struct TCGPool {
#define TCG_POOL_CHUNK_SIZE 32768
#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512
/* when the size of the arguments of a called function is smaller than
this value, they are statically allocated in the TB stack frame */
@ -525,6 +532,7 @@ struct TCGContext {
int64_t del_op_count;
int64_t code_in_len;
int64_t code_out_len;
int64_t search_out_len;
int64_t interm_time;
int64_t code_time;
int64_t la_time;
@ -551,10 +559,11 @@ struct TCGContext {
void *code_gen_prologue;
void *code_gen_buffer;
size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
size_t code_gen_buffer_max_size;
void *code_gen_ptr;
/* Threshold to flush the translated code buffer. */
void *code_gen_highwater;
TBContext tb_ctx;
/* The TCGBackendData structure is private to tcg-target.c. */
@ -570,9 +579,8 @@ struct TCGContext {
TCGOp gen_op_buf[OPC_BUF_SIZE];
TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
target_ulong gen_opc_pc[OPC_BUF_SIZE];
uint16_t gen_opc_icount[OPC_BUF_SIZE];
uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
uint16_t gen_insn_end_off[TCG_MAX_INSNS];
target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};
extern TCGContext tcg_ctx;
@ -619,8 +627,6 @@ void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);
int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf);
int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf,
long offset);
void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size);

tci.c

@ -1081,15 +1081,6 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
/* QEMU specific operations. */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
case INDEX_op_debug_insn_start:
TODO();
break;
#else
case INDEX_op_debug_insn_start:
TODO();
break;
#endif
case INDEX_op_exit_tb:
next_tb = *(uint64_t *)tb_ptr;
goto exit;

translate-all.c

@ -168,127 +168,137 @@ void cpu_gen_init(void)
tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
* the virtual CPU can trigger an exception.
*
* '*gen_code_size_ptr' contains the size of the generated code (host
* code).
*
* Called with mmap_lock held for user-mode emulation.
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
/* Encode VAL as a signed leb128 sequence at P.
Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
TCGContext *s = &tcg_ctx;
tcg_insn_unit *gen_code_buf;
int gen_code_size;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
int more, byte;
#ifdef CONFIG_PROFILER
s->tb_count1++; /* includes aborted translations because of
exceptions */
ti = profile_getclock();
#endif
tcg_func_start(s);
do {
byte = val & 0x7f;
val >>= 7;
more = !((val == 0 && (byte & 0x40) == 0)
|| (val == -1 && (byte & 0x40) != 0));
if (more) {
byte |= 0x80;
}
*p++ = byte;
} while (more);
gen_intermediate_code(env, tb);
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
gen_code_buf = tb->tc_ptr;
tb->tb_next_offset[0] = 0xffff;
tb->tb_next_offset[1] = 0xffff;
s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
s->tb_jmp_offset = tb->tb_jmp_offset;
s->tb_next = NULL;
#else
s->tb_jmp_offset = NULL;
s->tb_next = tb->tb_next;
#endif
#ifdef CONFIG_PROFILER
s->tb_count++;
s->interm_time += profile_getclock() - ti;
s->code_time -= profile_getclock();
#endif
gen_code_size = tcg_gen_code(s, gen_code_buf);
*gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
s->code_time += profile_getclock();
s->code_in_len += tb->size;
s->code_out_len += gen_code_size;
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
qemu_log_flush();
}
#endif
return 0;
return p;
}
/* The cpu state corresponding to 'searched_pc' is restored.
*/
/* Decode a signed leb128 sequence at *PP; increment *PP past the
decoded value. Return the decoded value. */
static target_long decode_sleb128(uint8_t **pp)
{
uint8_t *p = *pp;
target_long val = 0;
int byte, shift = 0;
do {
byte = *p++;
val |= (target_ulong)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
val |= -(target_ulong)1 << shift;
}
*pp = p;
return val;
}
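A round-trip sketch of the two helpers (illustrative only, not part of the patch; the buffer size assumes a small delta):

    uint8_t buf[8], *w, *r = buf;
    w = encode_sleb128(buf, -42);           /* emits the single byte 0x56 */
    target_long v = decode_sleb128(&r);     /* v == -42, and now r == w */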
/* Encode the data collected about the instructions while compiling TB.
Place the data at BLOCK, and return the number of bytes consumed.
The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
which come from the target's insn_start data, followed by a uintptr_t
which comes from the host pc of the end of the code implementing the insn.
Each line of the table is encoded as sleb128 deltas from the previous
line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
That is, the first column is seeded with the guest pc, the last column
with the host pc, and the middle columns with zeros. */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
uint8_t *highwater = tcg_ctx.code_gen_highwater;
uint8_t *p = block;
int i, j, n;
tb->tc_search = block;
for (i = 0, n = tb->icount; i < n; ++i) {
target_ulong prev;
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (j == 0 ? tb->pc : 0);
} else {
prev = tcg_ctx.gen_insn_data[i - 1][j];
}
p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
}
prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
/* Test for (pending) buffer overflow. The assumption is that any
one row beginning below the high water mark cannot overrun
the buffer completely. Thus we can test for overflow after
encoding a row without having to check during encoding. */
if (unlikely(p > highwater)) {
return -1;
}
}
return p - block;
}
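A worked example with hypothetical numbers: a TB at guest pc 0x4000 holding two 4-byte insns whose host code ends at offsets 30 and 52, with TARGET_INSN_START_WORDS == 1. The logical rows and the deltas actually fed to encode_sleb128:

    /* row  { pc,     host end }    deltas emitted
       0    { 0x4000, 30 }          { 0x4000 - 0x4000, 30 - 0 }  = { 0, 30 }
       1    { 0x4004, 52 }          { 0x4004 - 0x4000, 52 - 30 } = { 4, 22 } */

The decoder in cpu_restore_state_from_tb below replays these deltas, stopping at the first row whose host end passes searched_pc.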
/* The cpu state corresponding to 'searched_pc' is restored. */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc)
{
target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
CPUArchState *env = cpu->env_ptr;
TCGContext *s = &tcg_ctx;
int j;
uintptr_t tc_ptr;
uint8_t *p = tb->tc_search;
int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
int64_t ti;
int64_t ti = profile_getclock();
#endif
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
tcg_func_start(s);
if (searched_pc < host_pc) {
return -1;
}
gen_intermediate_code_pc(env, tb);
/* Reconstruct the stored insn data while looking for the point at
which the end of the insn exceeds the searched_pc. */
for (i = 0; i < num_insns; ++i) {
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
data[j] += decode_sleb128(&p);
}
host_pc += decode_sleb128(&p);
if (host_pc > searched_pc) {
goto found;
}
}
return -1;
found:
if (tb->cflags & CF_USE_ICOUNT) {
assert(use_icount);
/* Reset the cycle counter to the start of the block. */
cpu->icount_decr.u16.low += tb->icount;
cpu->icount_decr.u16.low += num_insns;
/* Clear the IO flag. */
cpu->can_do_io = 0;
}
/* find opc index corresponding to search_pc */
tc_ptr = (uintptr_t)tb->tc_ptr;
if (searched_pc < tc_ptr)
return -1;
s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
s->tb_jmp_offset = tb->tb_jmp_offset;
s->tb_next = NULL;
#else
s->tb_jmp_offset = NULL;
s->tb_next = tb->tb_next;
#endif
j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
searched_pc - tc_ptr);
if (j < 0)
return -1;
/* now find start of instruction before */
while (s->gen_opc_instr_start[j] == 0) {
j--;
}
cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
restore_state_to_opc(env, tb, j);
cpu->icount_decr.u16.low -= i;
restore_state_to_opc(env, tb, data);
#ifdef CONFIG_PROFILER
s->restore_time += profile_getclock() - ti;
s->restore_count++;
tcg_ctx.restore_time += profile_getclock() - ti;
tcg_ctx.restore_count++;
#endif
return 0;
}
@ -311,31 +321,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
return false;
}
#ifdef _WIN32
static __attribute__((unused)) void map_exec(void *addr, long size)
{
DWORD old_protect;
VirtualProtect(addr, size,
PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static __attribute__((unused)) void map_exec(void *addr, long size)
{
unsigned long start, end, page_size;
page_size = getpagesize();
start = (unsigned long)addr;
start &= ~(page_size - 1);
end = (unsigned long)addr + size;
end += page_size - 1;
end &= ~(page_size - 1);
mprotect((void *)start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
void page_size_init(void)
{
/* NOTE: we can always suppose that qemu_host_page_size >=
@ -472,14 +457,6 @@ static inline PageDesc *page_find(tb_page_addr_t index)
#define USE_STATIC_CODE_GEN_BUFFER
#endif
/* ??? Should configure for this, not list operating systems here. */
#if (defined(__linux__) \
|| defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
|| defined(__DragonFly__) || defined(__OpenBSD__) \
|| defined(__NetBSD__))
# define USE_MMAP
#endif
/* Minimum size of the code gen buffer. This number is randomly chosen,
but not so small that we can't have a fair number of TB's live. */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
@ -567,22 +544,102 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
DWORD old_protect;
VirtualProtect(addr, size, prot, &old_protect);
}
static inline void map_exec(void *addr, long size)
{
do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}
static inline void map_none(void *addr, long size)
{
do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
uintptr_t start, end;
start = (uintptr_t)addr;
start &= qemu_real_host_page_mask;
end = (uintptr_t)addr + size;
end = ROUND_UP(end, qemu_real_host_page_size);
mprotect((void *)start, end - start, prot);
}
static inline void map_exec(void *addr, long size)
{
do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}
static inline void map_none(void *addr, long size)
{
do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */
static inline void *alloc_code_gen_buffer(void)
{
void *buf = static_code_gen_buffer;
size_t full_size, size;
/* The size of the buffer, rounded down to end on a page boundary. */
full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
& qemu_real_host_page_mask) - (uintptr_t)buf;
/* Reserve a guard page. */
size = full_size - qemu_real_host_page_size;
/* Honor a command-line option limiting the size of the buffer. */
if (size > tcg_ctx.code_gen_buffer_size) {
size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
& qemu_real_host_page_mask) - (uintptr_t)buf;
}
tcg_ctx.code_gen_buffer_size = size;
#ifdef __mips__
if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
if (cross_256mb(buf, size)) {
buf = split_cross_256mb(buf, size);
size = tcg_ctx.code_gen_buffer_size;
}
#endif
map_exec(buf, tcg_ctx.code_gen_buffer_size);
map_exec(buf, size);
map_none(buf + size, qemu_real_host_page_size);
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
return buf;
}
#elif defined(USE_MMAP)
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
size_t size = tcg_ctx.code_gen_buffer_size;
void *buf1, *buf2;
/* Perform the allocation in two steps, so that the guard page
is reserved but uncommitted. */
buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
MEM_RESERVE, PAGE_NOACCESS);
if (buf1 != NULL) {
buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
assert(buf1 == buf2);
}
return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
uintptr_t start = 0;
size_t size = tcg_ctx.code_gen_buffer_size;
void *buf;
/* Constrain the position of the buffer based on the host cpu.
@ -598,86 +655,70 @@ static inline void *alloc_code_gen_buffer(void)
Leave the choice of exact location with the kernel. */
flags |= MAP_32BIT;
/* Cannot expect to map more than 800MB in low memory. */
if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
if (size > 800u * 1024 * 1024) {
tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
}
# elif defined(__sparc__)
start = 0x40000000ul;
# elif defined(__s390x__)
start = 0x90000000ul;
# elif defined(__mips__)
/* ??? We ought to more explicitly manage layout for softmmu too. */
# ifdef CONFIG_USER_ONLY
start = 0x68000000ul;
# elif _MIPS_SIM == _ABI64
# if _MIPS_SIM == _ABI64
start = 0x128000000ul;
# else
start = 0x08000000ul;
# endif
# endif
buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
buf = mmap((void *)start, size + qemu_real_host_page_size,
PROT_NONE, flags, -1, 0);
if (buf == MAP_FAILED) {
return NULL;
}
#ifdef __mips__
if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
if (cross_256mb(buf, size)) {
/* Try again, with the original still mapped, to avoid re-acquiring
that 256mb crossing. This time don't specify an address. */
size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
flags, -1, 0);
if (buf2 != MAP_FAILED) {
if (!cross_256mb(buf2, size1)) {
size_t size2;
void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
PROT_NONE, flags, -1, 0);
switch (buf2 != MAP_FAILED) {
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
munmap(buf, size1);
return buf2;
munmap(buf, size);
break;
}
/* Failure. Work with what we had. */
munmap(buf2, size1);
munmap(buf2, size);
/* fallthru */
default:
/* Split the original buffer. Free the smaller half. */
buf2 = split_cross_256mb(buf, size);
size2 = tcg_ctx.code_gen_buffer_size;
if (buf == buf2) {
munmap(buf + size2 + qemu_real_host_page_size, size - size2);
} else {
munmap(buf, size - size2);
}
size = size2;
break;
}
/* Split the original buffer. Free the smaller half. */
buf2 = split_cross_256mb(buf, size1);
size2 = tcg_ctx.code_gen_buffer_size;
munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
return buf2;
buf = buf2;
}
#endif
/* Make the final buffer accessible. The guard page at the end
will remain inaccessible with PROT_NONE. */
mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
/* Request large pages for the buffer. */
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);
if (buf == NULL) {
return NULL;
}
#ifdef __mips__
if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
if (buf2 != NULL && !cross_256mb(buf2, size1)) {
/* Success! Use the new buffer. */
free(buf);
buf = buf2;
} else {
/* Failure. Work with what we had. Since this is malloc
and not mmap, we can't free the other half. */
free(buf2);
buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
}
}
#endif
map_exec(buf, tcg_ctx.code_gen_buffer_size);
return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static inline void code_gen_alloc(size_t tb_size)
{
@ -688,24 +729,13 @@ static inline void code_gen_alloc(size_t tb_size)
exit(1);
}
qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
QEMU_MADV_HUGEPAGE);
/* Estimate a good size for the number of TBs we can support. We
still haven't deducted the prologue from the buffer size here,
but that's minimal and won't affect the estimate much. */
tcg_ctx.code_gen_max_blocks
= tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
/* Steal room for the prologue at the end of the buffer. This ensures
(via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
from TB's to the prologue are going to be in range. It also means
that we don't need to mark (additional) portions of the data segment
as executable. */
tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
tcg_ctx.code_gen_buffer_size - 1024;
tcg_ctx.code_gen_buffer_size -= 1024;
tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
(TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
CODE_GEN_AVG_BLOCK_SIZE;
tcg_ctx.tb_ctx.tbs =
g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
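The TB array is thus sized once, up front, from a static average block size rather than from the old max-size threshold. With hypothetical figures (neither number is taken from this patch), the arithmetic looks like:

    /* e.g. a 32 MiB code buffer and a 400-byte average block size: */
    size_t max_blocks = (32u << 20) / 400;   /* ~84k TranslationBlocks */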
@ -715,10 +745,8 @@ static inline void code_gen_alloc(size_t tb_size)
void tcg_exec_init(unsigned long tb_size)
{
cpu_gen_init();
code_gen_alloc(tb_size);
tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
page_init();
code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
@ -737,9 +765,7 @@ static TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
tcg_ctx.code_gen_buffer_max_size) {
if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
return NULL;
}
tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
@ -1034,28 +1060,98 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
TranslationBlock *tb;
tb_page_addr_t phys_pc, phys_page2;
target_ulong virt_page2;
int code_gen_size;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
phys_pc = get_page_addr_code(env, pc);
if (use_icount) {
cflags |= CF_USE_ICOUNT;
}
tb = tb_alloc(pc);
if (!tb) {
if (unlikely(!tb)) {
buffer_overflow:
/* flush must be done */
tb_flush(cpu);
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
/* Don't forget to invalidate previous TB info. */
tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
}
tb->tc_ptr = tcg_ctx.code_gen_ptr;
gen_code_buf = tcg_ctx.code_gen_ptr;
tb->tc_ptr = gen_code_buf;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
cpu_gen_code(env, tb, &code_gen_size);
tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
exceptions */
ti = profile_getclock();
#endif
tcg_func_start(&tcg_ctx);
gen_intermediate_code(env, tb);
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
tb->tb_next_offset[0] = 0xffff;
tb->tb_next_offset[1] = 0xffff;
tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
tcg_ctx.tb_next = NULL;
#else
tcg_ctx.tb_jmp_offset = NULL;
tcg_ctx.tb_next = tb->tb_next;
#endif
#ifdef CONFIG_PROFILER
tcg_ctx.tb_count++;
tcg_ctx.interm_time += profile_getclock() - ti;
tcg_ctx.code_time -= profile_getclock();
#endif
/* ??? Overflow could be handled better here. In particular, we
don't need to re-do gen_intermediate_code, nor should we re-do
the tcg optimization currently hidden inside tcg_gen_code. All
that should be required is to flush the TBs, allocate a new TB,
re-initialize it per above, and re-do the actual code generation. */
gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
if (unlikely(gen_code_size < 0)) {
goto buffer_overflow;
}
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
if (unlikely(search_size < 0)) {
goto buffer_overflow;
}
#ifdef CONFIG_PROFILER
tcg_ctx.code_time += profile_getclock();
tcg_ctx.code_in_len += tb->size;
tcg_ctx.code_out_len += gen_code_size;
tcg_ctx.search_out_len += search_size;
#endif
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
qemu_log_flush();
}
#endif
tcg_ctx.code_gen_ptr = (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
@ -1606,7 +1702,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %td/%zd\n",
tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
tcg_ctx.code_gen_buffer_max_size);
tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
cpu_fprintf(f, "TB count %d/%d\n",
tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",