Alpha shadow register optimization

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJV03TlAAoJEK0ScMxN0CebCDUH/jBAivio/oh0MEVOy12uQ+Dm
 JWyTnde8PHId50ysHwI3+EiE0Jzk1PQv1RV+7RRSy767KOOc+WqRDGaVHN4vjK6Z
 CgocDUygM79vInaGB34KuvQ0Z+tu4rCIQxBfkCm9OsMF5h/8ZO16IA7RwUQ9GvDj
 1KGFL309D4Uw9/oqtx1SQeXwiZR7eI5buaJFfit3uZeaFwyZkkwLXfgvgoPIlvQ+
 GMMv5Ejxd5pT/2WwqsxwI0/Y2kxRqyJd0u+28S969Vg0sZ4GlMIsFvjuKqB3kSeZ
 TpOyNa7ulcywjZotp6OIlr483Sn6SGjzGej/8Qvg5mQ3fxoHTGeQa3NWfvM2+34=
 =Mg6S
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-axp-201508018' into staging

Alpha shadow register optimization

# gpg: Signature made Tue 18 Aug 2015 19:09:41 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-axp-201508018:
  target-alpha: Inline hw_ret
  target-alpha: Inline call_pal
  target-alpha: Use separate TCGv temporaries for the shadow registers

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2015-08-19 00:25:52 +01:00
commit 4c4a29cb68
7 changed files with 165 additions and 133 deletions
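What the series does, in outline: before these patches every PALmode entry and exit called swap_shadow_regs() to exchange env->ir[8..14] and env->ir[25] with env->shadow[0..7]; afterwards the shadow values live permanently in env->shadow[], out-of-line consumers go through cpu_alpha_load_gr()/cpu_alpha_store_gr(), and the translator simply picks one of two sets of TCG globals (cpu_std_ir[] or cpu_pal_ir[]) per translation block. The standalone C sketch below illustrates the slot-mapping idea only; it is an illustration, not code from the commit, and the Regs/slot names are invented for it.

/* Illustration only (invented names, not QEMU code): the same register
   number resolves to a different storage slot depending on the mode, so
   entering or leaving PALmode copies nothing. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t ir[31];     /* normal integer registers */
    uint64_t shadow[8];  /* PALmode shadows of r8-r14 and r25 */
} Regs;

/* Map a register number to its storage slot, mirroring cpu_alpha_addr_gr(). */
static uint64_t *slot(Regs *r, unsigned reg, bool pal_mode)
{
    if (pal_mode) {
        if (reg >= 8 && reg <= 14) {
            return &r->shadow[reg - 8];
        }
        if (reg == 25) {
            return &r->shadow[7];
        }
    }
    return &r->ir[reg];
}

int main(void)
{
    Regs r = { 0 };
    *slot(&r, 9, false) = 0x1111;   /* the kernel's r9 */
    *slot(&r, 9, true)  = 0x2222;   /* PALcode's r9, i.e. the shadow slot */
    /* Both values coexist; no swap is needed when the mode flips. */
    printf("%#llx %#llx\n",
           (unsigned long long)*slot(&r, 9, false),
           (unsigned long long)*slot(&r, 9, true));
    return 0;
}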

target-alpha/cpu.h

@ -445,8 +445,9 @@ void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env);
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val);
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg);
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val);
#ifndef CONFIG_USER_ONLY
void swap_shadow_regs(CPUAlphaState *env);
QEMU_NORETURN void alpha_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
bool is_write, bool is_exec,
int unused, unsigned size);

target-alpha/gdbstub.c

@ -30,7 +30,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case 0 ... 30:
val = env->ir[n];
val = cpu_alpha_load_gr(env, n);
break;
case 32 ... 62:
d.d = env->fir[n - 32];
@ -66,7 +66,7 @@ int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case 0 ... 30:
env->ir[n] = tmp;
cpu_alpha_store_gr(env, n, tmp);
break;
case 32 ... 62:
d.ll = tmp;

target-alpha/helper.c

@ -79,6 +79,30 @@ void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
cpu_alpha_store_fpcr(env, val);
}
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
if (env->pal_mode) {
if (reg >= 8 && reg <= 14) {
return &env->shadow[reg - 8];
} else if (reg == 25) {
return &env->shadow[7];
}
}
#endif
return &env->ir[reg];
}
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
return *cpu_alpha_addr_gr(env, reg);
}
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
*cpu_alpha_addr_gr(env, reg) = val;
}
#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int rw, int mmu_idx)
@ -90,38 +114,6 @@ int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
return 1;
}
#else
void swap_shadow_regs(CPUAlphaState *env)
{
uint64_t i0, i1, i2, i3, i4, i5, i6, i7;
i0 = env->ir[8];
i1 = env->ir[9];
i2 = env->ir[10];
i3 = env->ir[11];
i4 = env->ir[12];
i5 = env->ir[13];
i6 = env->ir[14];
i7 = env->ir[25];
env->ir[8] = env->shadow[0];
env->ir[9] = env->shadow[1];
env->ir[10] = env->shadow[2];
env->ir[11] = env->shadow[3];
env->ir[12] = env->shadow[4];
env->ir[13] = env->shadow[5];
env->ir[14] = env->shadow[6];
env->ir[25] = env->shadow[7];
env->shadow[0] = i0;
env->shadow[1] = i1;
env->shadow[2] = i2;
env->shadow[3] = i3;
env->shadow[4] = i4;
env->shadow[5] = i5;
env->shadow[6] = i6;
env->shadow[7] = i7;
}
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
int prot_need, int mmu_idx,
@ -375,10 +367,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
env->pc = env->palbr + i;
/* Switch to PALmode. */
if (!env->pal_mode) {
env->pal_mode = 1;
swap_shadow_regs(env);
}
env->pal_mode = 1;
#endif /* !USER_ONLY */
}
@ -443,7 +432,7 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
env->pc, env->ps);
for (i = 0; i < 31; i++) {
cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
linux_reg_names[i], env->ir[i]);
linux_reg_names[i], cpu_alpha_load_gr(env, i));
if ((i % 3) == 2)
cpu_fprintf(f, "\n");
}
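A consequence visible in the two files above (a note on the change, not text from the commit): once shadow values are never copied into ir[], anything outside generated code that inspects the integer file, such as the gdb stub and this register dump, has to ask for the PALmode-aware view instead of reading env->ir[] directly, which is exactly what the new cpu_alpha_load_gr()/cpu_alpha_store_gr() accessors provide. Continuing the invented slot() sketch from above:

/* Hypothetical follow-up to the earlier sketch: a debugger-style reader. */
uint64_t debugger_read_r9(Regs *r, bool guest_in_pal_mode)
{
    /* Reading r->ir[9] directly would always return the non-PAL value;
       routing through the shared mapping returns whatever the currently
       executing code actually sees in r9. */
    return *slot(r, 9, guest_in_pal_mode);
}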

target-alpha/helper.h

@ -91,9 +91,6 @@ DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64)
#if !defined (CONFIG_USER_ONLY)
DEF_HELPER_2(hw_ret, void, env, i64)
DEF_HELPER_3(call_pal, void, env, i64, i64)
DEF_HELPER_2(ldl_phys, i64, env, i64)
DEF_HELPER_2(ldq_phys, i64, env, i64)
DEF_HELPER_2(ldl_l_phys, i64, env, i64)

target-alpha/machine.c

@ -70,8 +70,8 @@ static VMStateField vmstate_env_fields[] = {
static const VMStateDescription vmstate_env = {
.name = "env",
.version_id = 1,
.minimum_version_id = 1,
.version_id = 2,
.minimum_version_id = 2,
.fields = vmstate_env_fields,
};
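Why the version bump, as far as can be inferred (the commit does not say): the field list is unchanged, but the contents of a snapshot taken in PALmode change meaning; the old code saved ir[] with the shadow values swapped in, while the new code never swaps, so raising both version_id and minimum_version_id to 2 makes old and new binaries refuse each other's streams instead of silently loading registers into the wrong slots. A hedged sketch of the acceptance rule this relies on:

/* Sketch of the version gate (simplified): an incoming "env" section is
   accepted only if its version lies in [minimum_version_id, version_id]
   of the running binary. */
#include <stdbool.h>

static bool env_section_loadable(int incoming_version)
{
    const int version_id = 2;          /* what this binary writes  */
    const int minimum_version_id = 2;  /* oldest format it accepts */
    return incoming_version >= minimum_version_id
        && incoming_version <= version_id;
}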

target-alpha/sys_helper.c

@ -40,28 +40,6 @@ uint64_t helper_load_pcc(CPUAlphaState *env)
/* PALcode support special instructions */
#ifndef CONFIG_USER_ONLY
void helper_hw_ret(CPUAlphaState *env, uint64_t a)
{
env->pc = a & ~3;
env->intr_flag = 0;
env->lock_addr = -1;
if ((a & 1) == 0) {
env->pal_mode = 0;
swap_shadow_regs(env);
}
}
void helper_call_pal(CPUAlphaState *env, uint64_t pc, uint64_t entry_ofs)
{
int pal_mode = env->pal_mode;
env->exc_addr = pc | pal_mode;
env->pc = env->palbr + entry_ofs;
if (!pal_mode) {
env->pal_mode = 1;
swap_shadow_regs(env);
}
}
void helper_tbia(CPUAlphaState *env)
{
tlb_flush(CPU(alpha_env_get_cpu(env)), 1);

target-alpha/translate.c

@ -42,6 +42,9 @@ typedef struct DisasContext DisasContext;
struct DisasContext {
struct TranslationBlock *tb;
uint64_t pc;
#ifndef CONFIG_USER_ONLY
uint64_t palbr;
#endif
int mem_idx;
/* Current rounding mode for this TB. */
@ -52,6 +55,9 @@ struct DisasContext {
/* implver value for this CPU. */
int implver;
/* The set of registers active in the current context. */
TCGv *ir;
/* Temporaries for $31 and $f31 as source and destination. */
TCGv zero;
TCGv sink;
@ -86,13 +92,17 @@ typedef enum {
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif
#include "exec/gen-icount.h"
void alpha_translate_init(void)
@ -122,6 +132,12 @@ void alpha_translate_init(void)
"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
"f24", "f25", "f26", "f27", "f28", "f29", "f30"
};
#ifndef CONFIG_USER_ONLY
static const char shadow_names[8][8] = {
"pal_t7", "pal_s0", "pal_s1", "pal_s2",
"pal_s3", "pal_s4", "pal_s5", "pal_t11"
};
#endif
static bool done_init = 0;
int i;
@ -134,9 +150,9 @@ void alpha_translate_init(void)
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
for (i = 0; i < 31; i++) {
cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUAlphaState, ir[i]),
greg_names[i]);
cpu_std_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUAlphaState, ir[i]),
greg_names[i]);
}
for (i = 0; i < 31; i++) {
@ -145,6 +161,17 @@ void alpha_translate_init(void)
freg_names[i]);
}
#ifndef CONFIG_USER_ONLY
memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
for (i = 0; i < 8; i++) {
int r = (i == 7 ? 25 : i + 8);
cpu_pal_ir[r] = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUAlphaState,
shadow[i]),
shadow_names[i]);
}
#endif
for (i = 0; i < ARRAY_SIZE(vars); ++i) {
const GlobalVar *v = &vars[i];
*v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
@ -170,7 +197,7 @@ static TCGv dest_sink(DisasContext *ctx)
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
if (likely(reg < 31)) {
return cpu_ir[reg];
return ctx->ir[reg];
} else {
return load_zero(ctx);
}
@ -183,7 +210,7 @@ static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
ctx->lit = tcg_const_i64(lit);
return ctx->lit;
} else if (likely(reg < 31)) {
return cpu_ir[reg];
return ctx->ir[reg];
} else {
return load_zero(ctx);
}
@ -192,7 +219,7 @@ static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
if (likely(reg < 31)) {
return cpu_ir[reg];
return ctx->ir[reg];
} else {
return dest_sink(ctx);
}
@ -304,7 +331,7 @@ static inline void gen_load_mem(DisasContext *ctx,
addr = tmp;
}
va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
tcg_gen_qemu_load(va, addr, ctx->mem_idx);
tcg_temp_free(tmp);
@ -399,13 +426,13 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
quad ? MO_LEQ : MO_LEUL);
tcg_gen_movi_i64(cpu_ir[ra], 1);
tcg_gen_movi_i64(ctx->ir[ra], 1);
tcg_gen_br(lab_done);
gen_set_label(lab_fail);
tcg_gen_movi_i64(cpu_ir[ra], 0);
tcg_gen_movi_i64(ctx->ir[ra], 0);
gen_set_label(lab_done);
tcg_gen_movi_i64(cpu_lock_addr, -1);
@ -444,7 +471,7 @@ static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
uint64_t dest = ctx->pc + (disp << 2);
if (ra != 31) {
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
}
/* Notice branch-to-next; used to initialize RA with the PC. */
@ -1059,12 +1086,13 @@ static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
}
}
static void gen_rx(int ra, int set)
static void gen_rx(DisasContext *ctx, int ra, int set)
{
TCGv_i32 tmp;
if (ra != 31) {
tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
offsetof(CPUAlphaState, intr_flag));
}
tmp = tcg_const_i32(set);
@ -1086,12 +1114,12 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x9E:
/* RDUNIQUE */
tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, unique));
break;
case 0x9F:
/* WRUNIQUE */
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, unique));
break;
default:
@ -1115,17 +1143,17 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x2D:
/* WRVPTPTR */
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
/* WRVAL */
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, sysval));
break;
case 0x32:
/* RDVAL */
tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, sysval));
break;
@ -1135,12 +1163,12 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
/* Note that we already know we're in kernel mode, so we know
that PS only contains the 3 IPL bits. */
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, ps));
/* But make sure and store only the 3 IPL bits from the user. */
tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
@ -1148,22 +1176,22 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
case 0x36:
/* RDPS */
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, ps));
break;
case 0x38:
/* WRUSP */
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, usp));
break;
case 0x3A:
/* RDUSP */
tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, usp));
break;
case 0x3C:
/* WHAMI */
tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
@ -1181,15 +1209,24 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
{
TCGv pc = tcg_const_i64(ctx->pc);
TCGv entry = tcg_const_i64(palcode & 0x80
? 0x2000 + (palcode - 0x80) * 64
: 0x1000 + palcode * 64);
TCGv tmp = tcg_temp_new();
uint64_t exc_addr = ctx->pc;
uint64_t entry = ctx->palbr;
gen_helper_call_pal(cpu_env, pc, entry);
if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
exc_addr |= 1;
} else {
tcg_gen_movi_i64(tmp, 1);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
}
tcg_temp_free(entry);
tcg_temp_free(pc);
tcg_gen_movi_i64(tmp, exc_addr);
tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
tcg_temp_free(tmp);
entry += (palcode & 0x80
? 0x2000 + (palcode - 0x80) * 64
: 0x1000 + palcode * 64);
/* Since the destination is running in PALmode, we don't really
need the page permissions check. We'll see the existence of
@ -1197,11 +1234,13 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
we change the PAL base register. */
if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, entry);
tcg_gen_exit_tb((uintptr_t)ctx->tb);
return EXIT_GOTO_TB;
} else {
tcg_gen_movi_i64(cpu_pc, entry);
return EXIT_PC_UPDATED;
}
return EXIT_PC_UPDATED;
}
#endif
}
@ -1228,8 +1267,6 @@ static int cpu_pr_data(int pr)
case 11: return offsetof(CPUAlphaState, sysval);
case 12: return offsetof(CPUAlphaState, usp);
case 32 ... 39:
return offsetof(CPUAlphaState, shadow[pr - 32]);
case 40 ... 63:
return offsetof(CPUAlphaState, scratch[pr - 40]);
@ -1241,36 +1278,48 @@ static int cpu_pr_data(int pr)
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
int data = cpu_pr_data(regno);
void (*helper)(TCGv);
int data;
/* Special help for VMTIME and WALLTIME. */
if (regno == 250 || regno == 249) {
void (*helper)(TCGv) = gen_helper_get_walltime;
if (regno == 249) {
helper = gen_helper_get_vmtime;
}
if (ctx->tb->cflags & CF_USE_ICOUNT) {
switch (regno) {
case 32 ... 39:
/* Accessing the "non-shadow" general registers. */
regno = regno == 39 ? 25 : regno - 32 + 8;
tcg_gen_mov_i64(va, cpu_std_ir[regno]);
break;
case 250: /* WALLTIME */
helper = gen_helper_get_walltime;
goto do_helper;
case 249: /* VMTIME */
helper = gen_helper_get_vmtime;
do_helper:
if (use_icount) {
gen_io_start();
helper(va);
gen_io_end();
return EXIT_PC_STALE;
} else {
helper(va);
return NO_EXIT;
}
break;
default:
/* The basic registers are data only, and unknown registers
are read-zero, write-ignore. */
data = cpu_pr_data(regno);
if (data == 0) {
tcg_gen_movi_i64(va, 0);
} else if (data & PR_BYTE) {
tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
} else if (data & PR_LONG) {
tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
} else {
tcg_gen_ld_i64(va, cpu_env, data);
}
break;
}
/* The basic registers are data only, and unknown registers
are read-zero, write-ignore. */
if (data == 0) {
tcg_gen_movi_i64(va, 0);
} else if (data & PR_BYTE) {
tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
} else if (data & PR_LONG) {
tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
} else {
tcg_gen_ld_i64(va, cpu_env, data);
}
return NO_EXIT;
}
@ -1316,6 +1365,12 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
gen_helper_tb_flush(cpu_env);
return EXIT_PC_STALE;
case 32 ... 39:
/* Accessing the "non-shadow" general registers. */
regno = regno == 39 ? 25 : regno - 32 + 8;
tcg_gen_mov_i64(cpu_std_ir[regno], vb);
break;
default:
/* The basic registers are data only, and unknown registers
are read-zero, write-ignore. */
@ -2295,14 +2350,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0xE000:
/* RC */
gen_rx(ra, 0);
gen_rx(ctx, ra, 0);
break;
case 0xE800:
/* ECB */
break;
case 0xF000:
/* RS */
gen_rx(ra, 1);
gen_rx(ctx, ra, 1);
break;
case 0xF800:
/* WH64 */
@ -2334,7 +2389,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tcg_gen_andi_i64(cpu_pc, vb, ~3);
if (ra != 31) {
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
}
ret = EXIT_PC_UPDATED;
break;
@ -2374,10 +2429,10 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
break;
case 0x6:
/* Incpu_ir[ra]id */
/* Invalid */
goto invalid_opc;
case 0x7:
/* Incpu_ir[ra]id */
/* Invaliid */
goto invalid_opc;
case 0x8:
/* Longword virtual access (hw_ldl) */
@ -2580,13 +2635,18 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
/* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
address from EXC_ADDR. This turns out to be useful for our
emulation PALcode, so continue to accept it. */
tmp = tcg_temp_new();
tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
gen_helper_hw_ret(cpu_env, tmp);
tcg_temp_free(tmp);
ctx->lit = vb = tcg_temp_new();
tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
} else {
gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
vb = load_gpr(ctx, rb);
}
tmp = tcg_temp_new();
tcg_gen_movi_i64(tmp, 0);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
tcg_gen_movi_i64(cpu_lock_addr, -1);
tcg_gen_andi_i64(tmp, vb, 1);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
tcg_gen_andi_i64(cpu_pc, vb, ~3);
ret = EXIT_PC_UPDATED;
break;
#else
@ -2817,6 +2877,13 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
ctx.implver = env->implver;
ctx.singlestep_enabled = cs->singlestep_enabled;
#ifdef CONFIG_USER_ONLY
ctx.ir = cpu_std_ir;
#else
ctx.palbr = env->palbr;
ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif
/* ??? Every TB begins with unset rounding mode, to be initialized on
the first fp insn of the TB. Alternately we could define a proper
default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure