target/s390x: Use atomic operations for LOAD AND OP

Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Richard Henderson 2017-03-02 12:28:54 +11:00
parent 303a9ab887
commit 4dba4d6fef
2 changed files with 62 additions and 40 deletions
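
For orientation: the LOAD AND OP family (LAA/LAAG, LAAL/LAALG, LAN/LANG, LAX/LAXG, LAO/LAOG) performs an interlocked update: r1 receives the original second operand from storage, storage receives the result of the operation, and the condition code is derived from that result. A minimal host-C analogy of that behaviour (illustrative only, not QEMU code; laag_model is an invented name):

#include <stdint.h>
#include <stdio.h>

/* Rough model of LAAG: an atomic fetch-and-add whose old value goes to r1,
   while the condition code is derived from the recomputed sum.  */
static int laag_model(int64_t *storage, int64_t r3, int64_t *r1)
{
    int64_t old = __atomic_fetch_add(storage, r3, __ATOMIC_SEQ_CST);
    int64_t sum = old + r3;                     /* redone only for the CC */

    *r1 = old;                  /* architectural result: the original value */
    return sum == 0 ? 0 : (sum < 0 ? 1 : 2);    /* CC 3 (overflow) omitted */
}

int main(void)
{
    int64_t mem = 40, r1;
    int cc = laag_model(&mem, 2, &r1);
    printf("r1=%lld storage=%lld cc=%d\n", (long long)r1, (long long)mem, cc);
    return 0;
}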

target/s390x/insn-data.def

@@ -390,20 +390,20 @@
 /* LOAD ADDRESS RELATIVE LONG */
     C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0)
 /* LOAD AND ADD */
-    C(0xebf8, LAA, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, adds32)
-    C(0xebe8, LAAG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, adds64)
+    D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL)
+    D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ)
 /* LOAD AND ADD LOGICAL */
-    C(0xebfa, LAAL, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, addu32)
-    C(0xebea, LAALG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, addu64)
+    D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL)
+    D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ)
 /* LOAD AND AND */
-    C(0xebf4, LAN, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, and, nz32)
-    C(0xebe4, LANG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, and, nz64)
+    D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL)
+    D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ)
 /* LOAD AND EXCLUSIVE OR */
-    C(0xebf7, LAX, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, xor, nz32)
-    C(0xebe7, LAXG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, xor, nz64)
+    D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL)
+    D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ)
 /* LOAD AND OR */
-    C(0xebf6, LAO, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, or, nz32)
-    C(0xebe6, LAOG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, or, nz64)
+    D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL)
+    D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ)
 /* LOAD AND TEST */
     C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
     C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
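
The rows above switch from C() to D(): the D form carries one extra trailing constant that the decoder stores with the entry and the new op helpers read back as s->insn->data, here a MemOp (MO_TESL, MO_TEUL, MO_TEQ) that gets OR'd with MO_ALIGN. A rough mock-up of what one such row supplies, with fields named after the table columns (an illustrative struct only, not QEMU's actual DisasInsn layout):

#include <stdint.h>

typedef void (*helper_fn)(void);

/* Hypothetical sketch of the LAA row above, as an aid to reading the table. */
struct insn_row {
    uint16_t    opc;    /* 0xebf8                                            */
    const char *name;   /* "LAA"                                             */
    const char *fmt;    /* RSY_a: instruction format                         */
    const char *fac;    /* ILA: interlocked-access facility required         */
    helper_fn   in1;    /* r3_32s: load r3, sign-extended to 64 bits         */
    helper_fn   in2;    /* a2: compute the effective address of operand 2    */
    helper_fn   prep;   /* new: allocate a fresh temp for the result         */
    helper_fn   wout;   /* in2_r1_32: write the fetched old value back to r1 */
    helper_fn   op;     /* laa: the atomic fetch-add itself                  */
    helper_fn   cout;   /* adds32: set CC from the recomputed 32-bit sum     */
    uint64_t    data;   /* MO_TESL: memop seen by op_laa via s->insn->data   */
};

The old C() rows instead routed the storage operand through the m2_32s_atomic/m2_64_atomic loaders and the m2_32_r1_atomic/m2_64_r1_atomic write-outs, all of which are deleted in the second file.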

target/s390x/translate.c

@@ -2309,6 +2309,50 @@ static ExitStatus op_iske(DisasContext *s, DisasOps *o)
 }
 #endif
 
+static ExitStatus op_laa(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory,
+       which the atomic fetch-add leaves in o->in2.  */
+    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+                                 s->insn->data | MO_ALIGN);
+    /* However, we need to recompute the addition for setting CC.  */
+    tcg_gen_add_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lan(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory,
+       which the atomic operation leaves in o->in2.  */
+    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+                                 s->insn->data | MO_ALIGN);
+    /* However, we need to recompute the operation for setting CC.  */
+    tcg_gen_and_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lao(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory,
+       which the atomic operation leaves in o->in2.  */
+    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+                                s->insn->data | MO_ALIGN);
+    /* However, we need to recompute the operation for setting CC.  */
+    tcg_gen_or_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lax(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory,
+       which the atomic operation leaves in o->in2.  */
+    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+                                 s->insn->data | MO_ALIGN);
+    /* However, we need to recompute the operation for setting CC.  */
+    tcg_gen_xor_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
 {
     gen_helper_ldeb(o->out, cpu_env, o->in2);
@@ -4483,21 +4527,17 @@ static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
 }
 #define SPEC_wout_m2_32 0
 
-static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
 {
-    /* XXX release reservation */
-    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
-    store_reg32_i64(get_field(f, r1), o->in2);
-}
-#define SPEC_wout_m2_32_r1_atomic 0
-
-static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
-{
-    /* XXX release reservation */
-    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
     store_reg(get_field(f, r1), o->in2);
 }
-#define SPEC_wout_m2_64_r1_atomic 0
+#define SPEC_wout_in2_r1 0
+
+static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    store_reg32_i64(get_field(f, r1), o->in2);
+}
+#define SPEC_wout_in2_r1_32 0
 
 /* ====================================================================== */
 /* The "INput 1" generators.  These load the first operand to an insn.  */
@@ -4941,24 +4981,6 @@ static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
 }
 #define SPEC_in2_mri2_64 0
 
-static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
-{
-    /* XXX should reserve the address */
-    in1_la2(s, f, o);
-    o->in2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
-}
-#define SPEC_in2_m2_32s_atomic 0
-
-static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
-{
-    /* XXX should reserve the address */
-    in1_la2(s, f, o);
-    o->in2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
-}
-#define SPEC_in2_m2_64_atomic 0
-
 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
 {
     o->in2 = tcg_const_i64(get_field(f, i2));
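
The in2_m2_32s_atomic/in2_m2_64_atomic loaders removed here, together with the wout_m2_*_r1_atomic stores removed above, emulated LOAD AND OP as a plain load, a separate TCG op, and a plain store; the "XXX should reserve the address" and "XXX release reservation" comments marked the missing interlock. A split sequence like that can lose updates once guest CPUs run in parallel host threads, which the single tcg_gen_atomic_fetch_*_i64 now avoids. A standalone C demonstration of the lost update (not QEMU code, just the classic non-atomic read-modify-write race):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static int64_t counter;

/* Load, add and store as three separate steps, as the removed helpers did:
   each step is fine on its own, but the sequence is not atomic, so two
   threads incrementing concurrently can overwrite each other's updates.  */
static void *split_rmw(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        int64_t tmp = __atomic_load_n(&counter, __ATOMIC_RELAXED);
        tmp += 1;
        __atomic_store_n(&counter, tmp, __ATOMIC_RELAXED);
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, split_rmw, NULL);
    pthread_create(&b, NULL, split_rmw, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("expected 200000, got %lld\n", (long long)counter);  /* usually less */
    return 0;
}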