target/riscv: rvv: Add tail agnostic for vector load / store instructions
The destination register of unit-stride mask load and store instructions is always written with a tail-agnostic policy. A vector segment load / store instruction may contain a fractional lmul with nf * lmul > 1. The rest of the elements in the last register should be treated as tail elements. Signed-off-by: eop Chen <eop.chen@sifive.com> Reviewed-by: Frank Chang <frank.chang@sifive.com> Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn> Acked-by: Alistair Francis <alistair.francis@wdc.com> Message-Id: <165449614532.19704.7000832880482980398-6@git.sr.ht> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
This commit is contained in:
parent
f1eed927fb
commit
752614cab8
|
@ -711,6 +711,7 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
|
|||
data = FIELD_DP32(data, VDATA, VM, a->vm);
|
||||
data = FIELD_DP32(data, VDATA, LMUL, emul);
|
||||
data = FIELD_DP32(data, VDATA, NF, a->nf);
|
||||
data = FIELD_DP32(data, VDATA, VTA, s->vta);
|
||||
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
|
||||
}
|
||||
|
||||
|
@ -774,6 +775,8 @@ static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
|
|||
/* EMUL = 1, NFIELDS = 1 */
|
||||
data = FIELD_DP32(data, VDATA, LMUL, 0);
|
||||
data = FIELD_DP32(data, VDATA, NF, 1);
|
||||
/* Mask destination register are always tail-agnostic */
|
||||
data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
|
||||
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
|
||||
}
|
||||
|
||||
|
@ -862,6 +865,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
|
|||
data = FIELD_DP32(data, VDATA, VM, a->vm);
|
||||
data = FIELD_DP32(data, VDATA, LMUL, emul);
|
||||
data = FIELD_DP32(data, VDATA, NF, a->nf);
|
||||
data = FIELD_DP32(data, VDATA, VTA, s->vta);
|
||||
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
|
||||
}
|
||||
|
||||
|
@ -991,6 +995,7 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
|
|||
data = FIELD_DP32(data, VDATA, VM, a->vm);
|
||||
data = FIELD_DP32(data, VDATA, LMUL, emul);
|
||||
data = FIELD_DP32(data, VDATA, NF, a->nf);
|
||||
data = FIELD_DP32(data, VDATA, VTA, s->vta);
|
||||
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
|
||||
}
|
||||
|
||||
|
@ -1108,6 +1113,7 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
|
|||
data = FIELD_DP32(data, VDATA, VM, a->vm);
|
||||
data = FIELD_DP32(data, VDATA, LMUL, emul);
|
||||
data = FIELD_DP32(data, VDATA, NF, a->nf);
|
||||
data = FIELD_DP32(data, VDATA, VTA, s->vta);
|
||||
return ldff_trans(a->rd, a->rs1, data, fn, s);
|
||||
}
|
||||
|
||||
|
|
|
@ -95,6 +95,7 @@ typedef struct DisasContext {
|
|||
int8_t lmul;
|
||||
uint8_t sew;
|
||||
uint8_t vta;
|
||||
bool cfg_vta_all_1s;
|
||||
target_ulong vstart;
|
||||
bool vl_eq_vlmax;
|
||||
uint8_t ntemp;
|
||||
|
@ -1101,6 +1102,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
|||
ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
|
||||
ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
|
||||
ctx->vta = FIELD_EX32(tb_flags, TB_FLAGS, VTA) && cpu->cfg.rvv_ta_all_1s;
|
||||
ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
|
||||
ctx->vstart = env->vstart;
|
||||
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
|
||||
ctx->misa_mxl_max = env->misa_mxl_max;
|
||||
|
|
|
@ -270,6 +270,9 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
|
|||
uint32_t i, k;
|
||||
uint32_t nf = vext_nf(desc);
|
||||
uint32_t max_elems = vext_max_elems(desc, log2_esz);
|
||||
uint32_t esz = 1 << log2_esz;
|
||||
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
|
||||
uint32_t vta = vext_vta(desc);
|
||||
|
||||
for (i = env->vstart; i < env->vl; i++, env->vstart++) {
|
||||
if (!vm && !vext_elem_mask(v0, i)) {
|
||||
|
@ -284,6 +287,18 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
|
|||
}
|
||||
}
|
||||
env->vstart = 0;
|
||||
/* set tail elements to 1s */
|
||||
for (k = 0; k < nf; ++k) {
|
||||
vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz,
|
||||
(k * max_elems + max_elems) * esz);
|
||||
}
|
||||
if (nf * max_elems % total_elems != 0) {
|
||||
uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
|
||||
uint32_t registers_used =
|
||||
((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
|
||||
vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
|
||||
registers_used * vlenb);
|
||||
}
|
||||
}
|
||||
|
||||
#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
|
||||
|
@ -329,6 +344,9 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
|
|||
uint32_t i, k;
|
||||
uint32_t nf = vext_nf(desc);
|
||||
uint32_t max_elems = vext_max_elems(desc, log2_esz);
|
||||
uint32_t esz = 1 << log2_esz;
|
||||
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
|
||||
uint32_t vta = vext_vta(desc);
|
||||
|
||||
/* load bytes from guest memory */
|
||||
for (i = env->vstart; i < evl; i++, env->vstart++) {
|
||||
|
@ -340,6 +358,18 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
|
|||
}
|
||||
}
|
||||
env->vstart = 0;
|
||||
/* set tail elements to 1s */
|
||||
for (k = 0; k < nf; ++k) {
|
||||
vext_set_elems_1s(vd, vta, (k * max_elems + evl) * esz,
|
||||
(k * max_elems + max_elems) * esz);
|
||||
}
|
||||
if (nf * max_elems % total_elems != 0) {
|
||||
uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
|
||||
uint32_t registers_used =
|
||||
((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
|
||||
vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
|
||||
registers_used * vlenb);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -439,6 +469,9 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
|
|||
uint32_t nf = vext_nf(desc);
|
||||
uint32_t vm = vext_vm(desc);
|
||||
uint32_t max_elems = vext_max_elems(desc, log2_esz);
|
||||
uint32_t esz = 1 << log2_esz;
|
||||
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
|
||||
uint32_t vta = vext_vta(desc);
|
||||
|
||||
/* load bytes from guest memory */
|
||||
for (i = env->vstart; i < env->vl; i++, env->vstart++) {
|
||||
|
@ -454,6 +487,18 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
|
|||
}
|
||||
}
|
||||
env->vstart = 0;
|
||||
/* set tail elements to 1s */
|
||||
for (k = 0; k < nf; ++k) {
|
||||
vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz,
|
||||
(k * max_elems + max_elems) * esz);
|
||||
}
|
||||
if (nf * max_elems % total_elems != 0) {
|
||||
uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
|
||||
uint32_t registers_used =
|
||||
((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
|
||||
vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
|
||||
registers_used * vlenb);
|
||||
}
|
||||
}
|
||||
|
||||
#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
|
||||
|
@ -521,6 +566,9 @@ vext_ldff(void *vd, void *v0, target_ulong base,
|
|||
uint32_t nf = vext_nf(desc);
|
||||
uint32_t vm = vext_vm(desc);
|
||||
uint32_t max_elems = vext_max_elems(desc, log2_esz);
|
||||
uint32_t esz = 1 << log2_esz;
|
||||
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
|
||||
uint32_t vta = vext_vta(desc);
|
||||
target_ulong addr, offset, remain;
|
||||
|
||||
/* probe every access*/
|
||||
|
@ -576,6 +624,18 @@ ProbeSuccess:
|
|||
}
|
||||
}
|
||||
env->vstart = 0;
|
||||
/* set tail elements to 1s */
|
||||
for (k = 0; k < nf; ++k) {
|
||||
vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz,
|
||||
(k * max_elems + max_elems) * esz);
|
||||
}
|
||||
if (nf * max_elems % total_elems != 0) {
|
||||
uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
|
||||
uint32_t registers_used =
|
||||
((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
|
||||
vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
|
||||
registers_used * vlenb);
|
||||
}
|
||||
}
|
||||
|
||||
#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
|
||||
|
|
Loading…
Reference in New Issue