/*
 * RISC-V Vector Extension Helpers for QEMU.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/memop.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
#include "vector_internals.h"
#include <math.h>

target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
                            target_ulong s2)
{
    int vlmax, vl;
    RISCVCPU *cpu = env_archcpu(env);
    uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
    uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
    uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
    int xlen = riscv_cpu_xlen(env);
    bool vill = (s2 >> (xlen - 1)) & 0x1;
    target_ulong reserved = s2 &
                            MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
                                            xlen - 1 - R_VTYPE_RESERVED_SHIFT);

    if (lmul & 4) {
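        /*
         * VTYPE.VLMUL values 5..7 encode the fractional LMULs 1/8, 1/4 and
         * 1/2 (value 4 is reserved), so "8 - lmul" below is the divisor
         * used to form LMUL * VLEN for the comparison against SEW.
         */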
        /* Fractional LMUL - check LMUL * VLEN >= SEW */
        if (lmul == 4 ||
            cpu->cfg.vlen >> (8 - lmul) < sew) {
            vill = true;
        }
    }

    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
        /* only set vill bit. */
        env->vill = 1;
        env->vtype = 0;
        env->vl = 0;
        env->vstart = 0;
        return 0;
    }

    vlmax = vext_get_vlmax(cpu, s2);
    if (s1 <= vlmax) {
        vl = s1;
    } else {
        vl = vlmax;
    }
    env->vl = vl;
    env->vtype = s2;
    env->vstart = 0;
    env->vill = 0;
    return vl;
}

/*
 * Get the maximum number of elements that can be operated on.
 *
 * log2_esz: log2 of element size in bytes.
 */
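/*
 * For example: with vlenb = 16 bytes (VLEN = 128), vext_lmul(desc) = 1
 * (EMUL = 2) and log2_esz = 2 (SEW = 32), scale = -1 and the result is
 * 16 >> 1 = 8 elements, i.e. VLMAX = EMUL * VLEN / SEW.
 */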
static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
{
    /*
     * As simd_desc supports at most 2048 bytes, the max vlen is 1024 bits,
     * so vlen in bytes (vlenb) is encoded as maxsz.
     */
    uint32_t vlenb = simd_maxsz(desc);

    /* Return VLMAX */
    int scale = vext_lmul(desc) - log2_esz;
    return scale < 0 ? vlenb >> -scale : vlenb << scale;
}

static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
{
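    /*
     * Apply the pointer-masking transformation cached for the current mode
     * (env->cur_pmmask / env->cur_pmbase) to a vector element address.
     */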
    return (addr & ~env->cur_pmmask) | env->cur_pmbase;
}

/*
 * This function checks watchpoints before the real load operation.
 *
 * In system mode, the TLB API probe_access is enough for the watchpoint check.
 * In user mode, there is no watchpoint support now.
 *
 * It will trigger an exception if there is no mapping in the TLB
 * and the page table walk can't fill the TLB entry. Then the guest
 * software can return here after processing the exception, or never return.
 */
static void probe_pages(CPURISCVState *env, target_ulong addr,
                        target_ulong len, uintptr_t ra,
                        MMUAccessType access_type)
{
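    /*
     * The access may span two pages: probe the bytes that fit in the page
     * containing addr first (pagelen bytes remain in that page), then probe
     * any remainder in the following page.
     */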
    target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
    target_ulong curlen = MIN(pagelen, len);

    probe_access(env, adjust_addr(env, addr), curlen, access_type,
                 cpu_mmu_index(env, false), ra);
    if (len > curlen) {
        addr += curlen;
        curlen = len - curlen;
        probe_access(env, adjust_addr(env, addr), curlen, access_type,
                     cpu_mmu_index(env, false), ra);
    }
}

static inline void vext_set_elem_mask(void *v0, int index,
                                      uint8_t value)
{
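    /* One mask bit per element, packed into the uint64_t words of v0. */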
    int idx = index / 64;
    int pos = index % 64;
    uint64_t old = ((uint64_t *)v0)[idx];
    ((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);
}

/* element operations for load and store */
typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
                               uint32_t idx, void *vd, uintptr_t retaddr);

#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF)            \
static void NAME(CPURISCVState *env, abi_ptr addr,         \
                 uint32_t idx, void *vd, uintptr_t retaddr)\
{                                                          \
    ETYPE *cur = ((ETYPE *)vd + H(idx));                   \
    *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr);      \
}                                                          \

GEN_VEXT_LD_ELEM(lde_b, int8_t,  H1, ldsb)
GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)

#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF)            \
static void NAME(CPURISCVState *env, abi_ptr addr,         \
                 uint32_t idx, void *vd, uintptr_t retaddr)\
{                                                          \
    ETYPE data = *((ETYPE *)vd + H(idx));                  \
    cpu_##STSUF##_data_ra(env, addr, data, retaddr);       \
}

GEN_VEXT_ST_ELEM(ste_b, int8_t,  H1, stb)
GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)

static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
                                   uint32_t desc, uint32_t nf,
                                   uint32_t esz, uint32_t max_elems)
{
    uint32_t vta = vext_vta(desc);
    int k;
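
    /*
     * Tail-agnostic handling: when vta is set, elements vl..max_elems-1 of
     * each of the nf destination segments are filled with all 1s.
     */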

    if (vta == 0) {
        return;
    }

    for (k = 0; k < nf; ++k) {
        vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz,
                          (k * max_elems + max_elems) * esz);
    }
}

/*
 * stride: access vector element from strided memory
 */
static void
vext_ldst_stride(void *vd, void *v0, target_ulong base,
                 target_ulong stride, CPURISCVState *env,
                 uint32_t desc, uint32_t vm,
                 vext_ldst_elem_fn *ldst_elem,
                 uint32_t log2_esz, uintptr_t ra)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t max_elems = vext_max_elems(desc, log2_esz);
    uint32_t esz = 1 << log2_esz;
    uint32_t vma = vext_vma(desc);
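
    /*
     * Field k of element i is accessed at guest address
     * base + stride * i + k * esz and is stored at in-register index
     * i + k * max_elems, so the nf fields of a segment end up in
     * consecutive register groups.
     */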

    for (i = env->vstart; i < env->vl; i++, env->vstart++) {
        k = 0;
        while (k < nf) {
            if (!vm && !vext_elem_mask(v0, i)) {
                /* set masked-off elements to 1s */
                vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
                                  (i + k * max_elems + 1) * esz);
                k++;
                continue;
            }
            target_ulong addr = base + stride * i + (k << log2_esz);
            ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
            k++;
        }
    }
    env->vstart = 0;

    vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}

#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN)                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  target_ulong stride, CPURISCVState *env,              \
                  uint32_t desc)                                        \
{                                                                       \
    uint32_t vm = vext_vm(desc);                                        \
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN,      \
                     ctzl(sizeof(ETYPE)), GETPC());                     \
}

GEN_VEXT_LD_STRIDE(vlse8_v,  int8_t,  lde_b)
GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)

#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN)                       \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  target_ulong stride, CPURISCVState *env,              \
                  uint32_t desc)                                        \
{                                                                       \
    uint32_t vm = vext_vm(desc);                                        \
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN,     \
                     ctzl(sizeof(ETYPE)), GETPC());                     \
}

GEN_VEXT_ST_STRIDE(vsse8_v,  int8_t,  ste_b)
GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)

/*
 * unit-stride: access elements stored contiguously in memory
 */

/* unmasked unit-stride load and store operation */
static void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
             vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl,
             uintptr_t ra)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t max_elems = vext_max_elems(desc, log2_esz);
    uint32_t esz = 1 << log2_esz;
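
    /*
     * Unit-stride segments are fully packed in memory: field k of element i
     * is at base + (i * nf + k) * esz.
     */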

    /* load bytes from guest memory */
    for (i = env->vstart; i < evl; i++, env->vstart++) {
        k = 0;
        while (k < nf) {
            target_ulong addr = base + ((i * nf + k) << log2_esz);
            ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
            k++;
        }
    }
    env->vstart = 0;

    vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
}

/*
 * A masked unit-stride load or store is handled as a special case of the
 * strided operation, with stride = NF * sizeof(ETYPE).
 */

#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN)                            \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
                         CPURISCVState *env, uint32_t desc)             \
{                                                                       \
    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
    vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN,   \
                     ctzl(sizeof(ETYPE)), GETPC());                     \
}                                                                       \
                                                                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  CPURISCVState *env, uint32_t desc)                    \
{                                                                       \
    vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
                 ctzl(sizeof(ETYPE)), env->vl, GETPC());                \
}

GEN_VEXT_LD_US(vle8_v,  int8_t,  lde_b)
GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)

#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                           \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
                         CPURISCVState *env, uint32_t desc)             \
{                                                                       \
    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
                     ctzl(sizeof(ETYPE)), GETPC());                     \
}                                                                       \
                                                                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  CPURISCVState *env, uint32_t desc)                    \
{                                                                       \
    vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
                 ctzl(sizeof(ETYPE)), env->vl, GETPC());                \
}

GEN_VEXT_ST_US(vse8_v,  int8_t,  ste_b)
GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)

/*
 * unit stride mask load and store, EEW = 1
 */
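/*
 * The mask holds one bit per element, so vlm.v/vsm.v below transfer
 * evl = ceil(vl / 8) bytes using the byte element accessors.
 */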

void HELPER(vlm_v)(void *vd, void *v0, target_ulong base,
                   CPURISCVState *env, uint32_t desc)
{
    /* evl = ceil(vl/8) */
    uint8_t evl = (env->vl + 7) >> 3;
    vext_ldst_us(vd, base, env, desc, lde_b,
                 0, evl, GETPC());
}

void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
                   CPURISCVState *env, uint32_t desc)
{
    /* evl = ceil(vl/8) */
    uint8_t evl = (env->vl + 7) >> 3;
    vext_ldst_us(vd, base, env, desc, ste_b,
                 0, evl, GETPC());
}

/*
 * index: access vector element from indexed memory
 */
typedef target_ulong vext_get_index_addr(target_ulong base,
                                         uint32_t idx, void *vs2);

#define GEN_VEXT_GET_INDEX_ADDR(NAME, ETYPE, H)        \
static target_ulong NAME(target_ulong base,            \
                         uint32_t idx, void *vs2)      \
{                                                      \
    return (base + *((ETYPE *)vs2 + H(idx)));          \
}

GEN_VEXT_GET_INDEX_ADDR(idx_b, uint8_t,  H1)
GEN_VEXT_GET_INDEX_ADDR(idx_h, uint16_t, H2)
GEN_VEXT_GET_INDEX_ADDR(idx_w, uint32_t, H4)
GEN_VEXT_GET_INDEX_ADDR(idx_d, uint64_t, H8)

static inline void
vext_ldst_index(void *vd, void *v0, target_ulong base,
                void *vs2, CPURISCVState *env, uint32_t desc,
                vext_get_index_addr get_index_addr,
                vext_ldst_elem_fn *ldst_elem,
                uint32_t log2_esz, uintptr_t ra)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t max_elems = vext_max_elems(desc, log2_esz);
    uint32_t esz = 1 << log2_esz;
    uint32_t vma = vext_vma(desc);
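
    /*
     * get_index_addr adds the byte offset held in index register element
     * vs2[i] to base, so field k of element i is accessed at
     * base + vs2[i] + k * esz.
     */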

    /* load bytes from guest memory */
    for (i = env->vstart; i < env->vl; i++, env->vstart++) {
        k = 0;
        while (k < nf) {
            if (!vm && !vext_elem_mask(v0, i)) {
                /* set masked-off elements to 1s */
                vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
                                  (i + k * max_elems + 1) * esz);
                k++;
                continue;
            }
            abi_ptr addr = get_index_addr(base, i, vs2) + (k << log2_esz);
            ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
            k++;
        }
    }
    env->vstart = 0;

    vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}

#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN)               \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  void *vs2, CPURISCVState *env, uint32_t desc)         \
{                                                                       \
    vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,             \
                    LOAD_FN, ctzl(sizeof(ETYPE)), GETPC());             \
}

GEN_VEXT_LD_INDEX(vlxei8_8_v,   int8_t,  idx_b, lde_b)
GEN_VEXT_LD_INDEX(vlxei8_16_v,  int16_t, idx_b, lde_h)
GEN_VEXT_LD_INDEX(vlxei8_32_v,  int32_t, idx_b, lde_w)
GEN_VEXT_LD_INDEX(vlxei8_64_v,  int64_t, idx_b, lde_d)
GEN_VEXT_LD_INDEX(vlxei16_8_v,  int8_t,  idx_h, lde_b)
GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
GEN_VEXT_LD_INDEX(vlxei32_8_v,  int8_t,  idx_w, lde_b)
GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
GEN_VEXT_LD_INDEX(vlxei64_8_v,  int8_t,  idx_d, lde_b)
GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)

#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN)              \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  void *vs2, CPURISCVState *env, uint32_t desc)         \
{                                                                       \
    vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,             \
                    STORE_FN, ctzl(sizeof(ETYPE)),                      \
                    GETPC());                                           \
}

GEN_VEXT_ST_INDEX(vsxei8_8_v,   int8_t,  idx_b, ste_b)
GEN_VEXT_ST_INDEX(vsxei8_16_v,  int16_t, idx_b, ste_h)
GEN_VEXT_ST_INDEX(vsxei8_32_v,  int32_t, idx_b, ste_w)
GEN_VEXT_ST_INDEX(vsxei8_64_v,  int64_t, idx_b, ste_d)
GEN_VEXT_ST_INDEX(vsxei16_8_v,  int8_t,  idx_h, ste_b)
GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
GEN_VEXT_ST_INDEX(vsxei32_8_v,  int8_t,  idx_w, ste_b)
GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
GEN_VEXT_ST_INDEX(vsxei64_8_v,  int8_t,  idx_d, ste_b)
GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)

/*
 * unit-stride fault-only-first load instructions
 */
static inline void
vext_ldff(void *vd, void *v0, target_ulong base,
          CPURISCVState *env, uint32_t desc,
          vext_ldst_elem_fn *ldst_elem,
          uint32_t log2_esz, uintptr_t ra)
{
    void *host;
    uint32_t i, k, vl = 0;
    uint32_t nf = vext_nf(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t max_elems = vext_max_elems(desc, log2_esz);
    uint32_t esz = 1 << log2_esz;
    uint32_t vma = vext_vma(desc);
    target_ulong addr, offset, remain;
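
    /*
     * Fault-only-first: the first element must succeed (and may trap), but
     * a fault on any later element only truncates vl to that element's
     * index, so the probe loop below shrinks env->vl instead of trapping.
     */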

    /* probe every access */
    for (i = env->vstart; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, i)) {
            continue;
        }
        addr = adjust_addr(env, base + i * (nf << log2_esz));
        if (i == 0) {
            probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
        } else {
            /* if it triggers an exception, no need to check watchpoint */
            remain = nf << log2_esz;
            while (remain > 0) {
                offset = -(addr | TARGET_PAGE_MASK);
                host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
                                         cpu_mmu_index(env, false));
                if (host) {
#ifdef CONFIG_USER_ONLY
                    if (!page_check_range(addr, offset, PAGE_READ)) {
                        vl = i;
                        goto ProbeSuccess;
                    }
#else
                    probe_pages(env, addr, offset, ra, MMU_DATA_LOAD);
#endif
                } else {
                    vl = i;
                    goto ProbeSuccess;
                }
                if (remain <= offset) {
                    break;
                }
                remain -= offset;
                addr = adjust_addr(env, addr + offset);
            }
        }
    }
ProbeSuccess:
    /* load bytes from guest memory */
    if (vl != 0) {
        env->vl = vl;
    }
    for (i = env->vstart; i < env->vl; i++) {
        k = 0;
        while (k < nf) {
            if (!vm && !vext_elem_mask(v0, i)) {
                /* set masked-off elements to 1s */
                vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
                                  (i + k * max_elems + 1) * esz);
                k++;
                continue;
            }
            addr = base + ((i * nf + k) << log2_esz);
            ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
            k++;
        }
    }
    env->vstart = 0;

    vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}

#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN)               \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,  \
                  CPURISCVState *env, uint32_t desc)      \
{                                                         \
    vext_ldff(vd, v0, base, env, desc, LOAD_FN,           \
              ctzl(sizeof(ETYPE)), GETPC());              \
}

GEN_VEXT_LDFF(vle8ff_v,  int8_t,  lde_b)
GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h)
GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w)
GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)

#define DO_SWAP(N, M) (M)
#define DO_AND(N, M)  (N & M)
#define DO_XOR(N, M)  (N ^ M)
#define DO_OR(N, M)   (N | M)
#define DO_ADD(N, M)  (N + M)

/* Signed min/max */
#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

/*
 * load and store whole register instructions
 */
static void
vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
                vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uintptr_t ra)
{
    uint32_t i, k, off, pos;
    uint32_t nf = vext_nf(desc);
    uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
    uint32_t max_elems = vlenb >> log2_esz;

    k = env->vstart / max_elems;
    off = env->vstart % max_elems;
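
    /*
     * env->vstart may point into the middle of the register group when the
     * instruction is restarted after a trap: k is the segment (register) to
     * resume in and off is the element offset within that segment.
     */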

    if (off) {
        /* load/store rest of elements of current segment pointed by vstart */
        for (pos = off; pos < max_elems; pos++, env->vstart++) {
            target_ulong addr = base + ((pos + k * max_elems) << log2_esz);
            ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd,
                      ra);
        }
        k++;
    }

    /* load/store elements for rest of segments */
    for (; k < nf; k++) {
        for (i = 0; i < max_elems; i++, env->vstart++) {
            target_ulong addr = base + ((i + k * max_elems) << log2_esz);
            ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
        }
    }

    env->vstart = 0;
}

#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN)           \
void HELPER(NAME)(void *vd, target_ulong base,            \
                  CPURISCVState *env, uint32_t desc)      \
{                                                         \
    vext_ldst_whole(vd, base, env, desc, LOAD_FN,         \
                    ctzl(sizeof(ETYPE)), GETPC());        \
}

GEN_VEXT_LD_WHOLE(vl1re8_v,  int8_t,  lde_b)
GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h)
GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w)
GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d)
GEN_VEXT_LD_WHOLE(vl2re8_v,  int8_t,  lde_b)
GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h)
GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w)
GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d)
GEN_VEXT_LD_WHOLE(vl4re8_v,  int8_t,  lde_b)
GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h)
GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w)
GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d)
GEN_VEXT_LD_WHOLE(vl8re8_v,  int8_t,  lde_b)
GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h)
GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w)
GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d)

#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN)          \
void HELPER(NAME)(void *vd, target_ulong base,            \
                  CPURISCVState *env, uint32_t desc)      \
{                                                         \
    vext_ldst_whole(vd, base, env, desc, STORE_FN,        \
                    ctzl(sizeof(ETYPE)), GETPC());        \
}

GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b)
GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b)
GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b)
GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)

/*
 * Vector Integer Arithmetic Instructions
 */

/* (TD, T1, T2, TX1, TX2) */
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t
#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t
#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t
#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t

#define DO_SUB(N, M) (N - M)
#define DO_RSUB(N, M) (M - N)

RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vadd_vv_h, OP_SSS_H, H2, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vadd_vv_w, OP_SSS_W, H4, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vadd_vv_d, OP_SSS_D, H8, H8, H8, DO_ADD)
RVVCALL(OPIVV2, vsub_vv_b, OP_SSS_B, H1, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)

GEN_VEXT_VV(vadd_vv_b, 1)
GEN_VEXT_VV(vadd_vv_h, 2)
GEN_VEXT_VV(vadd_vv_w, 4)
GEN_VEXT_VV(vadd_vv_d, 8)
GEN_VEXT_VV(vsub_vv_b, 1)
GEN_VEXT_VV(vsub_vv_h, 2)
GEN_VEXT_VV(vsub_vv_w, 4)
GEN_VEXT_VV(vsub_vv_d, 8)

RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD)
RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD)
RVVCALL(OPIVX2, vadd_vx_w, OP_SSS_W, H4, H4, DO_ADD)
RVVCALL(OPIVX2, vadd_vx_d, OP_SSS_D, H8, H8, DO_ADD)
RVVCALL(OPIVX2, vsub_vx_b, OP_SSS_B, H1, H1, DO_SUB)
RVVCALL(OPIVX2, vsub_vx_h, OP_SSS_H, H2, H2, DO_SUB)
RVVCALL(OPIVX2, vsub_vx_w, OP_SSS_W, H4, H4, DO_SUB)
RVVCALL(OPIVX2, vsub_vx_d, OP_SSS_D, H8, H8, DO_SUB)
RVVCALL(OPIVX2, vrsub_vx_b, OP_SSS_B, H1, H1, DO_RSUB)
RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB)
RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB)
RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)

GEN_VEXT_VX(vadd_vx_b, 1)
GEN_VEXT_VX(vadd_vx_h, 2)
GEN_VEXT_VX(vadd_vx_w, 4)
GEN_VEXT_VX(vadd_vx_d, 8)
GEN_VEXT_VX(vsub_vx_b, 1)
GEN_VEXT_VX(vsub_vx_h, 2)
GEN_VEXT_VX(vsub_vx_w, 4)
GEN_VEXT_VX(vsub_vx_d, 8)
GEN_VEXT_VX(vrsub_vx_b, 1)
GEN_VEXT_VX(vrsub_vx_h, 2)
GEN_VEXT_VX(vrsub_vx_w, 4)
GEN_VEXT_VX(vrsub_vx_d, 8)

void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
        *(uint8_t *)(d + i) = (uint8_t)b - *(uint8_t *)(a + i);
    }
}

void HELPER(vec_rsubs16)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
        *(uint16_t *)(d + i) = (uint16_t)b - *(uint16_t *)(a + i);
    }
}

void HELPER(vec_rsubs32)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
        *(uint32_t *)(d + i) = (uint32_t)b - *(uint32_t *)(a + i);
    }
}

void HELPER(vec_rsubs64)(void *d, void *a, uint64_t b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
        *(uint64_t *)(d + i) = b - *(uint64_t *)(a + i);
    }
}

/* Vector Widening Integer Add/Subtract */
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
#define WOP_WUUU_B uint16_t, uint8_t, uint16_t, uint16_t, uint16_t
#define WOP_WUUU_H uint32_t, uint16_t, uint32_t, uint32_t, uint32_t
#define WOP_WUUU_W uint64_t, uint32_t, uint64_t, uint64_t, uint64_t
#define WOP_WSSS_B int16_t, int8_t, int16_t, int16_t, int16_t
#define WOP_WSSS_H int32_t, int16_t, int32_t, int32_t, int32_t
#define WOP_WSSS_W int64_t, int32_t, int64_t, int64_t, int64_t
RVVCALL(OPIVV2, vwaddu_vv_b, WOP_UUU_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwaddu_vv_h, WOP_UUU_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwaddu_vv_w, WOP_UUU_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsubu_vv_b, WOP_UUU_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsubu_vv_h, WOP_UUU_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsubu_vv_w, WOP_UUU_W, H8, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vwadd_vv_b, WOP_SSS_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwadd_vv_h, WOP_SSS_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwadd_vv_w, WOP_SSS_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsub_vv_b, WOP_SSS_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsub_vv_h, WOP_SSS_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsub_vv_w, WOP_SSS_W, H8, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vwaddu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwaddu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwaddu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsubu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsubu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsubu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_SUB)
RVVCALL(OPIVV2, vwadd_wv_b, WOP_WSSS_B, H2, H1, H1, DO_ADD)
RVVCALL(OPIVV2, vwadd_wv_h, WOP_WSSS_H, H4, H2, H2, DO_ADD)
RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD)
RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB)
RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB)
RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB)
GEN_VEXT_VV(vwaddu_vv_b, 2)
GEN_VEXT_VV(vwaddu_vv_h, 4)
GEN_VEXT_VV(vwaddu_vv_w, 8)
GEN_VEXT_VV(vwsubu_vv_b, 2)
GEN_VEXT_VV(vwsubu_vv_h, 4)
GEN_VEXT_VV(vwsubu_vv_w, 8)
GEN_VEXT_VV(vwadd_vv_b, 2)
GEN_VEXT_VV(vwadd_vv_h, 4)
GEN_VEXT_VV(vwadd_vv_w, 8)
GEN_VEXT_VV(vwsub_vv_b, 2)
GEN_VEXT_VV(vwsub_vv_h, 4)
GEN_VEXT_VV(vwsub_vv_w, 8)
GEN_VEXT_VV(vwaddu_wv_b, 2)
GEN_VEXT_VV(vwaddu_wv_h, 4)
GEN_VEXT_VV(vwaddu_wv_w, 8)
GEN_VEXT_VV(vwsubu_wv_b, 2)
GEN_VEXT_VV(vwsubu_wv_h, 4)
GEN_VEXT_VV(vwsubu_wv_w, 8)
GEN_VEXT_VV(vwadd_wv_b, 2)
GEN_VEXT_VV(vwadd_wv_h, 4)
GEN_VEXT_VV(vwadd_wv_w, 8)
GEN_VEXT_VV(vwsub_wv_b, 2)
GEN_VEXT_VV(vwsub_wv_h, 4)
GEN_VEXT_VV(vwsub_wv_w, 8)

RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwaddu_vx_w, WOP_UUU_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsubu_vx_b, WOP_UUU_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsubu_vx_h, WOP_UUU_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsubu_vx_w, WOP_UUU_W, H8, H4, DO_SUB)
RVVCALL(OPIVX2, vwadd_vx_b, WOP_SSS_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwadd_vx_h, WOP_SSS_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwadd_vx_w, WOP_SSS_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsub_vx_b, WOP_SSS_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsub_vx_h, WOP_SSS_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsub_vx_w, WOP_SSS_W, H8, H4, DO_SUB)
RVVCALL(OPIVX2, vwaddu_wx_b, WOP_WUUU_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwaddu_wx_h, WOP_WUUU_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwaddu_wx_w, WOP_WUUU_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsubu_wx_b, WOP_WUUU_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsubu_wx_h, WOP_WUUU_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsubu_wx_w, WOP_WUUU_W, H8, H4, DO_SUB)
RVVCALL(OPIVX2, vwadd_wx_b, WOP_WSSS_B, H2, H1, DO_ADD)
RVVCALL(OPIVX2, vwadd_wx_h, WOP_WSSS_H, H4, H2, DO_ADD)
RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD)
RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB)
RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB)
RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB)
GEN_VEXT_VX(vwaddu_vx_b, 2)
GEN_VEXT_VX(vwaddu_vx_h, 4)
GEN_VEXT_VX(vwaddu_vx_w, 8)
GEN_VEXT_VX(vwsubu_vx_b, 2)
GEN_VEXT_VX(vwsubu_vx_h, 4)
GEN_VEXT_VX(vwsubu_vx_w, 8)
GEN_VEXT_VX(vwadd_vx_b, 2)
GEN_VEXT_VX(vwadd_vx_h, 4)
GEN_VEXT_VX(vwadd_vx_w, 8)
GEN_VEXT_VX(vwsub_vx_b, 2)
GEN_VEXT_VX(vwsub_vx_h, 4)
GEN_VEXT_VX(vwsub_vx_w, 8)
GEN_VEXT_VX(vwaddu_wx_b, 2)
GEN_VEXT_VX(vwaddu_wx_h, 4)
GEN_VEXT_VX(vwaddu_wx_w, 8)
GEN_VEXT_VX(vwsubu_wx_b, 2)
GEN_VEXT_VX(vwsubu_wx_h, 4)
GEN_VEXT_VX(vwsubu_wx_w, 8)
GEN_VEXT_VX(vwadd_wx_b, 2)
GEN_VEXT_VX(vwadd_wx_h, 4)
GEN_VEXT_VX(vwadd_wx_w, 8)
GEN_VEXT_VX(vwsub_wx_b, 2)
GEN_VEXT_VX(vwsub_wx_h, 4)
GEN_VEXT_VX(vwsub_wx_w, 8)

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
#define DO_VADC(N, M, C) (N + M + C)
#define DO_VSBC(N, M, C) (N - M - C)
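
/*
 * For vadc/vsbc the v0 register is not used as a predicate: bit i of v0
 * supplies the carry (borrow) into element i, so every body element is
 * written regardless of the mask.
 */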

#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP)              \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    uint32_t vl = env->vl;                                    \
    uint32_t esz = sizeof(ETYPE);                             \
    uint32_t total_elems =                                    \
        vext_get_total_elems(env, desc, esz);                 \
    uint32_t vta = vext_vta(desc);                            \
    uint32_t i;                                               \
                                                              \
    for (i = env->vstart; i < vl; i++) {                      \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        ETYPE carry = vext_elem_mask(v0, i);                  \
                                                              \
        *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry);         \
    }                                                         \
    env->vstart = 0;                                          \
    /* set tail elements to 1s */                             \
    vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);  \
}

GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t,  H1, DO_VADC)
GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC)
GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC)
GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC)

GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t,  H1, DO_VSBC)
GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC)
GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC)
GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC)

#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP)                         \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
                  CPURISCVState *env, uint32_t desc)                     \
{                                                                        \
    uint32_t vl = env->vl;                                               \
    uint32_t esz = sizeof(ETYPE);                                        \
    uint32_t total_elems = vext_get_total_elems(env, desc, esz);         \
    uint32_t vta = vext_vta(desc);                                       \
    uint32_t i;                                                          \
                                                                         \
    for (i = env->vstart; i < vl; i++) {                                 \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                               \
        ETYPE carry = vext_elem_mask(v0, i);                             \
                                                                         \
        *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
    }                                                                    \
    env->vstart = 0;                                                     \
    /* set tail elements to 1s */                                        \
    vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);             \
}

GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t,  H1, DO_VADC)
GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC)
GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC)
GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC)

GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t,  H1, DO_VSBC)
GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC)
GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC)
GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC)

#define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N : \
                              (__typeof(N))(N + M) < N)
#define DO_MSBC(N, M, C) (C ? N <= M : N < M)
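
/*
 * DO_MADC yields the carry-out of the unsigned addition N + M + C: with an
 * incoming carry the sum wraps iff N + M + 1 <= N, otherwise iff N + M < N.
 * DO_MSBC likewise yields the borrow-out of N - M - C.
 */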

#define GEN_VEXT_VMADC_VVM(NAME, ETYPE, H, DO_OP)             \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    uint32_t vl = env->vl;                                    \
    uint32_t vm = vext_vm(desc);                              \
    uint32_t total_elems = riscv_cpu_cfg(env)->vlen;          \
    uint32_t vta_all_1s = vext_vta_all_1s(desc);              \
    uint32_t i;                                               \
                                                              \
    for (i = env->vstart; i < vl; i++) {                      \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        ETYPE carry = !vm && vext_elem_mask(v0, i);           \
        vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry));      \
    }                                                         \
    env->vstart = 0;                                          \
    /*
     * mask destination register is always tail-agnostic
     * set tail elements to 1s
     */                                                       \
    if (vta_all_1s) {                                         \
        for (; i < total_elems; i++) {                        \
            vext_set_elem_mask(vd, i, 1);                     \
        }                                                     \
    }                                                         \
}

GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t,  H1, DO_MADC)
GEN_VEXT_VMADC_VVM(vmadc_vvm_h, uint16_t, H2, DO_MADC)
GEN_VEXT_VMADC_VVM(vmadc_vvm_w, uint32_t, H4, DO_MADC)
GEN_VEXT_VMADC_VVM(vmadc_vvm_d, uint64_t, H8, DO_MADC)

GEN_VEXT_VMADC_VVM(vmsbc_vvm_b, uint8_t,  H1, DO_MSBC)
GEN_VEXT_VMADC_VVM(vmsbc_vvm_h, uint16_t, H2, DO_MSBC)
GEN_VEXT_VMADC_VVM(vmsbc_vvm_w, uint32_t, H4, DO_MSBC)
GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC)

#define GEN_VEXT_VMADC_VXM(NAME, ETYPE, H, DO_OP)               \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,          \
                  void *vs2, CPURISCVState *env, uint32_t desc) \
{                                                               \
    uint32_t vl = env->vl;                                      \
    uint32_t vm = vext_vm(desc);                                \
    uint32_t total_elems = riscv_cpu_cfg(env)->vlen;            \
    uint32_t vta_all_1s = vext_vta_all_1s(desc);                \
    uint32_t i;                                                 \
                                                                \
    for (i = env->vstart; i < vl; i++) {                        \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                      \
        ETYPE carry = !vm && vext_elem_mask(v0, i);             \
        vext_set_elem_mask(vd, i,                               \
                DO_OP(s2, (ETYPE)(target_long)s1, carry));      \
    }                                                           \
    env->vstart = 0;                                            \
    /*
     * mask destination register is always tail-agnostic
     * set tail elements to 1s
     */                                                         \
    if (vta_all_1s) {                                           \
        for (; i < total_elems; i++) {                          \
            vext_set_elem_mask(vd, i, 1);                       \
        }                                                       \
    }                                                           \
}

GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t,  H1, DO_MADC)
GEN_VEXT_VMADC_VXM(vmadc_vxm_h, uint16_t, H2, DO_MADC)
GEN_VEXT_VMADC_VXM(vmadc_vxm_w, uint32_t, H4, DO_MADC)
GEN_VEXT_VMADC_VXM(vmadc_vxm_d, uint64_t, H8, DO_MADC)

GEN_VEXT_VMADC_VXM(vmsbc_vxm_b, uint8_t,  H1, DO_MSBC)
GEN_VEXT_VMADC_VXM(vmsbc_vxm_h, uint16_t, H2, DO_MSBC)
GEN_VEXT_VMADC_VXM(vmsbc_vxm_w, uint32_t, H4, DO_MSBC)
GEN_VEXT_VMADC_VXM(vmsbc_vxm_d, uint64_t, H8, DO_MSBC)
2020-07-01 17:25:01 +02:00
|
|
|
|
|
|
|
/* Vector Bitwise Logical Instructions */
|
|
|
|
RVVCALL(OPIVV2, vand_vv_b, OP_SSS_B, H1, H1, H1, DO_AND)
|
|
|
|
RVVCALL(OPIVV2, vand_vv_h, OP_SSS_H, H2, H2, H2, DO_AND)
|
|
|
|
RVVCALL(OPIVV2, vand_vv_w, OP_SSS_W, H4, H4, H4, DO_AND)
|
|
|
|
RVVCALL(OPIVV2, vand_vv_d, OP_SSS_D, H8, H8, H8, DO_AND)
|
|
|
|
RVVCALL(OPIVV2, vor_vv_b, OP_SSS_B, H1, H1, H1, DO_OR)
|
|
|
|
RVVCALL(OPIVV2, vor_vv_h, OP_SSS_H, H2, H2, H2, DO_OR)
|
|
|
|
RVVCALL(OPIVV2, vor_vv_w, OP_SSS_W, H4, H4, H4, DO_OR)
|
|
|
|
RVVCALL(OPIVV2, vor_vv_d, OP_SSS_D, H8, H8, H8, DO_OR)
|
|
|
|
RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR)
|
|
|
|
RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR)
|
|
|
|
RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR)
|
|
|
|
RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vand_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vand_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vand_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vand_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vor_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vor_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vor_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vor_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vxor_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vxor_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vxor_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vxor_vv_d, 8)
|
2020-07-01 17:25:01 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND)
|
|
|
|
RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND)
|
|
|
|
RVVCALL(OPIVX2, vand_vx_w, OP_SSS_W, H4, H4, DO_AND)
|
|
|
|
RVVCALL(OPIVX2, vand_vx_d, OP_SSS_D, H8, H8, DO_AND)
|
|
|
|
RVVCALL(OPIVX2, vor_vx_b, OP_SSS_B, H1, H1, DO_OR)
|
|
|
|
RVVCALL(OPIVX2, vor_vx_h, OP_SSS_H, H2, H2, DO_OR)
|
|
|
|
RVVCALL(OPIVX2, vor_vx_w, OP_SSS_W, H4, H4, DO_OR)
|
|
|
|
RVVCALL(OPIVX2, vor_vx_d, OP_SSS_D, H8, H8, DO_OR)
|
|
|
|
RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR)
|
|
|
|
RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR)
|
|
|
|
RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR)
|
|
|
|
RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vand_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vand_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vand_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vand_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vor_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vor_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vor_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vor_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vxor_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vxor_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vxor_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vxor_vx_d, 8)
|
2020-07-01 17:25:02 +02:00
|
|
|
|
|
|
|
/* Vector Single-Width Bit Shift Instructions */
|
|
|
|
#define DO_SLL(N, M) (N << (M))
|
|
|
|
#define DO_SRL(N, M) (N >> (M))
|
|
|
|
|
|
|
|
/* generate the helpers for shift instructions with two vector operands */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK) \
|
2020-07-01 17:25:02 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
|
|
|
|
void *vs2, CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:33 +02:00
|
|
|
uint32_t esz = sizeof(TS1); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:50:58 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:02 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:50:58 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2020-07-01 17:25:02 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
|
|
|
|
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
|
|
|
|
*((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:33 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:02 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f)
|
2020-07-01 17:25:02 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f)
|
2020-07-01 17:25:02 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f)
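
/*
 * Illustrative sketch (not part of the helpers; the function name and the
 * VEXT_HELPER_EXAMPLES guard are hypothetical): the MASK argument above
 * (0x7/0xf/0x1f/0x3f) encodes that only the low log2(SEW) bits of the
 * shift amount are used, so for SEW=8 a shift amount of 9 shifts by 1.
 */
#ifdef VEXT_HELPER_EXAMPLES
static void example_shift_mask(void)
{
    uint8_t s2 = 0x80;
    uint8_t s1 = 9;                     /* only s1 & 0x7 == 1 is used */
    uint8_t res = DO_SRL(s2, s1 & 0x7); /* 0x40 */
    (void)res;
}
#endif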
|
2020-07-01 17:25:02 +02:00
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
/*
|
|
|
|
* generate the helpers for shift instructions with one vector and one scalar
|
|
|
|
*/
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
|
2023-04-05 10:58:11 +02:00
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
2021-12-10 08:56:00 +01:00
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:33 +02:00
|
|
|
uint32_t esz = sizeof(TD); \
|
|
|
|
uint32_t total_elems = \
|
|
|
|
vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:50:58 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-12-10 08:56:00 +01:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:56:00 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:50:58 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, \
|
|
|
|
(i + 1) * esz); \
|
2021-12-10 08:56:00 +01:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:33 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);\
|
2021-12-10 08:56:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f)
|
|
|
|
|
|
|
|
GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f)
|
|
|
|
|
|
|
|
GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f)
|
2020-07-01 17:25:03 +02:00
|
|
|
|
|
|
|
/* Vector Narrowing Integer Right Shift Instructions */
|
2021-12-10 08:56:30 +01:00
|
|
|
GEN_VEXT_SHIFT_VV(vnsrl_wv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VV(vnsrl_wv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VV(vnsrl_wv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
|
|
|
|
GEN_VEXT_SHIFT_VV(vnsra_wv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VV(vnsra_wv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VV(vnsra_wv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vnsrl_wx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VX(vnsrl_wx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vnsrl_wx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vnsra_wx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf)
|
|
|
|
GEN_VEXT_SHIFT_VX(vnsra_wx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f)
|
|
|
|
GEN_VEXT_SHIFT_VX(vnsra_wx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f)
|
2020-07-01 17:25:04 +02:00
|
|
|
|
|
|
|
/* Vector Integer Comparison Instructions */
|
|
|
|
#define DO_MSEQ(N, M) (N == M)
|
|
|
|
#define DO_MSNE(N, M) (N != M)
|
|
|
|
#define DO_MSLT(N, M) (N < M)
|
|
|
|
#define DO_MSLE(N, M) (N <= M)
|
|
|
|
#define DO_MSGT(N, M) (N > M)
|
|
|
|
|
|
|
|
#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2023-02-26 18:05:14 +01:00
|
|
|
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
|
2022-06-06 08:16:51 +02:00
|
|
|
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:04 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:04 +02:00
|
|
|
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
|
|
|
|
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
if (vma) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
2020-07-01 17:25:04 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \
|
2020-07-01 17:25:04 +02:00
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2023-04-05 10:58:12 +02:00
|
|
|
    /*
     * mask destination register is always tail-agnostic
     * set tail elements to 1s
     */                                                       \
|
2022-06-06 08:16:51 +02:00
|
|
|
if (vta_all_1s) { \
|
|
|
|
for (; i < total_elems; i++) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
|
|
|
} \
|
2020-07-01 17:25:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t, H1, DO_MSEQ)
|
|
|
|
GEN_VEXT_CMP_VV(vmseq_vv_h, uint16_t, H2, DO_MSEQ)
|
|
|
|
GEN_VEXT_CMP_VV(vmseq_vv_w, uint32_t, H4, DO_MSEQ)
|
|
|
|
GEN_VEXT_CMP_VV(vmseq_vv_d, uint64_t, H8, DO_MSEQ)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV(vmsne_vv_b, uint8_t, H1, DO_MSNE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsne_vv_h, uint16_t, H2, DO_MSNE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsne_vv_w, uint32_t, H4, DO_MSNE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsne_vv_d, uint64_t, H8, DO_MSNE)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV(vmsltu_vv_b, uint8_t, H1, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VV(vmsltu_vv_h, uint16_t, H2, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VV(vmsltu_vv_w, uint32_t, H4, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VV(vmsltu_vv_d, uint64_t, H8, DO_MSLT)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV(vmslt_vv_b, int8_t, H1, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VV(vmslt_vv_h, int16_t, H2, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VV(vmslt_vv_w, int32_t, H4, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VV(vmslt_vv_d, int64_t, H8, DO_MSLT)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV(vmsleu_vv_b, uint8_t, H1, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsleu_vv_h, uint16_t, H2, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsleu_vv_w, uint32_t, H4, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsleu_vv_d, uint64_t, H8, DO_MSLE)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV(vmsle_vv_b, int8_t, H1, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsle_vv_h, int16_t, H2, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsle_vv_w, int32_t, H4, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE)
|
|
|
|
|
|
|
|
#define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2023-02-26 18:05:14 +01:00
|
|
|
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
|
2022-06-06 08:16:51 +02:00
|
|
|
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:04 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:04 +02:00
|
|
|
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
if (vma) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
2020-07-01 17:25:04 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, \
|
2020-07-01 17:25:04 +02:00
|
|
|
DO_OP(s2, (ETYPE)(target_long)s1)); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2023-04-05 10:58:12 +02:00
|
|
|
    /*
     * mask destination register is always tail-agnostic
     * set tail elements to 1s
     */                                                       \
|
2022-06-06 08:16:51 +02:00
|
|
|
if (vta_all_1s) { \
|
|
|
|
for (; i < total_elems; i++) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
|
|
|
} \
|
2020-07-01 17:25:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t, H1, DO_MSEQ)
|
|
|
|
GEN_VEXT_CMP_VX(vmseq_vx_h, uint16_t, H2, DO_MSEQ)
|
|
|
|
GEN_VEXT_CMP_VX(vmseq_vx_w, uint32_t, H4, DO_MSEQ)
|
|
|
|
GEN_VEXT_CMP_VX(vmseq_vx_d, uint64_t, H8, DO_MSEQ)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmsne_vx_b, uint8_t, H1, DO_MSNE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsne_vx_h, uint16_t, H2, DO_MSNE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsne_vx_w, uint32_t, H4, DO_MSNE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsne_vx_d, uint64_t, H8, DO_MSNE)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmsltu_vx_b, uint8_t, H1, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsltu_vx_h, uint16_t, H2, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsltu_vx_w, uint32_t, H4, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsltu_vx_d, uint64_t, H8, DO_MSLT)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmslt_vx_b, int8_t, H1, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VX(vmslt_vx_h, int16_t, H2, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VX(vmslt_vx_w, int32_t, H4, DO_MSLT)
|
|
|
|
GEN_VEXT_CMP_VX(vmslt_vx_d, int64_t, H8, DO_MSLT)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmsleu_vx_b, uint8_t, H1, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsleu_vx_h, uint16_t, H2, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsleu_vx_w, uint32_t, H4, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsleu_vx_d, uint64_t, H8, DO_MSLE)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmsle_vx_b, int8_t, H1, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsle_vx_h, int16_t, H2, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsle_vx_w, int32_t, H4, DO_MSLE)
|
|
|
|
GEN_VEXT_CMP_VX(vmsle_vx_d, int64_t, H8, DO_MSLE)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmsgtu_vx_b, uint8_t, H1, DO_MSGT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsgtu_vx_h, uint16_t, H2, DO_MSGT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsgtu_vx_w, uint32_t, H4, DO_MSGT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsgtu_vx_d, uint64_t, H8, DO_MSGT)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VX(vmsgt_vx_b, int8_t, H1, DO_MSGT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsgt_vx_h, int16_t, H2, DO_MSGT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsgt_vx_w, int32_t, H4, DO_MSGT)
|
|
|
|
GEN_VEXT_CMP_VX(vmsgt_vx_d, int64_t, H8, DO_MSGT)
|
2020-07-01 17:25:05 +02:00
|
|
|
|
|
|
|
/* Vector Integer Min/Max Instructions */
|
|
|
|
RVVCALL(OPIVV2, vminu_vv_b, OP_UUU_B, H1, H1, H1, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vminu_vv_h, OP_UUU_H, H2, H2, H2, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vminu_vv_w, OP_UUU_W, H4, H4, H4, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vminu_vv_d, OP_UUU_D, H8, H8, H8, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vmin_vv_b, OP_SSS_B, H1, H1, H1, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vmin_vv_h, OP_SSS_H, H2, H2, H2, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vmin_vv_w, OP_SSS_W, H4, H4, H4, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vmin_vv_d, OP_SSS_D, H8, H8, H8, DO_MIN)
|
|
|
|
RVVCALL(OPIVV2, vmaxu_vv_b, OP_UUU_B, H1, H1, H1, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmaxu_vv_h, OP_UUU_H, H2, H2, H2, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmaxu_vv_w, OP_UUU_W, H4, H4, H4, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmaxu_vv_d, OP_UUU_D, H8, H8, H8, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX)
|
|
|
|
RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vminu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vminu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vminu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vminu_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vmin_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmin_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmin_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmin_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vmaxu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmaxu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmaxu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmaxu_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vmax_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmax_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmax_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmax_vv_d, 8)
|
2020-07-01 17:25:05 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vminu_vx_w, OP_UUU_W, H4, H4, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vminu_vx_d, OP_UUU_D, H8, H8, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vmin_vx_b, OP_SSS_B, H1, H1, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vmin_vx_h, OP_SSS_H, H2, H2, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vmin_vx_w, OP_SSS_W, H4, H4, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vmin_vx_d, OP_SSS_D, H8, H8, DO_MIN)
|
|
|
|
RVVCALL(OPIVX2, vmaxu_vx_b, OP_UUU_B, H1, H1, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmaxu_vx_h, OP_UUU_H, H2, H2, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmaxu_vx_w, OP_UUU_W, H4, H4, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmaxu_vx_d, OP_UUU_D, H8, H8, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX)
|
|
|
|
RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vminu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vminu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vminu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vminu_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmin_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmin_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmin_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmin_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmaxu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmaxu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmaxu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmaxu_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmax_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmax_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmax_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmax_vx_d, 8)
|
2020-07-01 17:25:06 +02:00
|
|
|
|
|
|
|
/* Vector Single-Width Integer Multiply Instructions */
|
|
|
|
#define DO_MUL(N, M) (N * M)
|
|
|
|
RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vmul_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmul_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmul_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmul_vv_d, 8)
|
2020-07-01 17:25:06 +02:00
|
|
|
|
|
|
|
static int8_t do_mulh_b(int8_t s2, int8_t s1)
|
|
|
|
{
|
|
|
|
return (int16_t)s2 * (int16_t)s1 >> 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int16_t do_mulh_h(int16_t s2, int16_t s1)
|
|
|
|
{
|
|
|
|
return (int32_t)s2 * (int32_t)s1 >> 16;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int32_t do_mulh_w(int32_t s2, int32_t s1)
|
|
|
|
{
|
|
|
|
return (int64_t)s2 * (int64_t)s1 >> 32;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int64_t do_mulh_d(int64_t s2, int64_t s1)
|
|
|
|
{
|
|
|
|
uint64_t hi_64, lo_64;
|
|
|
|
|
|
|
|
muls64(&lo_64, &hi_64, s1, s2);
|
|
|
|
return hi_64;
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
|
|
|
|
{
|
|
|
|
return (uint16_t)s2 * (uint16_t)s1 >> 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
|
|
|
|
{
|
|
|
|
return (uint32_t)s2 * (uint32_t)s1 >> 16;
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
|
|
|
|
{
|
|
|
|
return (uint64_t)s2 * (uint64_t)s1 >> 32;
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
|
|
|
|
{
|
|
|
|
uint64_t hi_64, lo_64;
|
|
|
|
|
|
|
|
mulu64(&lo_64, &hi_64, s2, s1);
|
|
|
|
return hi_64;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
|
|
|
|
{
|
|
|
|
return (int16_t)s2 * (uint16_t)s1 >> 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
|
|
|
|
{
|
|
|
|
return (int32_t)s2 * (uint32_t)s1 >> 16;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
|
|
|
|
{
|
|
|
|
return (int64_t)s2 * (uint64_t)s1 >> 32;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Let A = signed operand,
|
|
|
|
* B = unsigned operand
|
|
|
|
* P = mulu64(A, B), unsigned product
|
|
|
|
*
|
|
|
|
* LET X = 2 ** 64 - A, 2's complement of A
|
|
|
|
* SP = signed product
|
|
|
|
* THEN
|
|
|
|
* IF A < 0
|
|
|
|
* SP = -X * B
|
|
|
|
* = -(2 ** 64 - A) * B
|
|
|
|
* = A * B - 2 ** 64 * B
|
|
|
|
* = P - 2 ** 64 * B
|
|
|
|
* ELSE
|
|
|
|
* SP = P
|
|
|
|
* THEN
|
|
|
|
* HI_P -= (A < 0 ? B : 0)
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int64_t do_mulhsu_d(int64_t s2, uint64_t s1)
|
|
|
|
{
|
|
|
|
uint64_t hi_64, lo_64;
|
|
|
|
|
|
|
|
mulu64(&lo_64, &hi_64, s2, s1);
|
|
|
|
|
|
|
|
hi_64 -= s2 < 0 ? s1 : 0;
|
|
|
|
return hi_64;
|
|
|
|
}
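
/*
 * Illustrative sketch (not part of the helpers; the function name and the
 * VEXT_HELPER_EXAMPLES guard are hypothetical): the same signed-by-unsigned
 * correction at 8 bits, where the full 16-bit product is available, so the
 * identity derived in the comment above can be checked directly:
 * hi(s2 * s1) == hi_unsigned(s2, s1) - (s2 < 0 ? s1 : 0).
 */
#ifdef VEXT_HELPER_EXAMPLES
static bool example_mulhsu_identity(int8_t s2, uint8_t s1)
{
    /* Reference: widen both operands and take the true high byte. */
    int16_t wide = (int16_t)s2 * (uint16_t)s1;
    int8_t ref = wide >> 8;

    /* Same result via the unsigned product plus the correction term. */
    uint16_t uwide = (uint16_t)(uint8_t)s2 * (uint16_t)s1;
    int8_t adj = (uwide >> 8) - (s2 < 0 ? s1 : 0);

    return ref == adj;                  /* always true */
}
#endif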
|
|
|
|
|
|
|
|
RVVCALL(OPIVV2, vmulh_vv_b, OP_SSS_B, H1, H1, H1, do_mulh_b)
|
|
|
|
RVVCALL(OPIVV2, vmulh_vv_h, OP_SSS_H, H2, H2, H2, do_mulh_h)
|
|
|
|
RVVCALL(OPIVV2, vmulh_vv_w, OP_SSS_W, H4, H4, H4, do_mulh_w)
|
|
|
|
RVVCALL(OPIVV2, vmulh_vv_d, OP_SSS_D, H8, H8, H8, do_mulh_d)
|
|
|
|
RVVCALL(OPIVV2, vmulhu_vv_b, OP_UUU_B, H1, H1, H1, do_mulhu_b)
|
|
|
|
RVVCALL(OPIVV2, vmulhu_vv_h, OP_UUU_H, H2, H2, H2, do_mulhu_h)
|
|
|
|
RVVCALL(OPIVV2, vmulhu_vv_w, OP_UUU_W, H4, H4, H4, do_mulhu_w)
|
|
|
|
RVVCALL(OPIVV2, vmulhu_vv_d, OP_UUU_D, H8, H8, H8, do_mulhu_d)
|
|
|
|
RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
|
|
|
|
RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
|
|
|
|
RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
|
|
|
|
RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vmulh_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmulh_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmulh_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmulh_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vmulhu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmulhu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmulhu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmulhu_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vmulhsu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmulhsu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmulhsu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmulhsu_vv_d, 8)
|
2020-07-01 17:25:06 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vmul_vx_w, OP_SSS_W, H4, H4, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vmul_vx_d, OP_SSS_D, H8, H8, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vmulh_vx_b, OP_SSS_B, H1, H1, do_mulh_b)
|
|
|
|
RVVCALL(OPIVX2, vmulh_vx_h, OP_SSS_H, H2, H2, do_mulh_h)
|
|
|
|
RVVCALL(OPIVX2, vmulh_vx_w, OP_SSS_W, H4, H4, do_mulh_w)
|
|
|
|
RVVCALL(OPIVX2, vmulh_vx_d, OP_SSS_D, H8, H8, do_mulh_d)
|
|
|
|
RVVCALL(OPIVX2, vmulhu_vx_b, OP_UUU_B, H1, H1, do_mulhu_b)
|
|
|
|
RVVCALL(OPIVX2, vmulhu_vx_h, OP_UUU_H, H2, H2, do_mulhu_h)
|
|
|
|
RVVCALL(OPIVX2, vmulhu_vx_w, OP_UUU_W, H4, H4, do_mulhu_w)
|
|
|
|
RVVCALL(OPIVX2, vmulhu_vx_d, OP_UUU_D, H8, H8, do_mulhu_d)
|
|
|
|
RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
|
|
|
|
RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
|
|
|
|
RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
|
|
|
|
RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vmul_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmul_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmul_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmul_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmulh_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmulh_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmulh_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmulh_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmulhu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmulhu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmulhu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmulhu_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmulhsu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmulhsu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmulhsu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmulhsu_vx_d, 8)
|
2020-07-01 17:25:07 +02:00
|
|
|
|
|
|
|
/* Vector Integer Divide Instructions */
|
|
|
|
#define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M)
|
|
|
|
#define DO_REMU(N, M) (unlikely(M == 0) ? N : N % M)
|
2023-04-05 10:58:11 +02:00
|
|
|
#define DO_DIV(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : \
|
2020-07-01 17:25:07 +02:00
|
|
|
unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
|
2023-04-05 10:58:11 +02:00
|
|
|
#define DO_REM(N, M) (unlikely(M == 0) ? N : \
|
2020-07-01 17:25:07 +02:00
|
|
|
unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
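
/*
 * Illustrative sketch (not part of the helpers; the function names and the
 * VEXT_HELPER_EXAMPLES guard are hypothetical): the special cases DO_DIV and
 * DO_REM encode, spelled out for int8_t.  Note that "N == -N" holds for 0 as
 * well as for INT8_MIN, but for 0 the quotient is N either way, so the test
 * effectively catches only the INT8_MIN / -1 overflow.
 */
#ifdef VEXT_HELPER_EXAMPLES
static int8_t example_div8(int8_t n, int8_t m)
{
    if (m == 0) {
        return -1;                      /* x / 0 = all ones */
    }
    if (n == INT8_MIN && m == -1) {
        return n;                       /* overflow: INT8_MIN / -1 = INT8_MIN */
    }
    return n / m;
}

static int8_t example_rem8(int8_t n, int8_t m)
{
    if (m == 0) {
        return n;                       /* x % 0 = x */
    }
    if (n == INT8_MIN && m == -1) {
        return 0;                       /* overflow: remainder is 0 */
    }
    return n % m;
}
#endif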
|
|
|
|
|
|
|
|
RVVCALL(OPIVV2, vdivu_vv_b, OP_UUU_B, H1, H1, H1, DO_DIVU)
|
|
|
|
RVVCALL(OPIVV2, vdivu_vv_h, OP_UUU_H, H2, H2, H2, DO_DIVU)
|
|
|
|
RVVCALL(OPIVV2, vdivu_vv_w, OP_UUU_W, H4, H4, H4, DO_DIVU)
|
|
|
|
RVVCALL(OPIVV2, vdivu_vv_d, OP_UUU_D, H8, H8, H8, DO_DIVU)
|
|
|
|
RVVCALL(OPIVV2, vdiv_vv_b, OP_SSS_B, H1, H1, H1, DO_DIV)
|
|
|
|
RVVCALL(OPIVV2, vdiv_vv_h, OP_SSS_H, H2, H2, H2, DO_DIV)
|
|
|
|
RVVCALL(OPIVV2, vdiv_vv_w, OP_SSS_W, H4, H4, H4, DO_DIV)
|
|
|
|
RVVCALL(OPIVV2, vdiv_vv_d, OP_SSS_D, H8, H8, H8, DO_DIV)
|
|
|
|
RVVCALL(OPIVV2, vremu_vv_b, OP_UUU_B, H1, H1, H1, DO_REMU)
|
|
|
|
RVVCALL(OPIVV2, vremu_vv_h, OP_UUU_H, H2, H2, H2, DO_REMU)
|
|
|
|
RVVCALL(OPIVV2, vremu_vv_w, OP_UUU_W, H4, H4, H4, DO_REMU)
|
|
|
|
RVVCALL(OPIVV2, vremu_vv_d, OP_UUU_D, H8, H8, H8, DO_REMU)
|
|
|
|
RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM)
|
|
|
|
RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM)
|
|
|
|
RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM)
|
|
|
|
RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vdivu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vdivu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vdivu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vdivu_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vdiv_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vdiv_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vdiv_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vdiv_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vremu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vremu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vremu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vremu_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vrem_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vrem_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vrem_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vrem_vv_d, 8)
|
2020-07-01 17:25:07 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU)
|
|
|
|
RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU)
|
|
|
|
RVVCALL(OPIVX2, vdivu_vx_w, OP_UUU_W, H4, H4, DO_DIVU)
|
|
|
|
RVVCALL(OPIVX2, vdivu_vx_d, OP_UUU_D, H8, H8, DO_DIVU)
|
|
|
|
RVVCALL(OPIVX2, vdiv_vx_b, OP_SSS_B, H1, H1, DO_DIV)
|
|
|
|
RVVCALL(OPIVX2, vdiv_vx_h, OP_SSS_H, H2, H2, DO_DIV)
|
|
|
|
RVVCALL(OPIVX2, vdiv_vx_w, OP_SSS_W, H4, H4, DO_DIV)
|
|
|
|
RVVCALL(OPIVX2, vdiv_vx_d, OP_SSS_D, H8, H8, DO_DIV)
|
|
|
|
RVVCALL(OPIVX2, vremu_vx_b, OP_UUU_B, H1, H1, DO_REMU)
|
|
|
|
RVVCALL(OPIVX2, vremu_vx_h, OP_UUU_H, H2, H2, DO_REMU)
|
|
|
|
RVVCALL(OPIVX2, vremu_vx_w, OP_UUU_W, H4, H4, DO_REMU)
|
|
|
|
RVVCALL(OPIVX2, vremu_vx_d, OP_UUU_D, H8, H8, DO_REMU)
|
|
|
|
RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM)
|
|
|
|
RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM)
|
|
|
|
RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM)
|
|
|
|
RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vdivu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vdivu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vdivu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vdivu_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vdiv_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vdiv_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vdiv_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vdiv_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vremu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vremu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vremu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vremu_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vrem_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vrem_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vrem_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vrem_vx_d, 8)
|
2020-07-01 17:25:08 +02:00
|
|
|
|
|
|
|
/* Vector Widening Integer Multiply Instructions */
|
|
|
|
RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmul_vv_h, WOP_SSS_H, H4, H2, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmul_vv_w, WOP_SSS_W, H8, H4, H4, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmulu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmulu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vwmul_vv_b, 2)
|
|
|
|
GEN_VEXT_VV(vwmul_vv_h, 4)
|
|
|
|
GEN_VEXT_VV(vwmul_vv_w, 8)
|
|
|
|
GEN_VEXT_VV(vwmulu_vv_b, 2)
|
|
|
|
GEN_VEXT_VV(vwmulu_vv_h, 4)
|
|
|
|
GEN_VEXT_VV(vwmulu_vv_w, 8)
|
|
|
|
GEN_VEXT_VV(vwmulsu_vv_b, 2)
|
|
|
|
GEN_VEXT_VV(vwmulsu_vv_h, 4)
|
|
|
|
GEN_VEXT_VV(vwmulsu_vv_w, 8)
|
2020-07-01 17:25:08 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmul_vx_w, WOP_SSS_W, H8, H4, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmulu_vx_b, WOP_UUU_B, H2, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmulu_vx_h, WOP_UUU_H, H4, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL)
|
|
|
|
RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vwmul_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmul_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmul_vx_w, 8)
|
|
|
|
GEN_VEXT_VX(vwmulu_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmulu_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmulu_vx_w, 8)
|
|
|
|
GEN_VEXT_VX(vwmulsu_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmulsu_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmulsu_vx_w, 8)
|
2020-07-01 17:25:09 +02:00
|
|
|
|
|
|
|
/* Vector Single-Width Integer Multiply-Add Instructions */
|
2023-04-05 10:58:11 +02:00
|
|
|
#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
|
2020-07-01 17:25:09 +02:00
|
|
|
static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
|
|
|
|
{ \
|
|
|
|
TX1 s1 = *((T1 *)vs1 + HS1(i)); \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
TD d = *((TD *)vd + HD(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, s1, d); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DO_MACC(N, M, D) (M * N + D)
|
|
|
|
#define DO_NMSAC(N, M, D) (-(M * N) + D)
|
|
|
|
#define DO_MADD(N, M, D) (M * D + N)
|
|
|
|
#define DO_NMSUB(N, M, D) (-(M * D) + N)
|
|
|
|
RVVCALL(OPIVV3, vmacc_vv_b, OP_SSS_B, H1, H1, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vmacc_vv_h, OP_SSS_H, H2, H2, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vmacc_vv_w, OP_SSS_W, H4, H4, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vmacc_vv_d, OP_SSS_D, H8, H8, H8, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vnmsac_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVV3, vnmsac_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVV3, vnmsac_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVV3, vnmsac_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVV3, vmadd_vv_b, OP_SSS_B, H1, H1, H1, DO_MADD)
|
|
|
|
RVVCALL(OPIVV3, vmadd_vv_h, OP_SSS_H, H2, H2, H2, DO_MADD)
|
|
|
|
RVVCALL(OPIVV3, vmadd_vv_w, OP_SSS_W, H4, H4, H4, DO_MADD)
|
|
|
|
RVVCALL(OPIVV3, vmadd_vv_d, OP_SSS_D, H8, H8, H8, DO_MADD)
|
|
|
|
RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB)
|
|
|
|
RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB)
|
|
|
|
RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB)
|
|
|
|
RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vmacc_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmacc_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmacc_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmacc_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vnmsac_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vnmsac_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vnmsac_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vnmsac_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vmadd_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vmadd_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vmadd_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vmadd_vv_d, 8)
|
|
|
|
GEN_VEXT_VV(vnmsub_vv_b, 1)
|
|
|
|
GEN_VEXT_VV(vnmsub_vv_h, 2)
|
|
|
|
GEN_VEXT_VV(vnmsub_vv_w, 4)
|
|
|
|
GEN_VEXT_VV(vnmsub_vv_d, 8)
|
2020-07-01 17:25:09 +02:00
|
|
|
|
|
|
|
#define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
|
|
|
|
static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
|
|
|
|
{ \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
TD d = *((TD *)vd + HD(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d); \
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPIVX3, vmacc_vx_b, OP_SSS_B, H1, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vmacc_vx_h, OP_SSS_H, H2, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vmacc_vx_w, OP_SSS_W, H4, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vmacc_vx_d, OP_SSS_D, H8, H8, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vnmsac_vx_b, OP_SSS_B, H1, H1, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVX3, vnmsac_vx_h, OP_SSS_H, H2, H2, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVX3, vnmsac_vx_w, OP_SSS_W, H4, H4, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVX3, vnmsac_vx_d, OP_SSS_D, H8, H8, DO_NMSAC)
|
|
|
|
RVVCALL(OPIVX3, vmadd_vx_b, OP_SSS_B, H1, H1, DO_MADD)
|
|
|
|
RVVCALL(OPIVX3, vmadd_vx_h, OP_SSS_H, H2, H2, DO_MADD)
|
|
|
|
RVVCALL(OPIVX3, vmadd_vx_w, OP_SSS_W, H4, H4, DO_MADD)
|
|
|
|
RVVCALL(OPIVX3, vmadd_vx_d, OP_SSS_D, H8, H8, DO_MADD)
|
|
|
|
RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB)
|
|
|
|
RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB)
|
|
|
|
RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB)
|
|
|
|
RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vmacc_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmacc_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmacc_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmacc_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vnmsac_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vnmsac_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vnmsac_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vnmsac_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vmadd_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vmadd_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vmadd_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vmadd_vx_d, 8)
|
|
|
|
GEN_VEXT_VX(vnmsub_vx_b, 1)
|
|
|
|
GEN_VEXT_VX(vnmsub_vx_h, 2)
|
|
|
|
GEN_VEXT_VX(vnmsub_vx_w, 4)
|
|
|
|
GEN_VEXT_VX(vnmsub_vx_d, 8)
|
2020-07-01 17:25:10 +02:00
|
|
|
|
|
|
|
/* Vector Widening Integer Multiply-Add Instructions */
|
|
|
|
RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmaccu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmaccu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmacc_vv_b, WOP_SSS_B, H2, H1, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmacc_vv_h, WOP_SSS_H, H4, H2, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC)
|
2022-06-06 08:16:16 +02:00
|
|
|
GEN_VEXT_VV(vwmaccu_vv_b, 2)
|
|
|
|
GEN_VEXT_VV(vwmaccu_vv_h, 4)
|
|
|
|
GEN_VEXT_VV(vwmaccu_vv_w, 8)
|
|
|
|
GEN_VEXT_VV(vwmacc_vv_b, 2)
|
|
|
|
GEN_VEXT_VV(vwmacc_vv_h, 4)
|
|
|
|
GEN_VEXT_VV(vwmacc_vv_w, 8)
|
|
|
|
GEN_VEXT_VV(vwmaccsu_vv_b, 2)
|
|
|
|
GEN_VEXT_VV(vwmaccsu_vv_h, 4)
|
|
|
|
GEN_VEXT_VV(vwmaccsu_vv_w, 8)
|
2020-07-01 17:25:10 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccu_vx_w, WOP_UUU_W, H8, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmacc_vx_b, WOP_SSS_B, H2, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmacc_vx_h, WOP_SSS_H, H4, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmacc_vx_w, WOP_SSS_W, H8, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccsu_vx_b, WOP_SSU_B, H2, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccsu_vx_h, WOP_SSU_H, H4, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC)
|
|
|
|
RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX(vwmaccu_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmaccu_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmaccu_vx_w, 8)
|
|
|
|
GEN_VEXT_VX(vwmacc_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmacc_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmacc_vx_w, 8)
|
|
|
|
GEN_VEXT_VX(vwmaccsu_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmaccsu_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmaccsu_vx_w, 8)
|
|
|
|
GEN_VEXT_VX(vwmaccus_vx_b, 2)
|
|
|
|
GEN_VEXT_VX(vwmaccus_vx_h, 4)
|
|
|
|
GEN_VEXT_VX(vwmaccus_vx_w, 8)
|
2020-07-01 17:25:11 +02:00
|
|
|
|
|
|
|
/* Vector Integer Merge and Move Instructions */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VMV_VV(NAME, ETYPE, H) \
|
2020-07-01 17:25:11 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:51 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:11 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:11 +02:00
|
|
|
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
|
|
|
|
*((ETYPE *)vd + H(i)) = s1; \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:51 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:11 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1)
|
|
|
|
GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2)
|
|
|
|
GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4)
|
|
|
|
GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8)
|
2020-07-01 17:25:11 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VMV_VX(NAME, ETYPE, H) \
|
2020-07-01 17:25:11 +02:00
|
|
|
void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:51 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:11 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:11 +02:00
|
|
|
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:51 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:11 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1)
|
|
|
|
GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2)
|
|
|
|
GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4)
|
|
|
|
GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8)
|
2020-07-01 17:25:11 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H) \
|
2020-07-01 17:25:11 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:51 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:11 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
|
2020-07-01 17:25:11 +02:00
|
|
|
*((ETYPE *)vd + H(i)) = *(vt + H(i)); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:51 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:11 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1)
|
|
|
|
GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2)
|
|
|
|
GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4)
|
|
|
|
GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8)
|
2020-07-01 17:25:11 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H) \
|
2020-07-01 17:25:11 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
|
|
|
|
void *vs2, CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:51 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:11 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:11 +02:00
|
|
|
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
ETYPE d = (!vext_elem_mask(v0, i) ? s2 : \
|
2020-07-01 17:25:11 +02:00
|
|
|
(ETYPE)(target_long)s1); \
|
|
|
|
*((ETYPE *)vd + H(i)) = d; \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:51 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:11 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1)
|
|
|
|
GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2)
|
|
|
|
GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4)
|
|
|
|
GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
/*
|
2023-04-05 10:58:12 +02:00
|
|
|
* Vector Fixed-Point Arithmetic Instructions
|
2020-07-01 17:25:12 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Vector Single-Width Saturating Add and Subtract */
|
|
|
|
|
|
|
|
/*
 * As fixed-point instructions usually have a rounding mode and saturation,
 * define common macros for fixed point here.
 */
|
|
|
|
typedef void opivv2_rm_fn(void *vd, void *vs1, void *vs2, int i,
|
|
|
|
CPURISCVState *env, int vxrm);
|
|
|
|
|
|
|
|
#define OPIVV2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
|
|
|
|
static inline void \
|
|
|
|
do_##NAME(void *vd, void *vs1, void *vs2, int i, \
|
|
|
|
CPURISCVState *env, int vxrm) \
|
|
|
|
{ \
|
|
|
|
TX1 s1 = *((T1 *)vs1 + HS1(i)); \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1); \
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
|
|
|
|
CPURISCVState *env,
|
2021-12-10 08:55:58 +01:00
|
|
|
uint32_t vl, uint32_t vm, int vxrm,
|
2022-06-20 08:51:11 +02:00
|
|
|
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
2021-12-10 08:56:52 +01:00
|
|
|
for (uint32_t i = env->vstart; i < vl; i++) {
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) {
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
fn(vd, vs1, vs2, i, env, vxrm);
|
|
|
|
}
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0;
|
2020-07-01 17:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
|
|
|
|
CPURISCVState *env,
|
2022-06-06 08:16:16 +02:00
|
|
|
uint32_t desc,
|
2022-06-06 08:16:38 +02:00
|
|
|
opivv2_rm_fn *fn, uint32_t esz)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
uint32_t vm = vext_vm(desc);
|
|
|
|
uint32_t vl = env->vl;
|
2022-06-06 08:16:38 +02:00
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
|
|
|
|
uint32_t vta = vext_vta(desc);
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc);
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
switch (env->vxrm) {
|
|
|
|
case 0: /* rnu */
|
|
|
|
vext_vv_rm_1(vd, v0, vs1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 0, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
case 1: /* rne */
|
|
|
|
vext_vv_rm_1(vd, v0, vs1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 1, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
case 2: /* rdn */
|
|
|
|
vext_vv_rm_1(vd, v0, vs1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 2, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
default: /* rod */
|
|
|
|
vext_vv_rm_1(vd, v0, vs1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 3, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
}
|
2022-06-06 08:16:38 +02:00
|
|
|
/* set tail elements to 1s */
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* generate helpers for fixed point instructions with OPIVV format */
|
2022-06-06 08:16:38 +02:00
|
|
|
#define GEN_VEXT_VV_RM(NAME, ESZ) \
|
2020-07-01 17:25:12 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, \
|
2022-06-06 08:16:38 +02:00
|
|
|
do_##NAME, ESZ); \
|
2020-07-01 17:25:12 +02:00
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a,
|
|
|
|
uint8_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
uint8_t res = a + b;
|
|
|
|
if (res < a) {
|
|
|
|
res = UINT8_MAX;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
|
|
|
|
uint16_t b)
|
|
|
|
{
|
|
|
|
uint16_t res = a + b;
|
|
|
|
if (res < a) {
|
|
|
|
res = UINT16_MAX;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
|
|
|
|
uint32_t b)
|
|
|
|
{
|
|
|
|
uint32_t res = a + b;
|
|
|
|
if (res < a) {
|
|
|
|
res = UINT32_MAX;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a,
|
|
|
|
uint64_t b)
|
|
|
|
{
|
|
|
|
uint64_t res = a + b;
|
|
|
|
if (res < a) {
|
|
|
|
res = UINT64_MAX;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
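
/*
 * Illustrative sketch (not part of the helpers; the function name and the
 * VEXT_HELPER_EXAMPLES guard are hypothetical): unsigned wraparound is
 * detected by "res < a" alone, because an n-bit unsigned sum can wrap at
 * most once, which makes the wrapped result smaller than either operand.
 */
#ifdef VEXT_HELPER_EXAMPLES
static void example_saddu8(void)
{
    uint8_t a = 200, b = 100;
    uint8_t res = a + b;                /* wraps to 44 */
    bool sat = res < a;                 /* true: saturate to UINT8_MAX */
    (void)sat;
}
#endif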
|
|
|
|
|
|
|
|
RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8)
|
|
|
|
RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16)
|
|
|
|
RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32)
|
|
|
|
RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vsaddu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vsaddu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vsaddu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_RM(vsaddu_vv_d, 8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i,
|
|
|
|
CPURISCVState *env, int vxrm);
|
|
|
|
|
|
|
|
#define OPIVX2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
|
|
|
|
static inline void \
|
|
|
|
do_##NAME(void *vd, target_long s1, void *vs2, int i, \
|
|
|
|
CPURISCVState *env, int vxrm) \
|
|
|
|
{ \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1); \
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
|
|
|
|
CPURISCVState *env,
|
2021-12-10 08:55:58 +01:00
|
|
|
uint32_t vl, uint32_t vm, int vxrm,
|
2022-06-20 08:51:11 +02:00
|
|
|
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
2021-12-10 08:56:52 +01:00
|
|
|
for (uint32_t i = env->vstart; i < vl; i++) {
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) {
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
fn(vd, s1, vs2, i, env, vxrm);
|
|
|
|
}
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0;
|
2020-07-01 17:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
|
|
|
|
CPURISCVState *env,
|
2022-06-06 08:16:16 +02:00
|
|
|
uint32_t desc,
|
2022-06-06 08:16:38 +02:00
|
|
|
opivx2_rm_fn *fn, uint32_t esz)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
uint32_t vm = vext_vm(desc);
|
|
|
|
uint32_t vl = env->vl;
|
2022-06-06 08:16:38 +02:00
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
|
|
|
|
uint32_t vta = vext_vta(desc);
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc);
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
switch (env->vxrm) {
|
|
|
|
case 0: /* rnu */
|
|
|
|
vext_vx_rm_1(vd, v0, s1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 0, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
case 1: /* rne */
|
|
|
|
vext_vx_rm_1(vd, v0, s1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 1, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
case 2: /* rdn */
|
|
|
|
vext_vx_rm_1(vd, v0, s1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 2, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
default: /* rod */
|
|
|
|
vext_vx_rm_1(vd, v0, s1, vs2,
|
2022-06-20 08:51:11 +02:00
|
|
|
env, vl, vm, 3, fn, vma, esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
break;
|
|
|
|
}
|
2022-06-06 08:16:38 +02:00
|
|
|
/* set tail elements to 1s */
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
|
2020-07-01 17:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* generate helpers for fixed point instructions with OPIVX format */
|
2022-06-06 08:16:38 +02:00
|
|
|
#define GEN_VEXT_VX_RM(NAME, ESZ) \
|
2020-07-01 17:25:12 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
|
2023-04-05 10:58:11 +02:00
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
2020-07-01 17:25:12 +02:00
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
vext_vx_rm_2(vd, v0, s1, vs2, env, desc, \
|
2022-06-06 08:16:38 +02:00
|
|
|
do_##NAME, ESZ); \
|
2020-07-01 17:25:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8)
|
|
|
|
RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16)
|
|
|
|
RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32)
|
|
|
|
RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vsaddu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vsaddu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vsaddu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX_RM(vsaddu_vx_d, 8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
|
|
|
|
{
|
|
|
|
int8_t res = a + b;
|
|
|
|
if ((res ^ a) & (res ^ b) & INT8_MIN) {
|
|
|
|
res = a > 0 ? INT8_MAX : INT8_MIN;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a,
|
|
|
|
int16_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
int16_t res = a + b;
|
|
|
|
if ((res ^ a) & (res ^ b) & INT16_MIN) {
|
|
|
|
res = a > 0 ? INT16_MAX : INT16_MIN;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a,
|
|
|
|
int32_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
int32_t res = a + b;
|
|
|
|
if ((res ^ a) & (res ^ b) & INT32_MIN) {
|
|
|
|
res = a > 0 ? INT32_MAX : INT32_MIN;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a,
|
|
|
|
int64_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
int64_t res = a + b;
|
|
|
|
if ((res ^ a) & (res ^ b) & INT64_MIN) {
|
|
|
|
res = a > 0 ? INT64_MAX : INT64_MIN;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
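
/*
 * Illustrative sketch (not part of the helpers; the function name and the
 * VEXT_HELPER_EXAMPLES guard are hypothetical): signed overflow happens only
 * when both operands have the same sign and the result's sign differs, which
 * is exactly when "(res ^ a) & (res ^ b)" has the sign bit set.
 */
#ifdef VEXT_HELPER_EXAMPLES
static bool example_sadd8_overflows(int8_t a, int8_t b)
{
    int8_t res = a + b;                 /* wraps on overflow, as in sadd8() */
    return ((res ^ a) & (res ^ b) & INT8_MIN) != 0;
}
#endif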
|
|
|
|
|
|
|
|
RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8)
|
|
|
|
RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16)
|
|
|
|
RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32)
|
|
|
|
RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vsadd_vv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vsadd_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vsadd_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_RM(vsadd_vv_d, 8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8)
|
|
|
|
RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16)
|
|
|
|
RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32)
|
|
|
|
RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vsadd_vx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vsadd_vx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vsadd_vx_w, 4)
|
|
|
|
GEN_VEXT_VX_RM(vsadd_vx_d, 8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a,
|
|
|
|
uint8_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
uint8_t res = a - b;
|
|
|
|
if (res > a) {
|
|
|
|
res = 0;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
|
|
|
|
uint16_t b)
|
|
|
|
{
|
|
|
|
uint16_t res = a - b;
|
|
|
|
if (res > a) {
|
|
|
|
res = 0;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
|
|
|
|
uint32_t b)
|
|
|
|
{
|
|
|
|
uint32_t res = a - b;
|
|
|
|
if (res > a) {
|
|
|
|
res = 0;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a,
|
|
|
|
uint64_t b)
|
|
|
|
{
|
|
|
|
uint64_t res = a - b;
|
|
|
|
if (res > a) {
|
|
|
|
res = 0;
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8)
|
|
|
|
RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16)
|
|
|
|
RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32)
|
|
|
|
RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vssubu_vv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vssubu_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vssubu_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_RM(vssubu_vv_d, 8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8)
|
|
|
|
RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16)
|
|
|
|
RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32)
|
|
|
|
RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vssubu_vx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vssubu_vx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vssubu_vx_w, 4)
|
|
|
|
GEN_VEXT_VX_RM(vssubu_vx_d, 8)
|
2020-07-01 17:25:12 +02:00
|
|
|
|
|
|
|
static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
|
|
|
|
{
|
|
|
|
int8_t res = a - b;
|
|
|
|
if ((res ^ a) & (a ^ b) & INT8_MIN) {
|
2021-02-12 16:02:21 +01:00
|
|
|
res = a >= 0 ? INT8_MAX : INT8_MIN;
|
2020-07-01 17:25:12 +02:00
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a,
|
|
|
|
int16_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
int16_t res = a - b;
|
|
|
|
if ((res ^ a) & (a ^ b) & INT16_MIN) {
|
2021-02-12 16:02:21 +01:00
|
|
|
res = a >= 0 ? INT16_MAX : INT16_MIN;
|
2020-07-01 17:25:12 +02:00
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a,
|
|
|
|
int32_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
int32_t res = a - b;
|
|
|
|
if ((res ^ a) & (a ^ b) & INT32_MIN) {
|
2021-02-12 16:02:21 +01:00
|
|
|
res = a >= 0 ? INT32_MAX : INT32_MIN;
|
2020-07-01 17:25:12 +02:00
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a,
|
|
|
|
int64_t b)
|
2020-07-01 17:25:12 +02:00
|
|
|
{
|
|
|
|
int64_t res = a - b;
|
|
|
|
if ((res ^ a) & (a ^ b) & INT64_MIN) {
|
2021-02-12 16:02:21 +01:00
|
|
|
res = a >= 0 ? INT64_MAX : INT64_MIN;
|
2020-07-01 17:25:12 +02:00
|
|
|
env->vxsat = 0x1;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}

RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8)
RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16)
RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32)
RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64)
GEN_VEXT_VV_RM(vssub_vv_b, 1)
GEN_VEXT_VV_RM(vssub_vv_h, 2)
GEN_VEXT_VV_RM(vssub_vv_w, 4)
GEN_VEXT_VV_RM(vssub_vv_d, 8)

RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8)
RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16)
RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32)
RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64)
GEN_VEXT_VX_RM(vssub_vx_b, 1)
GEN_VEXT_VX_RM(vssub_vx_h, 2)
GEN_VEXT_VX_RM(vssub_vx_w, 4)
GEN_VEXT_VX_RM(vssub_vx_d, 8)

/* Vector Single-Width Averaging Add and Subtract */
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
{
    uint8_t d = extract64(v, shift, 1);
    uint8_t d1;
    uint64_t D1, D2;

    if (shift == 0 || shift > 64) {
        return 0;
    }

    d1 = extract64(v, shift - 1, 1);
    D1 = extract64(v, 0, shift);
    if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */
        return d1;
    } else if (vxrm == 1) { /* round-to-nearest-even */
        if (shift > 1) {
            D2 = extract64(v, 0, shift - 1);
            return d1 & ((D2 != 0) | d);
        } else {
            return d1 & d;
        }
    } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
        return !d & (D1 != 0);
    }
    return 0; /* round-down (truncate) */
}
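
/*
 * get_round() returns the 0/1 increment that implements the four vxrm
 * rounding modes on a value about to be shifted right by 'shift'.
 * Worked example with v = 0b1011 and shift = 2 (discarded bits 0b11,
 * i.e. 0.75 ulp): d = bit 2 = 0, d1 = bit 1 = 1, D1 = 0b11, D2 = 0b1.
 *   vxrm 0 (rnu): round = d1 = 1
 *   vxrm 1 (rne): round = d1 & ((D2 != 0) | d) = 1
 *   vxrm 2 (rdn): round = 0 (truncate)
 *   vxrm 3 (rod): round = !d & (D1 != 0) = 1 (jam into the LSB)
 * The caller adds the returned value to (v >> shift).
 */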

static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a,
                             int32_t b)
{
    int64_t res = (int64_t)a + b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a,
                             int64_t b)
{
    int64_t res = a + b;
    uint8_t round = get_round(vxrm, res, 1);
    int64_t over = (res ^ a) & (res ^ b) & INT64_MIN;

    /* With signed overflow, bit 64 is inverse of bit 63. */
    return ((res >> 1) ^ over) + round;
}
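
/*
 * aadd64 forms the 65-bit sum without a wider type: when the signed
 * addition overflows, the true bit 64 of a + b is the inverse of bit
 * 63 of the wrapped result, so XOR-ing (res >> 1) with 'over' (0 or
 * INT64_MIN) restores the correct upper half.  Example: a = b =
 * INT64_MAX wraps res to -2, over = INT64_MIN, (res >> 1) ^ over =
 * INT64_MAX, and the discarded bit is 0, giving the exact average.
 */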

RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
GEN_VEXT_VV_RM(vaadd_vv_b, 1)
GEN_VEXT_VV_RM(vaadd_vv_h, 2)
GEN_VEXT_VV_RM(vaadd_vv_w, 4)
GEN_VEXT_VV_RM(vaadd_vv_d, 8)

RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
GEN_VEXT_VX_RM(vaadd_vx_b, 1)
GEN_VEXT_VX_RM(vaadd_vx_h, 2)
GEN_VEXT_VX_RM(vaadd_vx_w, 4)
GEN_VEXT_VX_RM(vaadd_vx_d, 8)

static inline uint32_t aaddu32(CPURISCVState *env, int vxrm,
                               uint32_t a, uint32_t b)
{
    uint64_t res = (uint64_t)a + b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

static inline uint64_t aaddu64(CPURISCVState *env, int vxrm,
                               uint64_t a, uint64_t b)
{
    uint64_t res = a + b;
    uint8_t round = get_round(vxrm, res, 1);
    uint64_t over = (uint64_t)(res < a) << 63;

    return ((res >> 1) | over) + round;
}
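
/*
 * Unsigned variant: (res < a) recovers the carry out of bit 63, i.e.
 * bit 64 of the exact sum, which is OR-ed back in as the MSB of the
 * halved result.  Example: a = b = UINT64_MAX wraps res to
 * 0xfffffffffffffffe, over = 1 << 63, and ((res >> 1) | over) yields
 * UINT64_MAX, the exact average (the discarded bit is 0).
 */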

RVVCALL(OPIVV2_RM, vaaddu_vv_b, OP_UUU_B, H1, H1, H1, aaddu32)
RVVCALL(OPIVV2_RM, vaaddu_vv_h, OP_UUU_H, H2, H2, H2, aaddu32)
RVVCALL(OPIVV2_RM, vaaddu_vv_w, OP_UUU_W, H4, H4, H4, aaddu32)
RVVCALL(OPIVV2_RM, vaaddu_vv_d, OP_UUU_D, H8, H8, H8, aaddu64)
GEN_VEXT_VV_RM(vaaddu_vv_b, 1)
GEN_VEXT_VV_RM(vaaddu_vv_h, 2)
GEN_VEXT_VV_RM(vaaddu_vv_w, 4)
GEN_VEXT_VV_RM(vaaddu_vv_d, 8)

RVVCALL(OPIVX2_RM, vaaddu_vx_b, OP_UUU_B, H1, H1, aaddu32)
RVVCALL(OPIVX2_RM, vaaddu_vx_h, OP_UUU_H, H2, H2, aaddu32)
RVVCALL(OPIVX2_RM, vaaddu_vx_w, OP_UUU_W, H4, H4, aaddu32)
RVVCALL(OPIVX2_RM, vaaddu_vx_d, OP_UUU_D, H8, H8, aaddu64)
GEN_VEXT_VX_RM(vaaddu_vx_b, 1)
GEN_VEXT_VX_RM(vaaddu_vx_h, 2)
GEN_VEXT_VX_RM(vaaddu_vx_w, 4)
GEN_VEXT_VX_RM(vaaddu_vx_d, 8)

static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a,
                             int32_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a,
                             int64_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);
    int64_t over = (res ^ a) & (a ^ b) & INT64_MIN;

    /* With signed overflow, bit 64 is inverse of bit 63. */
    return ((res >> 1) ^ over) + round;
}

RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
GEN_VEXT_VV_RM(vasub_vv_b, 1)
GEN_VEXT_VV_RM(vasub_vv_h, 2)
GEN_VEXT_VV_RM(vasub_vv_w, 4)
GEN_VEXT_VV_RM(vasub_vv_d, 8)

RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
GEN_VEXT_VX_RM(vasub_vx_b, 1)
GEN_VEXT_VX_RM(vasub_vx_h, 2)
GEN_VEXT_VX_RM(vasub_vx_w, 4)
GEN_VEXT_VX_RM(vasub_vx_d, 8)

static inline uint32_t asubu32(CPURISCVState *env, int vxrm,
                               uint32_t a, uint32_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

static inline uint64_t asubu64(CPURISCVState *env, int vxrm,
                               uint64_t a, uint64_t b)
{
    uint64_t res = (uint64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);
    uint64_t over = (uint64_t)(res > a) << 63;

    return ((res >> 1) | over) + round;
}

RVVCALL(OPIVV2_RM, vasubu_vv_b, OP_UUU_B, H1, H1, H1, asubu32)
RVVCALL(OPIVV2_RM, vasubu_vv_h, OP_UUU_H, H2, H2, H2, asubu32)
RVVCALL(OPIVV2_RM, vasubu_vv_w, OP_UUU_W, H4, H4, H4, asubu32)
RVVCALL(OPIVV2_RM, vasubu_vv_d, OP_UUU_D, H8, H8, H8, asubu64)
GEN_VEXT_VV_RM(vasubu_vv_b, 1)
GEN_VEXT_VV_RM(vasubu_vv_h, 2)
GEN_VEXT_VV_RM(vasubu_vv_w, 4)
GEN_VEXT_VV_RM(vasubu_vv_d, 8)

RVVCALL(OPIVX2_RM, vasubu_vx_b, OP_UUU_B, H1, H1, asubu32)
RVVCALL(OPIVX2_RM, vasubu_vx_h, OP_UUU_H, H2, H2, asubu32)
RVVCALL(OPIVX2_RM, vasubu_vx_w, OP_UUU_W, H4, H4, asubu32)
RVVCALL(OPIVX2_RM, vasubu_vx_d, OP_UUU_D, H8, H8, asubu64)
GEN_VEXT_VX_RM(vasubu_vx_b, 1)
GEN_VEXT_VX_RM(vasubu_vx_h, 2)
GEN_VEXT_VX_RM(vasubu_vx_w, 4)
GEN_VEXT_VX_RM(vasubu_vx_d, 8)

/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
    uint8_t round;
    int16_t res;

    res = (int16_t)a * (int16_t)b;
    round = get_round(vxrm, res, 7);
    res = (res >> 7) + round;

    if (res > INT8_MAX) {
        env->vxsat = 0x1;
        return INT8_MAX;
    } else if (res < INT8_MIN) {
        env->vxsat = 0x1;
        return INT8_MIN;
    } else {
        return res;
    }
}

static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
    uint8_t round;
    int32_t res;

    res = (int32_t)a * (int32_t)b;
    round = get_round(vxrm, res, 15);
    res = (res >> 15) + round;

    if (res > INT16_MAX) {
        env->vxsat = 0x1;
        return INT16_MAX;
    } else if (res < INT16_MIN) {
        env->vxsat = 0x1;
        return INT16_MIN;
    } else {
        return res;
    }
}

static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    uint8_t round;
    int64_t res;

    res = (int64_t)a * (int64_t)b;
    round = get_round(vxrm, res, 31);
    res = (res >> 31) + round;

    if (res > INT32_MAX) {
        env->vxsat = 0x1;
        return INT32_MAX;
    } else if (res < INT32_MIN) {
        env->vxsat = 0x1;
        return INT32_MIN;
    } else {
        return res;
    }
}

static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    uint8_t round;
    uint64_t hi_64, lo_64;
    int64_t res;

    if (a == INT64_MIN && b == INT64_MIN) {
        env->vxsat = 1;
        return INT64_MAX;
    }

    muls64(&lo_64, &hi_64, a, b);
    round = get_round(vxrm, lo_64, 63);
    /*
     * Cannot overflow, as there are always
     * 2 sign bits after multiply.
     */
    res = (hi_64 << 1) | (lo_64 >> 63);
    if (round) {
        if (res == INT64_MAX) {
            env->vxsat = 1;
        } else {
            res += 1;
        }
    }
    return res;
}
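
/*
 * For SEW=64 the Q63 product is taken from the 128-bit result of
 * muls64(): (hi_64 << 1) | (lo_64 >> 63) is bits 126..63 of the
 * product.  Apart from INT64_MIN * INT64_MIN (handled above, the one
 * case whose ideal result 2^63 exceeds INT64_MAX), a signed 64x64
 * product always has two identical top bits, so this extraction cannot
 * overflow; the only remaining saturation is the rounding increment
 * stepping past INT64_MAX.
 */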

RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
GEN_VEXT_VV_RM(vsmul_vv_b, 1)
GEN_VEXT_VV_RM(vsmul_vv_h, 2)
GEN_VEXT_VV_RM(vsmul_vv_w, 4)
GEN_VEXT_VV_RM(vsmul_vv_d, 8)

RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
GEN_VEXT_VX_RM(vsmul_vx_b, 1)
GEN_VEXT_VX_RM(vsmul_vx_h, 2)
GEN_VEXT_VX_RM(vsmul_vx_w, 4)
GEN_VEXT_VX_RM(vsmul_vx_d, 8)

/* Vector Single-Width Scaling Shift Instructions */
|
|
|
|
static inline uint8_t
|
|
|
|
vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x7;
|
|
|
|
uint8_t res;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
static inline uint16_t
|
|
|
|
vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0xf;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
|
|
|
static inline uint32_t
|
|
|
|
vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x1f;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
|
|
|
static inline uint64_t
|
|
|
|
vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x3f;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
|
|
|
RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
|
|
|
|
RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16)
|
|
|
|
RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
|
|
|
|
RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vssrl_vv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vssrl_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vssrl_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_RM(vssrl_vv_d, 8)
|
2020-07-01 17:25:16 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
|
|
|
|
RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
|
|
|
|
RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
|
|
|
|
RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vssrl_vx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vssrl_vx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vssrl_vx_w, 4)
|
|
|
|
GEN_VEXT_VX_RM(vssrl_vx_d, 8)
|
2020-07-01 17:25:16 +02:00
|
|
|
|
|
|
|
static inline int8_t
|
|
|
|
vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x7;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
|
|
|
static inline int16_t
|
|
|
|
vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0xf;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
|
|
|
static inline int32_t
|
|
|
|
vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x1f;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
|
|
|
static inline int64_t
|
|
|
|
vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x3f;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2022-11-22 14:49:16 +01:00
|
|
|
return (a >> shift) + round;
|
2020-07-01 17:25:16 +02:00
|
|
|
}
|
2020-07-01 17:25:17 +02:00
|
|
|
|
2020-07-01 17:25:16 +02:00
|
|
|
RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
|
|
|
|
RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
|
|
|
|
RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
|
|
|
|
RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vssra_vv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vssra_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vssra_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_RM(vssra_vv_d, 8)
|
2020-07-01 17:25:16 +02:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
|
|
|
|
RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
|
|
|
|
RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
|
|
|
|
RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vssra_vx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vssra_vx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vssra_vx_w, 4)
|
|
|
|
GEN_VEXT_VX_RM(vssra_vx_d, 8)
|
2020-07-01 17:25:17 +02:00
|
|
|
|
|
|
|
/* Vector Narrowing Fixed-Point Clip Instructions */
|
|
|
|
static inline int8_t
|
|
|
|
vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0xf;
|
|
|
|
int16_t res;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:17 +02:00
|
|
|
if (res > INT8_MAX) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return INT8_MAX;
|
|
|
|
} else if (res < INT8_MIN) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return INT8_MIN;
|
|
|
|
} else {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int16_t
|
|
|
|
vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x1f;
|
|
|
|
int32_t res;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:17 +02:00
|
|
|
if (res > INT16_MAX) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return INT16_MAX;
|
|
|
|
} else if (res < INT16_MIN) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return INT16_MIN;
|
|
|
|
} else {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int32_t
|
|
|
|
vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x3f;
|
|
|
|
int64_t res;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:17 +02:00
|
|
|
if (res > INT32_MAX) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return INT32_MAX;
|
|
|
|
} else if (res < INT32_MIN) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return INT32_MIN;
|
|
|
|
} else {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:38 +01:00
|
|
|
RVVCALL(OPIVV2_RM, vnclip_wv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
|
|
|
|
RVVCALL(OPIVV2_RM, vnclip_wv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
|
|
|
|
RVVCALL(OPIVV2_RM, vnclip_wv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vnclip_wv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vnclip_wv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vnclip_wv_w, 4)
|
2021-12-10 08:56:38 +01:00
|
|
|
|
|
|
|
RVVCALL(OPIVX2_RM, vnclip_wx_b, NOP_SSS_B, H1, H2, vnclip8)
|
|
|
|
RVVCALL(OPIVX2_RM, vnclip_wx_h, NOP_SSS_H, H2, H4, vnclip16)
|
|
|
|
RVVCALL(OPIVX2_RM, vnclip_wx_w, NOP_SSS_W, H4, H8, vnclip32)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vnclip_wx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vnclip_wx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vnclip_wx_w, 4)
|
2020-07-01 17:25:17 +02:00
|
|
|
|
|
|
|
static inline uint8_t
|
|
|
|
vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0xf;
|
|
|
|
uint16_t res;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:17 +02:00
|
|
|
if (res > UINT8_MAX) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return UINT8_MAX;
|
|
|
|
} else {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint16_t
|
|
|
|
vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x1f;
|
|
|
|
uint32_t res;
|
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:17 +02:00
|
|
|
if (res > UINT16_MAX) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return UINT16_MAX;
|
|
|
|
} else {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline uint32_t
|
|
|
|
vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
|
|
|
|
{
|
|
|
|
uint8_t round, shift = b & 0x3f;
|
2021-12-10 08:56:38 +01:00
|
|
|
uint64_t res;
|
2020-07-01 17:25:17 +02:00
|
|
|
|
|
|
|
round = get_round(vxrm, a, shift);
|
2023-04-05 10:58:11 +02:00
|
|
|
res = (a >> shift) + round;
|
2020-07-01 17:25:17 +02:00
|
|
|
if (res > UINT32_MAX) {
|
|
|
|
env->vxsat = 0x1;
|
|
|
|
return UINT32_MAX;
|
|
|
|
} else {
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:38 +01:00
|
|
|
RVVCALL(OPIVV2_RM, vnclipu_wv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
|
|
|
|
RVVCALL(OPIVV2_RM, vnclipu_wv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
|
|
|
|
RVVCALL(OPIVV2_RM, vnclipu_wv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VV_RM(vnclipu_wv_b, 1)
|
|
|
|
GEN_VEXT_VV_RM(vnclipu_wv_h, 2)
|
|
|
|
GEN_VEXT_VV_RM(vnclipu_wv_w, 4)
|
2020-07-01 17:25:17 +02:00
|
|
|
|
2021-12-10 08:56:38 +01:00
|
|
|
RVVCALL(OPIVX2_RM, vnclipu_wx_b, NOP_UUU_B, H1, H2, vnclipu8)
|
|
|
|
RVVCALL(OPIVX2_RM, vnclipu_wx_h, NOP_UUU_H, H2, H4, vnclipu16)
|
|
|
|
RVVCALL(OPIVX2_RM, vnclipu_wx_w, NOP_UUU_W, H4, H8, vnclipu32)
|
2022-06-06 08:16:38 +02:00
|
|
|
GEN_VEXT_VX_RM(vnclipu_wx_b, 1)
|
|
|
|
GEN_VEXT_VX_RM(vnclipu_wx_h, 2)
|
|
|
|
GEN_VEXT_VX_RM(vnclipu_wx_w, 4)

/*
 * Vector Floating-Point Arithmetic Instructions
 */

/* Vector Single-Width Floating-Point Add/Subtract Instructions */
|
|
|
|
#define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
|
|
|
|
static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
|
|
|
|
CPURISCVState *env) \
|
|
|
|
{ \
|
|
|
|
TX1 s1 = *((T1 *)vs1 + HS1(i)); \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \
|
|
|
|
}
|
|
|
|
|
2022-06-06 08:16:56 +02:00
|
|
|
#define GEN_VEXT_VV_ENV(NAME, ESZ) \
|
2020-07-01 17:25:18 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
|
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t total_elems = \
|
|
|
|
vext_get_total_elems(env, desc, ESZ); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:18 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * ESZ, \
|
|
|
|
(i + 1) * ESZ); \
|
2020-07-01 17:25:18 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
do_##NAME(vd, vs1, vs2, i, env); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * ESZ, \
|
|
|
|
total_elems * ESZ); \
|
2020-07-01 17:25:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
|
|
|
|
RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add)
|
|
|
|
RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfadd_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfadd_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfadd_vv_d, 8)
|
2020-07-01 17:25:18 +02:00
|
|
|
|
|
|
|
#define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
|
|
|
|
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
|
|
|
|
CPURISCVState *env) \
|
|
|
|
{ \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
|
|
|
|
}
|
|
|
|
|
2022-06-06 08:16:56 +02:00
|
|
|
#define GEN_VEXT_VF(NAME, ESZ) \
|
2020-07-01 17:25:18 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
|
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t total_elems = \
|
2023-04-05 10:58:11 +02:00
|
|
|
vext_get_total_elems(env, desc, ESZ); \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:18 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * ESZ, \
|
|
|
|
(i + 1) * ESZ); \
|
2020-07-01 17:25:18 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
do_##NAME(vd, s1, vs2, i, env); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * ESZ, \
|
|
|
|
total_elems * ESZ); \
|
2020-07-01 17:25:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
|
|
|
|
RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add)
|
|
|
|
RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfadd_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfadd_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfadd_vf_d, 8)
|
2020-07-01 17:25:18 +02:00
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub)
|
|
|
|
RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub)
|
|
|
|
RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfsub_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfsub_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfsub_vv_d, 8)
|
2020-07-01 17:25:18 +02:00
|
|
|
RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub)
|
|
|
|
RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub)
|
|
|
|
RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfsub_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfsub_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfsub_vf_d, 8)
|
2020-07-01 17:25:18 +02:00
|
|
|
|
|
|
|
static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_sub(b, a, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t float32_rsub(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_sub(b, a, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_sub(b, a, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub)
|
|
|
|
RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub)
|
|
|
|
RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfrsub_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfrsub_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfrsub_vf_d, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
|
|
|
|
/* Vector Widening Floating-Point Add/Subtract Instructions */
|
|
|
|
static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_add(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), s);
|
2020-07-01 17:25:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_add(float32_to_float64(a, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float32_to_float64(b, s), s);
|
2020-07-01 17:25:19 +02:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16)
|
|
|
|
RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwadd_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwadd_vv_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16)
|
|
|
|
RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwadd_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwadd_vf_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
|
|
|
|
static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_sub(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), s);
|
2020-07-01 17:25:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_sub(float32_to_float64(a, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float32_to_float64(b, s), s);
|
2020-07-01 17:25:19 +02:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16)
|
|
|
|
RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwsub_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwsub_vv_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16)
|
|
|
|
RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwsub_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwsub_vf_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
|
|
|
|
static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_add(a, float16_to_float32(b, true, s), s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_add(a, float32_to_float64(b, s), s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16)
|
|
|
|
RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwadd_wv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwadd_wv_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16)
|
|
|
|
RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwadd_wf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwadd_wf_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
|
|
|
|
static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_sub(a, float16_to_float32(b, true, s), s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_sub(a, float32_to_float64(b, s), s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16)
|
|
|
|
RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwsub_wv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwsub_wv_w, 8)
|
2020-07-01 17:25:19 +02:00
|
|
|
RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16)
|
|
|
|
RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwsub_wf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwsub_wf_w, 8)
|
2020-07-01 17:25:20 +02:00
|
|
|
|
|
|
|
/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
|
|
|
|
RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul)
|
|
|
|
RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul)
|
|
|
|
RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmul_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmul_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmul_vv_d, 8)
|
2020-07-01 17:25:20 +02:00
|
|
|
RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul)
|
|
|
|
RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul)
|
|
|
|
RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmul_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmul_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmul_vf_d, 8)
|
2020-07-01 17:25:20 +02:00
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div)
|
|
|
|
RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div)
|
|
|
|
RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfdiv_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfdiv_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfdiv_vv_d, 8)
|
2020-07-01 17:25:20 +02:00
|
|
|
RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div)
|
|
|
|
RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div)
|
|
|
|
RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfdiv_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfdiv_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfdiv_vf_d, 8)
|
2020-07-01 17:25:20 +02:00
|
|
|
|
|
|
|
static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_div(b, a, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t float32_rdiv(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_div(b, a, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_div(b, a, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv)
|
|
|
|
RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv)
|
|
|
|
RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfrdiv_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfrdiv_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfrdiv_vf_d, 8)
|
2020-07-01 17:25:21 +02:00
|
|
|
|
|
|
|
/* Vector Widening Floating-Point Multiply */
|
|
|
|
static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_mul(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), s);
|
2020-07-01 17:25:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_mul(float32_to_float64(a, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float32_to_float64(b, s), s);
|
2020-07-01 17:25:21 +02:00
|
|
|
|
|
|
|
}
|
|
|
|
RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16)
|
|
|
|
RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwmul_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwmul_vv_w, 8)
|
2020-07-01 17:25:21 +02:00
|
|
|
RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16)
|
|
|
|
RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwmul_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwmul_vf_w, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
|
|
|
|
#define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
|
|
|
|
static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
|
2023-04-05 10:58:11 +02:00
|
|
|
CPURISCVState *env) \
|
2020-07-01 17:25:22 +02:00
|
|
|
{ \
|
|
|
|
TX1 s1 = *((T1 *)vs1 + HS1(i)); \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
TD d = *((TD *)vd + HD(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, s1, d, &env->fp_status); \
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint16_t fmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_muladd(a, b, d, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(a, b, d, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(a, b, d, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16)
|
|
|
|
RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32)
|
|
|
|
RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmacc_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmacc_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmacc_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
#define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
|
|
|
|
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
|
2023-04-05 10:58:11 +02:00
|
|
|
CPURISCVState *env) \
|
2020-07-01 17:25:22 +02:00
|
|
|
{ \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
TD d = *((TD *)vd + HD(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d, &env->fp_status);\
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16)
|
|
|
|
RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32)
|
|
|
|
RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmacc_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmacc_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmacc_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float16_muladd(a, b, d, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fnmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float32_muladd(a, b, d, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float64_muladd(a, b, d, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16)
|
|
|
|
RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32)
|
|
|
|
RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16)
|
|
|
|
RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32)
|
|
|
|
RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfnmacc_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfnmacc_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfnmacc_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_muladd(a, b, d, float_muladd_negate_c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(a, b, d, float_muladd_negate_c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(a, b, d, float_muladd_negate_c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16)
|
|
|
|
RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32)
|
|
|
|
RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmsac_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmsac_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmsac_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16)
|
|
|
|
RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32)
|
|
|
|
RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmsac_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmsac_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmsac_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_muladd(a, b, d, float_muladd_negate_product, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fnmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(a, b, d, float_muladd_negate_product, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(a, b, d, float_muladd_negate_product, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16)
|
|
|
|
RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32)
|
|
|
|
RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16)
|
|
|
|
RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32)
|
|
|
|
RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfnmsac_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfnmsac_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfnmsac_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_muladd(d, b, a, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(d, b, a, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(d, b, a, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16)
|
|
|
|
RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32)
|
|
|
|
RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmadd_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmadd_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmadd_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16)
|
|
|
|
RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32)
|
|
|
|
RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmadd_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmadd_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmadd_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float16_muladd(d, b, a, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fnmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float32_muladd(d, b, a, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float64_muladd(d, b, a, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16)
|
|
|
|
RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32)
|
|
|
|
RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16)
|
|
|
|
RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32)
|
|
|
|
RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfnmadd_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfnmadd_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfnmadd_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_muladd(d, b, a, float_muladd_negate_c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(d, b, a, float_muladd_negate_c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(d, b, a, float_muladd_negate_c, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16)
|
|
|
|
RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32)
|
|
|
|
RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmsub_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmsub_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmsub_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16)
|
|
|
|
RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32)
|
|
|
|
RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmsub_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmsub_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmsub_vf_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
|
|
|
|
static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_muladd(d, b, a, float_muladd_negate_product, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fnmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(d, b, a, float_muladd_negate_product, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(d, b, a, float_muladd_negate_product, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16)
|
|
|
|
RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32)
|
|
|
|
RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8)
|
2020-07-01 17:25:22 +02:00
|
|
|
RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16)
|
|
|
|
RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32)
|
|
|
|
RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfnmsub_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfnmsub_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfnmsub_vf_d, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
|
|
|
|
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
|
|
|
|
static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), d, 0, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(float32_to_float64(a, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float32_to_float64(b, s), d, 0, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16)
|
|
|
|
RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwmacc_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwmacc_vv_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16)
|
|
|
|
RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwmacc_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwmacc_vf_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
|
2023-06-15 08:33:00 +02:00
|
|
|
static uint32_t fwmaccbf16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(bfloat16_to_float32(a, s),
|
|
|
|
bfloat16_to_float32(b, s), d, 0, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfwmaccbf16_vv, WOP_UUU_H, H4, H2, H2, fwmaccbf16)
|
|
|
|
GEN_VEXT_VV_ENV(vfwmaccbf16_vv, 4)
|
2023-10-05 11:57:32 +02:00
|
|
|
RVVCALL(OPFVF3, vfwmaccbf16_vf, WOP_UUU_H, H4, H2, fwmaccbf16)
|
2023-06-15 08:33:00 +02:00
|
|
|
GEN_VEXT_VF(vfwmaccbf16_vf, 4)
|
|
|
|
|
2020-07-01 17:25:23 +02:00
|
|
|
static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), d,
|
|
|
|
float_muladd_negate_c | float_muladd_negate_product,
|
|
|
|
s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
2023-04-05 10:58:11 +02:00
|
|
|
return float64_muladd(float32_to_float64(a, s), float32_to_float64(b, s),
|
|
|
|
d, float_muladd_negate_c |
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16)
|
|
|
|
RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16)
|
|
|
|
RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwnmacc_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwnmacc_vf_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
|
|
|
|
static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), d,
|
|
|
|
float_muladd_negate_c, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(float32_to_float64(a, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float32_to_float64(b, s), d,
|
|
|
|
float_muladd_negate_c, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16)
|
|
|
|
RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwmsac_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwmsac_vv_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16)
|
|
|
|
RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwmsac_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwmsac_vf_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
|
|
|
|
static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_muladd(float16_to_float32(a, true, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float16_to_float32(b, true, s), d,
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
|
|
|
|
{
|
|
|
|
return float64_muladd(float32_to_float64(a, s),
|
2023-04-05 10:58:11 +02:00
|
|
|
float32_to_float64(b, s), d,
|
|
|
|
float_muladd_negate_product, s);
|
2020-07-01 17:25:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16)
|
|
|
|
RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 8)
|
2020-07-01 17:25:23 +02:00
|
|
|
RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16)
|
|
|
|
RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfwnmsac_vf_h, 4)
|
|
|
|
GEN_VEXT_VF(vfwnmsac_vf_w, 8)
|
2020-07-01 17:25:24 +02:00
|
|
|
|
|
|
|
/* Vector Floating-Point Square-Root Instruction */
|
2023-04-05 10:58:11 +02:00
|
|
|
#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
|
2020-07-01 17:25:24 +02:00
|
|
|
static void do_##NAME(void *vd, void *vs2, int i, \
|
2023-04-05 10:58:11 +02:00
|
|
|
CPURISCVState *env) \
|
2020-07-01 17:25:24 +02:00
|
|
|
{ \
|
|
|
|
TX2 s2 = *((T2 *)vs2 + HS2(i)); \
|
|
|
|
*((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \
|
|
|
|
}
|
|
|
|
|
2022-06-06 08:16:56 +02:00
|
|
|
#define GEN_VEXT_V_ENV(NAME, ESZ) \
|
2020-07-01 17:25:24 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs2, \
|
2023-04-05 10:58:11 +02:00
|
|
|
CPURISCVState *env, uint32_t desc) \
|
2020-07-01 17:25:24 +02:00
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t total_elems = \
|
|
|
|
vext_get_total_elems(env, desc, ESZ); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:24 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
|
|
|
if (vl == 0) { \
|
|
|
|
return; \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * ESZ, \
|
|
|
|
(i + 1) * ESZ); \
|
2020-07-01 17:25:24 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
do_##NAME(vd, vs2, i, env); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
vext_set_elems_1s(vd, vta, vl * ESZ, \
|
|
|
|
total_elems * ESZ); \
|
2020-07-01 17:25:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
|
|
|
|
RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt)
|
|
|
|
RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfsqrt_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfsqrt_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfsqrt_v_d, 8)
|
2020-07-01 17:25:25 +02:00
|
|
|
|
2021-12-10 08:56:55 +01:00
|
|
|
/*
|
|
|
|
* Vector Floating-Point Reciprocal Square-Root Estimate Instruction
|
|
|
|
*
|
|
|
|
* Adapted from riscv-v-spec recip.c:
|
|
|
|
* https://github.com/riscv/riscv-v-spec/blob/master/recip.c
|
|
|
|
*/
|
|
|
|
static uint64_t frsqrt7(uint64_t f, int exp_size, int frac_size)
|
|
|
|
{
|
|
|
|
uint64_t sign = extract64(f, frac_size + exp_size, 1);
|
|
|
|
uint64_t exp = extract64(f, frac_size, exp_size);
|
|
|
|
uint64_t frac = extract64(f, 0, frac_size);
|
|
|
|
|
|
|
|
const uint8_t lookup_table[] = {
|
|
|
|
52, 51, 50, 48, 47, 46, 44, 43,
|
|
|
|
42, 41, 40, 39, 38, 36, 35, 34,
|
|
|
|
33, 32, 31, 30, 30, 29, 28, 27,
|
|
|
|
26, 25, 24, 23, 23, 22, 21, 20,
|
|
|
|
19, 19, 18, 17, 16, 16, 15, 14,
|
|
|
|
14, 13, 12, 12, 11, 10, 10, 9,
|
|
|
|
9, 8, 7, 7, 6, 6, 5, 4,
|
|
|
|
4, 3, 3, 2, 2, 1, 1, 0,
|
|
|
|
127, 125, 123, 121, 119, 118, 116, 114,
|
|
|
|
113, 111, 109, 108, 106, 105, 103, 102,
|
|
|
|
100, 99, 97, 96, 95, 93, 92, 91,
|
|
|
|
90, 88, 87, 86, 85, 84, 83, 82,
|
|
|
|
80, 79, 78, 77, 76, 75, 74, 73,
|
|
|
|
72, 71, 70, 70, 69, 68, 67, 66,
|
|
|
|
65, 64, 63, 63, 62, 61, 60, 59,
|
|
|
|
59, 58, 57, 56, 56, 55, 54, 53
|
|
|
|
};
|
|
|
|
const int precision = 7;
|
|
|
|
|
|
|
|
if (exp == 0 && frac != 0) { /* subnormal */
|
|
|
|
/* Normalize the subnormal. */
|
|
|
|
while (extract64(frac, frac_size - 1, 1) == 0) {
|
|
|
|
exp--;
|
|
|
|
frac <<= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
frac = (frac << 1) & MAKE_64BIT_MASK(0, frac_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
int idx = ((exp & 1) << (precision - 1)) |
|
2023-04-05 10:58:11 +02:00
|
|
|
(frac >> (frac_size - precision + 1));
|
2021-12-10 08:56:55 +01:00
|
|
|
uint64_t out_frac = (uint64_t)(lookup_table[idx]) <<
|
2023-04-05 10:58:11 +02:00
|
|
|
(frac_size - precision);
|
2021-12-10 08:56:55 +01:00
|
|
|
uint64_t out_exp = (3 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp) / 2;
|
|
|
|
|
|
|
|
uint64_t val = 0;
|
|
|
|
val = deposit64(val, 0, frac_size, out_frac);
|
|
|
|
val = deposit64(val, frac_size, exp_size, out_exp);
|
|
|
|
val = deposit64(val, frac_size + exp_size, 1, sign);
|
|
|
|
return val;
|
|
|
|
}
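
/*
 * Index example for the table above with SEW=32 (exp_size = 8,
 * frac_size = 23, precision = 7): the 7-bit index is
 *   idx = ((exp & 1) << 6) | (frac >> 17)
 * i.e. the exponent's parity followed by the six most-significant
 * fraction bits, and the 7-bit table entry is then placed in the top
 * bits of the output fraction (shifted left by frac_size - precision).
 */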
|
|
|
|
|
|
|
|
static float16 frsqrt7_h(float16 f, float_status *s)
|
|
|
|
{
|
|
|
|
int exp_size = 5, frac_size = 10;
|
|
|
|
bool sign = float16_is_neg(f);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* frsqrt7(sNaN) = canonical NaN
|
|
|
|
* frsqrt7(-inf) = canonical NaN
|
|
|
|
* frsqrt7(-normal) = canonical NaN
|
|
|
|
* frsqrt7(-subnormal) = canonical NaN
|
|
|
|
*/
|
|
|
|
if (float16_is_signaling_nan(f, s) ||
|
2023-04-05 10:58:11 +02:00
|
|
|
(float16_is_infinity(f) && sign) ||
|
|
|
|
(float16_is_normal(f) && sign) ||
|
|
|
|
(float16_is_zero_or_denormal(f) && !float16_is_zero(f) && sign)) {
|
2021-12-10 08:56:55 +01:00
|
|
|
s->float_exception_flags |= float_flag_invalid;
|
|
|
|
return float16_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(qNaN) = canonical NaN */
|
|
|
|
if (float16_is_quiet_nan(f, s)) {
|
|
|
|
return float16_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(+-0) = +-inf */
|
|
|
|
if (float16_is_zero(f)) {
|
|
|
|
s->float_exception_flags |= float_flag_divbyzero;
|
|
|
|
return float16_set_sign(float16_infinity, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(+inf) = +0 */
|
|
|
|
if (float16_is_infinity(f) && !sign) {
|
|
|
|
return float16_set_sign(float16_zero, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* +normal, +subnormal */
|
|
|
|
uint64_t val = frsqrt7(f, exp_size, frac_size);
|
|
|
|
return make_float16(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
static float32 frsqrt7_s(float32 f, float_status *s)
|
|
|
|
{
|
|
|
|
int exp_size = 8, frac_size = 23;
|
|
|
|
bool sign = float32_is_neg(f);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* frsqrt7(sNaN) = canonical NaN
|
|
|
|
* frsqrt7(-inf) = canonical NaN
|
|
|
|
* frsqrt7(-normal) = canonical NaN
|
|
|
|
* frsqrt7(-subnormal) = canonical NaN
|
|
|
|
*/
|
|
|
|
if (float32_is_signaling_nan(f, s) ||
|
2023-04-05 10:58:11 +02:00
|
|
|
(float32_is_infinity(f) && sign) ||
|
|
|
|
(float32_is_normal(f) && sign) ||
|
|
|
|
(float32_is_zero_or_denormal(f) && !float32_is_zero(f) && sign)) {
|
2021-12-10 08:56:55 +01:00
|
|
|
s->float_exception_flags |= float_flag_invalid;
|
|
|
|
return float32_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(qNaN) = canonical NaN */
|
|
|
|
if (float32_is_quiet_nan(f, s)) {
|
|
|
|
return float32_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(+-0) = +-inf */
|
|
|
|
if (float32_is_zero(f)) {
|
|
|
|
s->float_exception_flags |= float_flag_divbyzero;
|
|
|
|
return float32_set_sign(float32_infinity, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(+inf) = +0 */
|
|
|
|
if (float32_is_infinity(f) && !sign) {
|
|
|
|
return float32_set_sign(float32_zero, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* +normal, +subnormal */
|
|
|
|
uint64_t val = frsqrt7(f, exp_size, frac_size);
|
|
|
|
return make_float32(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
static float64 frsqrt7_d(float64 f, float_status *s)
|
|
|
|
{
|
|
|
|
int exp_size = 11, frac_size = 52;
|
|
|
|
bool sign = float64_is_neg(f);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* frsqrt7(sNaN) = canonical NaN
|
|
|
|
* frsqrt7(-inf) = canonical NaN
|
|
|
|
* frsqrt7(-normal) = canonical NaN
|
|
|
|
* frsqrt7(-subnormal) = canonical NaN
|
|
|
|
*/
|
|
|
|
if (float64_is_signaling_nan(f, s) ||
|
2023-04-05 10:58:11 +02:00
|
|
|
(float64_is_infinity(f) && sign) ||
|
|
|
|
(float64_is_normal(f) && sign) ||
|
|
|
|
(float64_is_zero_or_denormal(f) && !float64_is_zero(f) && sign)) {
|
2021-12-10 08:56:55 +01:00
|
|
|
s->float_exception_flags |= float_flag_invalid;
|
|
|
|
return float64_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(qNaN) = canonical NaN */
|
|
|
|
if (float64_is_quiet_nan(f, s)) {
|
|
|
|
return float64_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(+-0) = +-inf */
|
|
|
|
if (float64_is_zero(f)) {
|
|
|
|
s->float_exception_flags |= float_flag_divbyzero;
|
|
|
|
return float64_set_sign(float64_infinity, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frsqrt7(+inf) = +0 */
|
|
|
|
if (float64_is_infinity(f) && !sign) {
|
|
|
|
return float64_set_sign(float64_zero, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* +normal, +subnormal */
|
|
|
|
uint64_t val = frsqrt7(f, exp_size, frac_size);
|
|
|
|
return make_float64(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV1, vfrsqrt7_v_h, OP_UU_H, H2, H2, frsqrt7_h)
|
|
|
|
RVVCALL(OPFVV1, vfrsqrt7_v_w, OP_UU_W, H4, H4, frsqrt7_s)
|
|
|
|
RVVCALL(OPFVV1, vfrsqrt7_v_d, OP_UU_D, H8, H8, frsqrt7_d)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfrsqrt7_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfrsqrt7_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfrsqrt7_v_d, 8)
|
2021-12-10 08:56:55 +01:00
|
|
|
|
2021-12-10 08:56:56 +01:00
|
|
|
/*
|
|
|
|
* Vector Floating-Point Reciprocal Estimate Instruction
|
|
|
|
*
|
|
|
|
* Adapted from riscv-v-spec recip.c:
|
|
|
|
* https://github.com/riscv/riscv-v-spec/blob/master/recip.c
|
|
|
|
*/
|
|
|
|
static uint64_t frec7(uint64_t f, int exp_size, int frac_size,
|
|
|
|
float_status *s)
|
|
|
|
{
|
|
|
|
uint64_t sign = extract64(f, frac_size + exp_size, 1);
|
|
|
|
uint64_t exp = extract64(f, frac_size, exp_size);
|
|
|
|
uint64_t frac = extract64(f, 0, frac_size);
|
|
|
|
|
|
|
|
const uint8_t lookup_table[] = {
|
|
|
|
127, 125, 123, 121, 119, 117, 116, 114,
|
|
|
|
112, 110, 109, 107, 105, 104, 102, 100,
|
|
|
|
99, 97, 96, 94, 93, 91, 90, 88,
|
|
|
|
87, 85, 84, 83, 81, 80, 79, 77,
|
|
|
|
76, 75, 74, 72, 71, 70, 69, 68,
|
|
|
|
66, 65, 64, 63, 62, 61, 60, 59,
|
|
|
|
58, 57, 56, 55, 54, 53, 52, 51,
|
|
|
|
50, 49, 48, 47, 46, 45, 44, 43,
|
|
|
|
42, 41, 40, 40, 39, 38, 37, 36,
|
|
|
|
35, 35, 34, 33, 32, 31, 31, 30,
|
|
|
|
29, 28, 28, 27, 26, 25, 25, 24,
|
|
|
|
23, 23, 22, 21, 21, 20, 19, 19,
|
|
|
|
18, 17, 17, 16, 15, 15, 14, 14,
|
|
|
|
13, 12, 12, 11, 11, 10, 9, 9,
|
|
|
|
8, 8, 7, 7, 6, 5, 5, 4,
|
|
|
|
4, 3, 3, 2, 2, 1, 1, 0
|
|
|
|
};
|
|
|
|
const int precision = 7;
|
|
|
|
|
|
|
|
if (exp == 0 && frac != 0) { /* subnormal */
|
|
|
|
/* Normalize the subnormal. */
|
|
|
|
while (extract64(frac, frac_size - 1, 1) == 0) {
|
|
|
|
exp--;
|
|
|
|
frac <<= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
frac = (frac << 1) & MAKE_64BIT_MASK(0, frac_size);
|
|
|
|
|
|
|
|
if (exp != 0 && exp != UINT64_MAX) {
|
|
|
|
/*
|
|
|
|
* Overflow to inf or max value of same sign,
|
|
|
|
* depending on sign and rounding mode.
|
|
|
|
*/
|
|
|
|
s->float_exception_flags |= (float_flag_inexact |
|
|
|
|
float_flag_overflow);
|
|
|
|
|
|
|
|
if ((s->float_rounding_mode == float_round_to_zero) ||
|
|
|
|
((s->float_rounding_mode == float_round_down) && !sign) ||
|
|
|
|
((s->float_rounding_mode == float_round_up) && sign)) {
|
|
|
|
/* Return the largest finite value with the same sign. */
|
|
|
|
return (sign << (exp_size + frac_size)) |
|
2023-04-05 10:58:11 +02:00
|
|
|
(MAKE_64BIT_MASK(frac_size, exp_size) - 1);
|
2021-12-10 08:56:56 +01:00
|
|
|
} else {
|
|
|
|
/* Return +-inf. */
|
|
|
|
return (sign << (exp_size + frac_size)) |
|
2023-04-05 10:58:11 +02:00
|
|
|
MAKE_64BIT_MASK(frac_size, exp_size);
|
2021-12-10 08:56:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int idx = frac >> (frac_size - precision);
|
|
|
|
uint64_t out_frac = (uint64_t)(lookup_table[idx]) <<
|
2023-04-05 10:58:11 +02:00
|
|
|
(frac_size - precision);
|
2021-12-10 08:56:56 +01:00
|
|
|
uint64_t out_exp = 2 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp;
|
|
|
|
|
|
|
|
if (out_exp == 0 || out_exp == UINT64_MAX) {
|
|
|
|
/*
|
|
|
|
* The result is subnormal, but don't raise the underflow exception,
|
|
|
|
* because there's no additional loss of precision.
|
|
|
|
*/
|
|
|
|
out_frac = (out_frac >> 1) | MAKE_64BIT_MASK(frac_size - 1, 1);
|
|
|
|
if (out_exp == UINT64_MAX) {
|
|
|
|
out_frac >>= 1;
|
|
|
|
out_exp = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t val = 0;
|
|
|
|
val = deposit64(val, 0, frac_size, out_frac);
|
|
|
|
val = deposit64(val, frac_size, exp_size, out_exp);
|
|
|
|
val = deposit64(val, frac_size + exp_size, 1, sign);
|
|
|
|
return val;
|
|
|
|
}
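/*
 * Worked example (float32, f = 3.0 = 0x40400000): exp = 128, frac = 0x400000,
 * so idx = 0x400000 >> 16 = 64 and lookup_table[64] = 42.  Then
 * out_frac = 42 << 16 and out_exp = 2 * 127 + ~128 = 125 (2 * bias - exp - 1),
 * which is neither 0 nor all-ones, so no subnormal fix-up is needed.  The
 * result 0x3eaa0000 is about 0.332, vs. the exact 1/3, again within 2^-7.
 */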
|
|
|
|
|
|
|
|
static float16 frec7_h(float16 f, float_status *s)
|
|
|
|
{
|
|
|
|
int exp_size = 5, frac_size = 10;
|
|
|
|
bool sign = float16_is_neg(f);
|
|
|
|
|
|
|
|
/* frec7(+-inf) = +-0 */
|
|
|
|
if (float16_is_infinity(f)) {
|
|
|
|
return float16_set_sign(float16_zero, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(+-0) = +-inf */
|
|
|
|
if (float16_is_zero(f)) {
|
|
|
|
s->float_exception_flags |= float_flag_divbyzero;
|
|
|
|
return float16_set_sign(float16_infinity, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(sNaN) = canonical NaN */
|
|
|
|
if (float16_is_signaling_nan(f, s)) {
|
|
|
|
s->float_exception_flags |= float_flag_invalid;
|
|
|
|
return float16_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(qNaN) = canonical NaN */
|
|
|
|
if (float16_is_quiet_nan(f, s)) {
|
|
|
|
return float16_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* +-normal, +-subnormal */
|
|
|
|
uint64_t val = frec7(f, exp_size, frac_size, s);
|
|
|
|
return make_float16(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
static float32 frec7_s(float32 f, float_status *s)
|
|
|
|
{
|
|
|
|
int exp_size = 8, frac_size = 23;
|
|
|
|
bool sign = float32_is_neg(f);
|
|
|
|
|
|
|
|
/* frec7(+-inf) = +-0 */
|
|
|
|
if (float32_is_infinity(f)) {
|
|
|
|
return float32_set_sign(float32_zero, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(+-0) = +-inf */
|
|
|
|
if (float32_is_zero(f)) {
|
|
|
|
s->float_exception_flags |= float_flag_divbyzero;
|
|
|
|
return float32_set_sign(float32_infinity, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(sNaN) = canonical NaN */
|
|
|
|
if (float32_is_signaling_nan(f, s)) {
|
|
|
|
s->float_exception_flags |= float_flag_invalid;
|
|
|
|
return float32_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(qNaN) = canonical NaN */
|
|
|
|
if (float32_is_quiet_nan(f, s)) {
|
|
|
|
return float32_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* +-normal, +-subnormal */
|
|
|
|
uint64_t val = frec7(f, exp_size, frac_size, s);
|
|
|
|
return make_float32(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
static float64 frec7_d(float64 f, float_status *s)
|
|
|
|
{
|
|
|
|
int exp_size = 11, frac_size = 52;
|
|
|
|
bool sign = float64_is_neg(f);
|
|
|
|
|
|
|
|
/* frec7(+-inf) = +-0 */
|
|
|
|
if (float64_is_infinity(f)) {
|
|
|
|
return float64_set_sign(float64_zero, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(+-0) = +-inf */
|
|
|
|
if (float64_is_zero(f)) {
|
|
|
|
s->float_exception_flags |= float_flag_divbyzero;
|
|
|
|
return float64_set_sign(float64_infinity, sign);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(sNaN) = canonical NaN */
|
|
|
|
if (float64_is_signaling_nan(f, s)) {
|
|
|
|
s->float_exception_flags |= float_flag_invalid;
|
|
|
|
return float64_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* frec7(qNaN) = canonical NaN */
|
|
|
|
if (float64_is_quiet_nan(f, s)) {
|
|
|
|
return float64_default_nan(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* +-normal, +-subnormal */
|
|
|
|
uint64_t val = frec7(f, exp_size, frac_size, s);
|
|
|
|
return make_float64(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV1, vfrec7_v_h, OP_UU_H, H2, H2, frec7_h)
|
|
|
|
RVVCALL(OPFVV1, vfrec7_v_w, OP_UU_W, H4, H4, frec7_s)
|
|
|
|
RVVCALL(OPFVV1, vfrec7_v_d, OP_UU_D, H8, H8, frec7_d)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfrec7_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfrec7_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfrec7_v_d, 8)
|
2021-12-10 08:56:56 +01:00
|
|
|
|
2020-07-01 17:25:25 +02:00
|
|
|
/* Vector Floating-Point MIN/MAX Instructions */
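/*
 * The float16/32/64_minimum_number and _maximum_number operations follow the
 * IEEE 754-2019 minimumNumber/maximumNumber semantics: when exactly one
 * operand is a NaN the numerical operand is returned, and only a signaling
 * NaN raises the invalid flag, as vfmin/vfmax require.
 */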
|
2021-12-10 08:56:45 +01:00
|
|
|
RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minimum_number)
|
|
|
|
RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minimum_number)
|
|
|
|
RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minimum_number)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmin_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmin_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmin_vv_d, 8)
|
2021-12-10 08:56:45 +01:00
|
|
|
RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minimum_number)
|
|
|
|
RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minimum_number)
|
|
|
|
RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minimum_number)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmin_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmin_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmin_vf_d, 8)
|
2020-07-01 17:25:25 +02:00
|
|
|
|
2021-12-10 08:56:45 +01:00
|
|
|
RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maximum_number)
|
|
|
|
RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maximum_number)
|
|
|
|
RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maximum_number)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfmax_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfmax_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfmax_vv_d, 8)
|
2021-12-10 08:56:45 +01:00
|
|
|
RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maximum_number)
|
|
|
|
RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maximum_number)
|
|
|
|
RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maximum_number)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfmax_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfmax_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfmax_vf_d, 8)
|
2020-07-01 17:25:26 +02:00
|
|
|
|
|
|
|
/* Vector Floating-Point Sign-Injection Instructions */
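/*
 * The fsgnjN, fsgnjnN and fsgnjxN helpers below keep the magnitude bits of
 * their first argument and take the sign bit from the second argument:
 * unchanged for fsgnj, inverted for fsgnjn, and XORed with the first
 * argument's own sign for fsgnjx.
 */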
|
|
|
|
static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(b, 0, 15, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fsgnj32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(b, 0, 31, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(b, 0, 63, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16)
|
|
|
|
RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32)
|
|
|
|
RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8)
|
2020-07-01 17:25:26 +02:00
|
|
|
RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16)
|
|
|
|
RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32)
|
|
|
|
RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfsgnj_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfsgnj_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfsgnj_vf_d, 8)
|
2020-07-01 17:25:26 +02:00
|
|
|
|
|
|
|
static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(~b, 0, 15, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fsgnjn32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(~b, 0, 31, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(~b, 0, 63, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16)
|
|
|
|
RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32)
|
|
|
|
RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8)
|
2020-07-01 17:25:26 +02:00
|
|
|
RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16)
|
|
|
|
RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32)
|
|
|
|
RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfsgnjn_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfsgnjn_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfsgnjn_vf_d, 8)
|
2020-07-01 17:25:26 +02:00
|
|
|
|
|
|
|
static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(b ^ a, 0, 15, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t fsgnjx32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(b ^ a, 0, 31, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
return deposit64(b ^ a, 0, 63, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16)
|
|
|
|
RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32)
|
|
|
|
RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2)
|
|
|
|
GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4)
|
|
|
|
GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8)
|
2020-07-01 17:25:26 +02:00
|
|
|
RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16)
|
|
|
|
RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32)
|
|
|
|
RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_VF(vfsgnjx_vf_h, 2)
|
|
|
|
GEN_VEXT_VF(vfsgnjx_vf_w, 4)
|
|
|
|
GEN_VEXT_VF(vfsgnjx_vf_d, 8)
|
2020-07-01 17:25:27 +02:00
|
|
|
|
|
|
|
/* Vector Floating-Point Compare Instructions */
|
|
|
|
#define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2023-02-26 18:05:14 +01:00
|
|
|
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:27 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:27 +02:00
|
|
|
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
|
|
|
|
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
if (vma) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
2020-07-01 17:25:27 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, \
|
2020-07-01 17:25:27 +02:00
|
|
|
DO_OP(s2, s1, &env->fp_status)); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2023-04-05 10:58:12 +02:00
|
|
|
/*
|
|
|
|
* mask destination registers are always tail-agnostic
|
|
|
|
* set tail elements to 1s
|
|
|
|
*/ \
|
2022-06-06 08:16:56 +02:00
|
|
|
if (vta_all_1s) { \
|
|
|
|
for (; i < total_elems; i++) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
|
|
|
} \
|
2020-07-01 17:25:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfeq_vv_w, uint32_t, H4, float32_eq_quiet)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet)
|
|
|
|
|
|
|
|
#define GEN_VEXT_CMP_VF(NAME, ETYPE, H, DO_OP) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2023-02-26 18:05:14 +01:00
|
|
|
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:27 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:27 +02:00
|
|
|
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
if (vma) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
2020-07-01 17:25:27 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, \
|
2020-07-01 17:25:27 +02:00
|
|
|
DO_OP(s2, (ETYPE)s1, &env->fp_status)); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2023-04-05 10:58:12 +02:00
|
|
|
/*
|
|
|
|
* mask destination registers are always tail-agnostic
|
|
|
|
* set tail elements to 1s
|
|
|
|
*/ \
|
2022-06-06 08:16:56 +02:00
|
|
|
if (vta_all_1s) { \
|
|
|
|
for (; i < total_elems; i++) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
|
|
|
} \
|
2020-07-01 17:25:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
|
|
|
|
GEN_VEXT_CMP_VF(vmfeq_vf_w, uint32_t, H4, float32_eq_quiet)
|
|
|
|
GEN_VEXT_CMP_VF(vmfeq_vf_d, uint64_t, H8, float64_eq_quiet)
|
|
|
|
|
|
|
|
static bool vmfne16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float16_compare_quiet(a, b, s);
|
|
|
|
return compare != float_relation_equal;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool vmfne32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float32_compare_quiet(a, b, s);
|
|
|
|
return compare != float_relation_equal;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool vmfne64(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float64_compare_quiet(a, b, s);
|
|
|
|
return compare != float_relation_equal;
|
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfne_vv_h, uint16_t, H2, vmfne16)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfne_vv_w, uint32_t, H4, vmfne32)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfne_vv_d, uint64_t, H8, vmfne64)
|
|
|
|
GEN_VEXT_CMP_VF(vmfne_vf_h, uint16_t, H2, vmfne16)
|
|
|
|
GEN_VEXT_CMP_VF(vmfne_vf_w, uint32_t, H4, vmfne32)
|
|
|
|
GEN_VEXT_CMP_VF(vmfne_vf_d, uint64_t, H8, vmfne64)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmflt_vv_h, uint16_t, H2, float16_lt)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmflt_vv_w, uint32_t, H4, float32_lt)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmflt_vv_d, uint64_t, H8, float64_lt)
|
|
|
|
GEN_VEXT_CMP_VF(vmflt_vf_h, uint16_t, H2, float16_lt)
|
|
|
|
GEN_VEXT_CMP_VF(vmflt_vf_w, uint32_t, H4, float32_lt)
|
|
|
|
GEN_VEXT_CMP_VF(vmflt_vf_d, uint64_t, H8, float64_lt)
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfle_vv_h, uint16_t, H2, float16_le)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfle_vv_w, uint32_t, H4, float32_le)
|
|
|
|
GEN_VEXT_CMP_VV_ENV(vmfle_vv_d, uint64_t, H8, float64_le)
|
|
|
|
GEN_VEXT_CMP_VF(vmfle_vf_h, uint16_t, H2, float16_le)
|
|
|
|
GEN_VEXT_CMP_VF(vmfle_vf_w, uint32_t, H4, float32_le)
|
|
|
|
GEN_VEXT_CMP_VF(vmfle_vf_d, uint64_t, H8, float64_le)
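/*
 * vmfgt and vmfge are only defined in vector-scalar form by the ISA (the
 * vector-vector comparisons are expressed with vmflt/vmfle and swapped
 * operands), so only GEN_VEXT_CMP_VF expansions are needed below.
 */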
|
|
|
|
|
|
|
|
static bool vmfgt16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float16_compare(a, b, s);
|
|
|
|
return compare == float_relation_greater;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool vmfgt32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float32_compare(a, b, s);
|
|
|
|
return compare == float_relation_greater;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool vmfgt64(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float64_compare(a, b, s);
|
|
|
|
return compare == float_relation_greater;
|
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VF(vmfgt_vf_h, uint16_t, H2, vmfgt16)
|
|
|
|
GEN_VEXT_CMP_VF(vmfgt_vf_w, uint32_t, H4, vmfgt32)
|
|
|
|
GEN_VEXT_CMP_VF(vmfgt_vf_d, uint64_t, H8, vmfgt64)
|
|
|
|
|
|
|
|
static bool vmfge16(uint16_t a, uint16_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float16_compare(a, b, s);
|
|
|
|
return compare == float_relation_greater ||
|
|
|
|
compare == float_relation_equal;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool vmfge32(uint32_t a, uint32_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float32_compare(a, b, s);
|
|
|
|
return compare == float_relation_greater ||
|
|
|
|
compare == float_relation_equal;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool vmfge64(uint64_t a, uint64_t b, float_status *s)
|
|
|
|
{
|
|
|
|
FloatRelation compare = float64_compare(a, b, s);
|
|
|
|
return compare == float_relation_greater ||
|
|
|
|
compare == float_relation_equal;
|
|
|
|
}
|
|
|
|
|
|
|
|
GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16)
|
|
|
|
GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
|
|
|
|
GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
|
|
|
|
|
2020-07-01 17:25:28 +02:00
|
|
|
/* Vector Floating-Point Classify Instruction */
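/*
 * The classify result is a 10-bit mask with exactly one bit set:
 * bit 0: -inf, 1: negative normal, 2: negative subnormal, 3: -0, 4: +0,
 * 5: positive subnormal, 6: positive normal, 7: +inf, 8: signaling NaN,
 * 9: quiet NaN.
 */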
|
|
|
|
target_ulong fclass_h(uint64_t frs1)
|
|
|
|
{
|
|
|
|
float16 f = frs1;
|
|
|
|
bool sign = float16_is_neg(f);
|
|
|
|
|
|
|
|
if (float16_is_infinity(f)) {
|
|
|
|
return sign ? 1 << 0 : 1 << 7;
|
|
|
|
} else if (float16_is_zero(f)) {
|
|
|
|
return sign ? 1 << 3 : 1 << 4;
|
|
|
|
} else if (float16_is_zero_or_denormal(f)) {
|
|
|
|
return sign ? 1 << 2 : 1 << 5;
|
|
|
|
} else if (float16_is_any_nan(f)) {
|
|
|
|
float_status s = { }; /* for snan_bit_is_one */
|
|
|
|
return float16_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
|
|
|
|
} else {
|
|
|
|
return sign ? 1 << 1 : 1 << 6;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
target_ulong fclass_s(uint64_t frs1)
|
|
|
|
{
|
|
|
|
float32 f = frs1;
|
|
|
|
bool sign = float32_is_neg(f);
|
|
|
|
|
|
|
|
if (float32_is_infinity(f)) {
|
|
|
|
return sign ? 1 << 0 : 1 << 7;
|
|
|
|
} else if (float32_is_zero(f)) {
|
|
|
|
return sign ? 1 << 3 : 1 << 4;
|
|
|
|
} else if (float32_is_zero_or_denormal(f)) {
|
|
|
|
return sign ? 1 << 2 : 1 << 5;
|
|
|
|
} else if (float32_is_any_nan(f)) {
|
|
|
|
float_status s = { }; /* for snan_bit_is_one */
|
|
|
|
return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
|
|
|
|
} else {
|
|
|
|
return sign ? 1 << 1 : 1 << 6;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
target_ulong fclass_d(uint64_t frs1)
|
|
|
|
{
|
|
|
|
float64 f = frs1;
|
|
|
|
bool sign = float64_is_neg(f);
|
|
|
|
|
|
|
|
if (float64_is_infinity(f)) {
|
|
|
|
return sign ? 1 << 0 : 1 << 7;
|
|
|
|
} else if (float64_is_zero(f)) {
|
|
|
|
return sign ? 1 << 3 : 1 << 4;
|
|
|
|
} else if (float64_is_zero_or_denormal(f)) {
|
|
|
|
return sign ? 1 << 2 : 1 << 5;
|
|
|
|
} else if (float64_is_any_nan(f)) {
|
|
|
|
float_status s = { }; /* for snan_bit_is_one */
|
|
|
|
return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
|
|
|
|
} else {
|
|
|
|
return sign ? 1 << 1 : 1 << 6;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h)
|
|
|
|
RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s)
|
|
|
|
RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V(vfclass_v_h, 2)
|
|
|
|
GEN_VEXT_V(vfclass_v_w, 4)
|
|
|
|
GEN_VEXT_V(vfclass_v_d, 8)
|
2020-07-01 17:25:29 +02:00
|
|
|
|
|
|
|
/* Vector Floating-Point Merge Instruction */
|
2022-06-06 08:16:56 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VFMERGE_VF(NAME, ETYPE, H) \
|
2020-07-01 17:25:29 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = \
|
|
|
|
vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:29 +02:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:29 +02:00
|
|
|
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
|
2023-04-05 10:58:11 +02:00
|
|
|
*((ETYPE *)vd + H(i)) = \
|
|
|
|
(!vm && !vext_elem_mask(v0, i) ? s2 : s1); \
|
2020-07-01 17:25:29 +02:00
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:29 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
|
|
|
|
GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4)
|
|
|
|
GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8)
|
2020-07-01 17:25:30 +02:00
|
|
|
|
|
|
|
/* Single-Width Floating-Point/Integer Type-Convert Instructions */
|
|
|
|
/* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
|
|
|
|
RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8)
|
2020-07-01 17:25:30 +02:00
|
|
|
|
|
|
|
/* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */
|
|
|
|
RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8)
|
2020-07-01 17:25:30 +02:00
|
|
|
|
|
|
|
/* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */
|
|
|
|
RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8)
|
2020-07-01 17:25:30 +02:00
|
|
|
|
|
|
|
/* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */
|
|
|
|
RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32)
|
|
|
|
RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8)
|
2020-07-01 17:25:31 +02:00
|
|
|
|
|
|
|
/* Widening Floating-Point/Integer Type-Convert Instructions */
|
|
|
|
/* (TD, T2, TX2) */
|
2021-12-10 08:56:48 +01:00
|
|
|
#define WOP_UU_B uint16_t, uint8_t, uint8_t
|
2020-07-01 17:25:31 +02:00
|
|
|
#define WOP_UU_H uint32_t, uint16_t, uint16_t
|
|
|
|
#define WOP_UU_W uint64_t, uint32_t, uint32_t
|
2023-04-05 10:58:12 +02:00
|
|
|
/*
|
|
|
|
* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.
|
|
|
|
*/
|
2020-07-01 17:25:31 +02:00
|
|
|
RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32)
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 8)
|
2020-07-01 17:25:31 +02:00
|
|
|
|
|
|
|
/* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. */
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32)
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 8)
|
2020-07-01 17:25:31 +02:00
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
/*
|
|
|
|
* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float.
|
|
|
|
*/
|
2021-12-10 08:56:48 +01:00
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_xu_v_b, WOP_UU_B, H2, H1, uint8_to_float16)
|
2020-07-01 17:25:31 +02:00
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32)
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_xu_v_b, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 8)
|
2020-07-01 17:25:31 +02:00
|
|
|
|
|
|
|
/* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
|
2021-12-10 08:56:48 +01:00
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_x_v_b, WOP_UU_B, H2, H1, int8_to_float16)
|
2020-07-01 17:25:31 +02:00
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32)
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_x_v_b, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 8)
|
2020-07-01 17:25:31 +02:00
|
|
|
|
|
|
|
/*
|
2023-04-05 10:58:13 +02:00
|
|
|
* vfwcvt.f.f.v vd, vs2, vm # Convert single-width float to double-width float.
|
2020-07-01 17:25:31 +02:00
|
|
|
*/
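/*
 * The 'true' argument passed to float16_to_float32() below selects IEEE
 * half-precision rather than the Arm alternative half-precision format.
 */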
|
|
|
|
static uint32_t vfwcvtffv16(uint16_t a, float_status *s)
|
|
|
|
{
|
|
|
|
return float16_to_float32(a, true, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16)
|
|
|
|
RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 4)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 8)
|
2020-07-01 17:25:32 +02:00
|
|
|
|
2023-06-15 08:32:59 +02:00
|
|
|
RVVCALL(OPFVV1, vfwcvtbf16_f_f_v, WOP_UU_H, H4, H2, bfloat16_to_float32)
|
|
|
|
GEN_VEXT_V_ENV(vfwcvtbf16_f_f_v, 4)
|
|
|
|
|
2020-07-01 17:25:32 +02:00
|
|
|
/* Narrowing Floating-Point/Integer Type-Convert Instructions */
|
|
|
|
/* (TD, T2, TX2) */
|
2021-12-10 08:56:50 +01:00
|
|
|
#define NOP_UU_B uint8_t, uint16_t, uint32_t
|
2020-07-01 17:25:32 +02:00
|
|
|
#define NOP_UU_H uint16_t, uint32_t, uint32_t
|
|
|
|
#define NOP_UU_W uint32_t, uint64_t, uint64_t
|
|
|
|
/*
 * vfncvt.xu.f.v vd, vs2, vm # Convert double-width float to unsigned integer.
 */
|
2021-12-10 08:56:50 +01:00
|
|
|
RVVCALL(OPFVV1, vfncvt_xu_f_w_b, NOP_UU_B, H1, H2, float16_to_uint8)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_xu_f_w_h, NOP_UU_H, H2, H4, float32_to_uint16)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_xu_f_w_w, NOP_UU_W, H4, H8, float64_to_uint32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfncvt_xu_f_w_b, 1)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_xu_f_w_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_xu_f_w_w, 4)
|
2020-07-01 17:25:32 +02:00
|
|
|
|
|
|
|
/* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
|
2021-12-10 08:56:50 +01:00
|
|
|
RVVCALL(OPFVV1, vfncvt_x_f_w_b, NOP_UU_B, H1, H2, float16_to_int8)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_x_f_w_h, NOP_UU_H, H2, H4, float32_to_int16)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_x_f_w_w, NOP_UU_W, H4, H8, float64_to_int32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfncvt_x_f_w_b, 1)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_x_f_w_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_x_f_w_w, 4)
|
2020-07-01 17:25:32 +02:00
|
|
|
|
2023-04-05 10:58:13 +02:00
|
|
|
/*
|
|
|
|
* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float.
|
|
|
|
*/
|
2021-12-10 08:56:50 +01:00
|
|
|
RVVCALL(OPFVV1, vfncvt_f_xu_w_h, NOP_UU_H, H2, H4, uint32_to_float16)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_f_xu_w_w, NOP_UU_W, H4, H8, uint64_to_float32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfncvt_f_xu_w_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_f_xu_w_w, 4)
|
2020-07-01 17:25:32 +02:00
|
|
|
|
|
|
|
/* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
|
2021-12-10 08:56:50 +01:00
|
|
|
RVVCALL(OPFVV1, vfncvt_f_x_w_h, NOP_UU_H, H2, H4, int32_to_float16)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_f_x_w_w, NOP_UU_W, H4, H8, int64_to_float32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfncvt_f_x_w_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_f_x_w_w, 4)
|
2020-07-01 17:25:32 +02:00
|
|
|
|
|
|
|
/*
 * vfncvt.f.f.v vd, vs2, vm # Convert double-width float to single-width float.
 */
|
|
|
|
static uint16_t vfncvtffv16(uint32_t a, float_status *s)
|
|
|
|
{
|
|
|
|
return float32_to_float16(a, true, s);
|
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:50 +01:00
|
|
|
RVVCALL(OPFVV1, vfncvt_f_f_w_h, NOP_UU_H, H2, H4, vfncvtffv16)
|
|
|
|
RVVCALL(OPFVV1, vfncvt_f_f_w_w, NOP_UU_W, H4, H8, float64_to_float32)
|
2022-06-06 08:16:56 +02:00
|
|
|
GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2)
|
|
|
|
GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
2023-06-15 08:32:59 +02:00
|
|
|
RVVCALL(OPFVV1, vfncvtbf16_f_f_w, NOP_UU_H, H2, H4, float32_to_bfloat16)
|
|
|
|
GEN_VEXT_V_ENV(vfncvtbf16_f_f_w, 2)
|
|
|
|
|
2020-07-01 17:25:33 +02:00
|
|
|
/*
|
2023-04-05 10:58:12 +02:00
|
|
|
* Vector Reduction Operations
|
2020-07-01 17:25:33 +02:00
|
|
|
*/
|
|
|
|
/* Vector Single-Width Integer Reduction Instructions */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP) \
|
2020-07-01 17:25:33 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
|
2023-04-05 10:58:11 +02:00
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
2020-07-01 17:25:33 +02:00
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:16 +02:00
|
|
|
uint32_t esz = sizeof(TD); \
|
|
|
|
uint32_t vlenb = simd_maxsz(desc); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:33 +02:00
|
|
|
uint32_t i; \
|
|
|
|
TD s1 = *((TD *)vs1 + HD(0)); \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:33 +02:00
|
|
|
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2020-07-01 17:25:33 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
s1 = OP(s1, (TD)s2); \
|
|
|
|
} \
|
|
|
|
*((TD *)vd + HD(0)) = s1; \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:16 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, esz, vlenb); \
|
2020-07-01 17:25:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vd[0] = sum(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = maxu(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX)
|
|
|
|
GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX)
|
|
|
|
GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX)
|
|
|
|
GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = max(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX)
|
|
|
|
GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX)
|
|
|
|
GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX)
|
|
|
|
GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = minu(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN)
|
|
|
|
GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN)
|
|
|
|
GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN)
|
|
|
|
GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = min(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN)
|
|
|
|
GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN)
|
|
|
|
GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN)
|
|
|
|
GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = and(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND)
|
|
|
|
GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND)
|
|
|
|
GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND)
|
|
|
|
GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = or(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR)
|
|
|
|
GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR)
|
|
|
|
GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR)
|
|
|
|
GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR)
|
2020-07-01 17:25:33 +02:00
|
|
|
|
|
|
|
/* vd[0] = xor(vs1[0], vs2[*]) */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR)
|
|
|
|
GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR)
|
|
|
|
GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR)
|
|
|
|
GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR)
|
2020-07-01 17:25:34 +02:00
|
|
|
|
|
|
|
/* Vector Widening Integer Reduction Instructions */
|
|
|
|
/* Signed sum reduction into double-width accumulator */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD)
|
2020-07-01 17:25:34 +02:00
|
|
|
|
|
|
|
/* Unsigned sum reduction into double-width accumulator */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD)
|
|
|
|
GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD)
|
2020-07-01 17:25:35 +02:00
|
|
|
|
|
|
|
/* Vector Single-Width Floating-Point Reduction Instructions */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP) \
|
2020-07-01 17:25:35 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
|
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:16 +02:00
|
|
|
uint32_t esz = sizeof(TD); \
|
|
|
|
uint32_t vlenb = simd_maxsz(desc); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:35 +02:00
|
|
|
uint32_t i; \
|
|
|
|
TD s1 = *((TD *)vs1 + HD(0)); \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2020-07-01 17:25:35 +02:00
|
|
|
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2020-07-01 17:25:35 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
s1 = OP(s1, (TD)s2, &env->fp_status); \
|
|
|
|
} \
|
|
|
|
*((TD *)vd + HD(0)) = s1; \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:16 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, esz, vlenb); \
|
2020-07-01 17:25:35 +02:00
|
|
|
}
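/*
 * Both vfredosum (ordered) and vfredusum (unordered) are generated from this
 * macro, so this implementation always accumulates in element order; for the
 * unordered form that is simply one of the orderings the specification
 * allows.
 */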
|
|
|
|
|
|
|
|
/* Unordered sum */
|
2022-08-17 09:48:02 +02:00
|
|
|
GEN_VEXT_FRED(vfredusum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
|
|
|
|
GEN_VEXT_FRED(vfredusum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
|
|
|
|
GEN_VEXT_FRED(vfredusum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
|
|
|
|
|
|
|
|
/* Ordered sum */
|
|
|
|
GEN_VEXT_FRED(vfredosum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
|
|
|
|
GEN_VEXT_FRED(vfredosum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
|
|
|
|
GEN_VEXT_FRED(vfredosum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
|
2020-07-01 17:25:35 +02:00
|
|
|
|
|
|
|
/* Maximum value */
|
2023-04-05 10:58:13 +02:00
|
|
|
GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2,
|
|
|
|
float16_maximum_number)
|
|
|
|
GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4,
|
|
|
|
float32_maximum_number)
|
|
|
|
GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8,
|
|
|
|
float64_maximum_number)
|
2020-07-01 17:25:35 +02:00
|
|
|
|
|
|
|
/* Minimum value */
|
2023-04-05 10:58:13 +02:00
|
|
|
GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2,
|
|
|
|
float16_minimum_number)
|
|
|
|
GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4,
|
|
|
|
float32_minimum_number)
|
|
|
|
GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8,
|
|
|
|
float64_minimum_number)
|
2020-07-01 17:25:36 +02:00
|
|
|
|
2022-08-17 09:48:01 +02:00
|
|
|
/* Vector Widening Floating-Point Add Instructions */
|
|
|
|
static uint32_t fwadd16(uint32_t a, uint16_t b, float_status *s)
|
2020-07-01 17:25:36 +02:00
|
|
|
{
|
2022-08-17 09:48:01 +02:00
|
|
|
return float32_add(a, float16_to_float32(b, true, s), s);
|
2020-07-01 17:25:36 +02:00
|
|
|
}
|
|
|
|
|
2022-08-17 09:48:01 +02:00
|
|
|
static uint64_t fwadd32(uint64_t a, uint32_t b, float_status *s)
|
2020-07-01 17:25:36 +02:00
|
|
|
{
|
2022-08-17 09:48:01 +02:00
|
|
|
return float64_add(a, float32_to_float64(b, s), s);
|
2020-07-01 17:25:36 +02:00
|
|
|
}
|
2020-07-01 17:25:37 +02:00
|
|
|
|
2022-08-17 09:48:01 +02:00
|
|
|
/* Vector Widening Floating-Point Reduction Instructions */
|
2022-08-17 09:48:02 +02:00
|
|
|
/* Ordered/unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
|
|
|
|
GEN_VEXT_FRED(vfwredusum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
|
|
|
|
GEN_VEXT_FRED(vfwredusum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
|
|
|
|
GEN_VEXT_FRED(vfwredosum_vs_h, uint32_t, uint16_t, H4, H2, fwadd16)
|
|
|
|
GEN_VEXT_FRED(vfwredosum_vs_w, uint64_t, uint32_t, H8, H4, fwadd32)
|
2022-08-17 09:48:01 +02:00
|
|
|
|
2020-07-01 17:25:37 +02:00
|
|
|
/*
|
2023-04-05 10:58:12 +02:00
|
|
|
* Vector Mask Operations
|
2020-07-01 17:25:37 +02:00
|
|
|
*/
|
|
|
|
/* Vector Mask-Register Logical Instructions */
|
|
|
|
#define GEN_VEXT_MASK_VV(NAME, OP) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, \
|
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
2023-02-26 18:05:14 +01:00
|
|
|
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
|
2022-06-06 08:16:35 +02:00
|
|
|
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
|
2020-07-01 17:25:37 +02:00
|
|
|
uint32_t i; \
|
|
|
|
int a, b; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
a = vext_elem_mask(vs1, i); \
|
|
|
|
b = vext_elem_mask(vs2, i); \
|
|
|
|
vext_set_elem_mask(vd, i, OP(b, a)); \
|
2020-07-01 17:25:37 +02:00
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2023-04-05 10:58:12 +02:00
|
|
|
/*
|
|
|
|
* mask destination registers are always tail-agnostic
|
|
|
|
* set tail elements to 1s
|
2022-06-06 08:16:35 +02:00
|
|
|
*/ \
|
|
|
|
if (vta_all_1s) { \
|
|
|
|
for (; i < total_elems; i++) { \
|
|
|
|
vext_set_elem_mask(vd, i, 1); \
|
|
|
|
} \
|
|
|
|
} \
|
2020-07-01 17:25:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
#define DO_NAND(N, M) (!(N & M))
|
|
|
|
#define DO_ANDNOT(N, M) (N & !M)
|
|
|
|
#define DO_NOR(N, M) (!(N | M))
|
|
|
|
#define DO_ORNOT(N, M) (N | !M)
|
|
|
|
#define DO_XNOR(N, M) (!(N ^ M))
|
|
|
|
|
|
|
|
GEN_VEXT_MASK_VV(vmand_mm, DO_AND)
|
|
|
|
GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND)
|
2021-12-10 08:57:01 +01:00
|
|
|
GEN_VEXT_MASK_VV(vmandn_mm, DO_ANDNOT)
|
2020-07-01 17:25:37 +02:00
|
|
|
GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR)
|
|
|
|
GEN_VEXT_MASK_VV(vmor_mm, DO_OR)
|
|
|
|
GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR)
|
2021-12-10 08:57:01 +01:00
|
|
|
GEN_VEXT_MASK_VV(vmorn_mm, DO_ORNOT)
|
2020-07-01 17:25:37 +02:00
|
|
|
GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR)
|
2020-07-01 17:25:38 +02:00
|
|
|
|
2021-12-10 08:56:15 +01:00
|
|
|
/* Vector count population in mask vcpop */
|
|
|
|
target_ulong HELPER(vcpop_m)(void *v0, void *vs2, CPURISCVState *env,
|
|
|
|
uint32_t desc)
|
2020-07-01 17:25:38 +02:00
|
|
|
{
|
|
|
|
target_ulong cnt = 0;
|
|
|
|
uint32_t vm = vext_vm(desc);
|
|
|
|
uint32_t vl = env->vl;
|
|
|
|
int i;
|
|
|
|
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) {
|
2021-12-10 08:55:58 +01:00
|
|
|
if (vm || vext_elem_mask(v0, i)) {
|
|
|
|
if (vext_elem_mask(vs2, i)) {
|
2020-07-01 17:25:38 +02:00
|
|
|
cnt++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0;
|
2020-07-01 17:25:38 +02:00
|
|
|
return cnt;
|
|
|
|
}
|
2020-07-01 17:25:39 +02:00
|
|
|
|
2023-04-05 10:58:12 +02:00
|
|
|
/* vfirst find-first-set mask bit */
|
2021-12-10 08:56:16 +01:00
|
|
|
target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env,
|
|
|
|
uint32_t desc)
|
2020-07-01 17:25:39 +02:00
|
|
|
{
|
|
|
|
uint32_t vm = vext_vm(desc);
|
|
|
|
uint32_t vl = env->vl;
|
|
|
|
int i;
|
|
|
|
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) {
|
2021-12-10 08:55:58 +01:00
|
|
|
if (vm || vext_elem_mask(v0, i)) {
|
|
|
|
if (vext_elem_mask(vs2, i)) {
|
2020-07-01 17:25:39 +02:00
|
|
|
return i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0;
|
2020-07-01 17:25:39 +02:00
|
|
|
return -1LL;
|
|
|
|
}
|
2020-07-01 17:25:40 +02:00
|
|
|
|
|
|
|
enum set_mask_type {
|
|
|
|
ONLY_FIRST = 1,
|
|
|
|
INCLUDE_FIRST,
|
|
|
|
BEFORE_FIRST,
|
|
|
|
};
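/*
 * Shared implementation of vmsbf.m (BEFORE_FIRST), vmsif.m (INCLUDE_FIRST)
 * and vmsof.m (ONLY_FIRST).  For an active source mask 0 0 1 0 1 0 (first
 * set bit at element 2) the results are:
 *   vmsbf.m: 1 1 0 0 0 0
 *   vmsif.m: 1 1 1 0 0 0
 *   vmsof.m: 0 0 1 0 0 0
 */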
|
|
|
|
|
|
|
|
static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
|
|
|
|
uint32_t desc, enum set_mask_type type)
|
|
|
|
{
|
|
|
|
uint32_t vm = vext_vm(desc);
|
|
|
|
uint32_t vl = env->vl;
|
2023-02-26 18:05:14 +01:00
|
|
|
uint32_t total_elems = riscv_cpu_cfg(env)->vlen;
|
2022-06-06 08:16:35 +02:00
|
|
|
uint32_t vta_all_1s = vext_vta_all_1s(desc);
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc);
|
2020-07-01 17:25:40 +02:00
|
|
|
int i;
|
|
|
|
bool first_mask_bit = false;
|
|
|
|
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) {
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) {
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */
|
|
|
|
if (vma) {
|
|
|
|
vext_set_elem_mask(vd, i, 1);
|
|
|
|
}
|
2020-07-01 17:25:40 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* write a zero to all following active elements */
|
|
|
|
if (first_mask_bit) {
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, 0);
|
2020-07-01 17:25:40 +02:00
|
|
|
continue;
|
|
|
|
}
|
2021-12-10 08:55:58 +01:00
|
|
|
if (vext_elem_mask(vs2, i)) {
|
2020-07-01 17:25:40 +02:00
|
|
|
first_mask_bit = true;
|
|
|
|
if (type == BEFORE_FIRST) {
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, 0);
|
2020-07-01 17:25:40 +02:00
|
|
|
} else {
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, 1);
|
2020-07-01 17:25:40 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (type == ONLY_FIRST) {
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, 0);
|
2020-07-01 17:25:40 +02:00
|
|
|
} else {
|
2021-12-10 08:55:58 +01:00
|
|
|
vext_set_elem_mask(vd, i, 1);
|
2020-07-01 17:25:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0;
|
2023-04-05 10:58:12 +02:00
|
|
|
/*
|
|
|
|
* mask destination registers are always tail-agnostic
|
|
|
|
* set tail elements to 1s
|
|
|
|
*/
|
2022-06-06 08:16:35 +02:00
|
|
|
if (vta_all_1s) {
|
|
|
|
for (; i < total_elems; i++) {
|
|
|
|
vext_set_elem_mask(vd, i, 1);
|
|
|
|
}
|
|
|
|
}
|
2020-07-01 17:25:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
|
|
|
|
uint32_t desc)
|
|
|
|
{
|
|
|
|
vmsetm(vd, v0, vs2, env, desc, BEFORE_FIRST);
|
|
|
|
}
|
|
|
|
|
|
|
|
void HELPER(vmsif_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
|
|
|
|
uint32_t desc)
|
|
|
|
{
|
|
|
|
vmsetm(vd, v0, vs2, env, desc, INCLUDE_FIRST);
|
|
|
|
}
|
|
|
|
|
|
|
|
void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
|
|
|
|
uint32_t desc)
|
|
|
|
{
|
|
|
|
vmsetm(vd, v0, vs2, env, desc, ONLY_FIRST);
|
|
|
|
}
|
2020-07-01 17:25:41 +02:00
|
|
|
|
|
|
|
/* Vector Iota Instruction */
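/*
 * Each active destination element receives the count of set mask bits in
 * vs2 at indices strictly below its own; e.g. a source mask of 1 0 1 0 1
 * yields vd = 0 1 1 2 2.
 */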
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H) \
|
2020-07-01 17:25:41 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:35 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:41 +02:00
|
|
|
uint32_t sum = 0; \
|
|
|
|
int i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2020-07-01 17:25:41 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
*((ETYPE *)vd + H(i)) = sum; \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (vext_elem_mask(vs2, i)) { \
|
2020-07-01 17:25:41 +02:00
|
|
|
sum++; \
|
|
|
|
} \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:35 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:41 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1)
|
|
|
|
GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2)
|
|
|
|
GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4)
|
|
|
|
GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8)
|
2020-07-01 17:25:42 +02:00
|
|
|
|
|
|
|
/* Vector Element Index Instruction */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VID_V(NAME, ETYPE, H) \
|
2020-07-01 17:25:42 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:35 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:11 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2020-07-01 17:25:42 +02:00
|
|
|
int i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:11 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2020-07-01 17:25:42 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
*((ETYPE *)vd + H(i)) = i; \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:35 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:42 +02:00
|
|
|
}
|
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VID_V(vid_v_b, uint8_t, H1)
|
|
|
|
GEN_VEXT_VID_V(vid_v_h, uint16_t, H2)
|
|
|
|
GEN_VEXT_VID_V(vid_v_w, uint32_t, H4)
|
|
|
|
GEN_VEXT_VID_V(vid_v_d, uint64_t, H8)
|
2020-07-01 17:25:46 +02:00
|
|
|
|
|
|
|
/*
|
2023-04-05 10:58:12 +02:00
|
|
|
* Vector Permutation Instructions
|
2020-07-01 17:25:46 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Vector Slide Instructions */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H) \
|
2020-07-01 17:25:46 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-12-10 08:56:52 +01:00
|
|
|
target_ulong offset = s1, i_min, i; \
|
2020-07-01 17:25:46 +02:00
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
i_min = MAX(env->vstart, offset); \
|
|
|
|
for (i = i_min; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2020-07-01 17:25:46 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
|
|
|
|
} \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1)
|
|
|
|
GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2)
|
|
|
|
GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4)
|
|
|
|
GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8)
|
2020-07-01 17:25:46 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H) \
|
2020-07-01 17:25:46 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2021-12-10 08:56:36 +01:00
|
|
|
uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
|
2020-07-01 17:25:46 +02:00
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2023-09-25 06:30:22 +02:00
|
|
|
target_ulong i_max, i_min, i; \
|
2020-07-01 17:25:46 +02:00
|
|
|
\
|
2023-09-25 06:30:22 +02:00
|
|
|
i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
|
|
|
|
i_max = MAX(i_min, env->vstart); \
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < i_max; ++i) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
|
|
|
continue; \
|
2021-12-10 08:56:36 +01:00
|
|
|
} \
|
2022-06-20 08:51:12 +02:00
|
|
|
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
|
2021-12-10 08:56:36 +01:00
|
|
|
} \
|
|
|
|
\
|
|
|
|
for (i = i_max; i < vl; ++i) { \
|
|
|
|
if (vm || vext_elem_mask(v0, i)) { \
|
|
|
|
*((ETYPE *)vd + H(i)) = 0; \
|
2020-07-01 17:25:46 +02:00
|
|
|
} \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
\
|
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1)
|
|
|
|
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2)
|
|
|
|
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
|
|
|
|
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
|
2020-07-01 17:25:46 +02:00
|
|
|
|
2022-06-06 08:16:16 +02:00
|
|
|
#define GEN_VEXT_VSLIE1UP(BITWIDTH, H) \
|
2023-02-13 10:45:50 +01:00
|
|
|
static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
|
2023-04-05 10:58:11 +02:00
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
2021-12-10 08:56:37 +01:00
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
typedef uint##BITWIDTH##_t ETYPE; \
|
2021-12-10 08:56:37 +01:00
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-12-10 08:56:37 +01:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:56:37 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2021-12-10 08:56:37 +01:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
if (i == 0) { \
|
|
|
|
*((ETYPE *)vd + H(i)) = s1; \
|
|
|
|
} else { \
|
|
|
|
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
|
|
|
|
} \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2021-12-10 08:56:37 +01:00
|
|
|
}
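/*
 * vslide1up body: element 0 of vd receives the scalar s1 and element i > 0
 * receives vs2[i - 1]; masked-off elements follow the usual vma policy.
 * The vslide1up_8/16/32/64 helpers generated here are shared by the integer
 * (vslide1up.vx) and floating-point (vfslide1up.vf) front ends below.
 */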
|
|
|
|
|
|
|
|
GEN_VEXT_VSLIE1UP(8, H1)
|
|
|
|
GEN_VEXT_VSLIE1UP(16, H2)
|
|
|
|
GEN_VEXT_VSLIE1UP(32, H4)
|
|
|
|
GEN_VEXT_VSLIE1UP(64, H8)
|
|
|
|
|
2022-06-06 08:16:16 +02:00
|
|
|
#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH) \
|
2021-12-10 08:56:37 +01:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
|
2020-07-01 17:25:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
|
2021-12-10 08:56:37 +01:00
|
|
|
GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, 8)
|
|
|
|
GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, 16)
|
|
|
|
GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
|
|
|
|
GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
|
|
|
|
|
2022-06-06 08:16:16 +02:00
|
|
|
#define GEN_VEXT_VSLIDE1DOWN(BITWIDTH, H) \
|
2023-02-13 10:45:50 +01:00
|
|
|
static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
|
2023-04-05 10:58:11 +02:00
|
|
|
void *vs2, CPURISCVState *env, \
|
|
|
|
uint32_t desc) \
|
2021-12-10 08:56:37 +01:00
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
typedef uint##BITWIDTH##_t ETYPE; \
|
2021-12-10 08:56:37 +01:00
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-12-10 08:56:37 +01:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:56:37 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2021-12-10 08:56:37 +01:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
if (i == vl - 1) { \
|
|
|
|
*((ETYPE *)vd + H(i)) = s1; \
|
|
|
|
} else { \
|
|
|
|
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
|
|
|
|
} \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2021-12-10 08:56:37 +01:00
|
|
|
}
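/*
 * vslide1down body: element i of vd receives vs2[i + 1], except for the
 * last active element (i == vl - 1), which receives the scalar s1.  As with
 * vslide1up, the generated vslide1down_8/16/32/64 helpers are reused by the
 * floating-point front end below.
 */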
|
|
|
|
|
|
|
|
GEN_VEXT_VSLIDE1DOWN(8, H1)
|
|
|
|
GEN_VEXT_VSLIDE1DOWN(16, H2)
|
|
|
|
GEN_VEXT_VSLIDE1DOWN(32, H4)
|
|
|
|
GEN_VEXT_VSLIDE1DOWN(64, H8)
|
|
|
|
|
2022-06-06 08:16:16 +02:00
|
|
|
#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH) \
|
2021-12-10 08:56:37 +01:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
|
2020-07-01 17:25:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
|
2021-12-10 08:56:37 +01:00
|
|
|
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, 8)
|
|
|
|
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, 16)
|
|
|
|
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, 32)
|
|
|
|
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, 64)
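/*
 * Illustrative example (not generated code): with vl = 4 and all elements
 * unmasked,
 *   vslide1down.vx vd, vs2, rs1
 * yields vd = { vs2[1], vs2[2], vs2[3], x[rs1] }.
 */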
|
|
|
|
|
|
|
|
/* Vector Floating-Point Slide Instructions */
|
2022-06-06 08:16:16 +02:00
|
|
|
#define GEN_VEXT_VFSLIDE1UP_VF(NAME, BITWIDTH) \
|
2021-12-10 08:56:37 +01:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
|
2021-12-10 08:56:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */
|
|
|
|
GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h, 16)
|
|
|
|
GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w, 32)
|
|
|
|
GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d, 64)
|
|
|
|
|
2022-06-06 08:16:16 +02:00
|
|
|
#define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, BITWIDTH) \
|
2021-12-10 08:56:37 +01:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2022-06-06 08:16:16 +02:00
|
|
|
vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
|
2021-12-10 08:56:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */
|
|
|
|
GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h, 16)
|
|
|
|
GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w, 32)
|
|
|
|
GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
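/*
 * The vfslide1up.vf/vfslide1down.vf helpers above forward straight to the
 * integer slide1 bodies: the scalar arrives as a uint64_t bit pattern and
 * slides only move element bits, so no floating-point arithmetic is needed
 * here.
 */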
|
2020-07-01 17:25:47 +02:00
|
|
|
|
|
|
|
/* Vector Register Gather Instructions */
|
2021-12-10 08:56:21 +01:00
|
|
|
#define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2) \
|
2020-07-01 17:25:47 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2021-12-10 08:56:52 +01:00
|
|
|
uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \
|
2020-07-01 17:25:47 +02:00
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(TS2); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-04-19 08:03:01 +02:00
|
|
|
uint64_t index; \
|
|
|
|
uint32_t i; \
|
2020-07-01 17:25:47 +02:00
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2020-07-01 17:25:47 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
2021-12-10 08:56:21 +01:00
|
|
|
index = *((TS1 *)vs1 + HS1(i)); \
|
2020-07-01 17:25:47 +02:00
|
|
|
if (index >= vlmax) { \
|
2021-12-10 08:56:21 +01:00
|
|
|
*((TS2 *)vd + HS2(i)) = 0; \
|
2020-07-01 17:25:47 +02:00
|
|
|
} else { \
|
2021-12-10 08:56:21 +01:00
|
|
|
*((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index)); \
|
2020-07-01 17:25:47 +02:00
|
|
|
} \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:47 +02:00
|
|
|
}
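/*
 * Gather sketch (illustrative): for each active element i, an index is read
 * from vs1 as a TS1 value (uint16_t for the vrgatherei16 variants below)
 * and
 *   vd[i] = (index < vlmax) ? vs2[index] : 0;
 * Keeping separate TS1/TS2 types is what lets vrgatherei16 use 16-bit
 * indices with a data vector of any SEW.
 */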
|
|
|
|
|
|
|
|
/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
|
2021-12-10 08:56:21 +01:00
|
|
|
GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, uint8_t, H1, H1)
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, uint16_t, H2, H2)
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, uint32_t, H4, H4)
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, uint64_t, H8, H8)
|
|
|
|
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_b, uint16_t, uint8_t, H2, H1)
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_h, uint16_t, uint16_t, H2, H2)
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_w, uint16_t, uint32_t, H2, H4)
|
|
|
|
GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_d, uint16_t, uint64_t, H2, H8)
|
2020-07-01 17:25:47 +02:00
|
|
|
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H) \
|
2020-07-01 17:25:47 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
2021-12-10 08:56:11 +01:00
|
|
|
uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
|
2020-07-01 17:25:47 +02:00
|
|
|
uint32_t vm = vext_vm(desc); \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-04-19 08:03:01 +02:00
|
|
|
uint64_t index = s1; \
|
|
|
|
uint32_t i; \
|
2020-07-01 17:25:47 +02:00
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2020-07-01 17:25:47 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
if (index >= vlmax) { \
|
|
|
|
*((ETYPE *)vd + H(i)) = 0; \
|
|
|
|
} else { \
|
|
|
|
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
|
|
|
|
} \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1)
|
|
|
|
GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2)
|
|
|
|
GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4)
|
|
|
|
GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8)
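/*
 * Illustrative example (not generated code): vrgather.vx with x[rs1] = 3
 * and vlmax > 3 broadcasts vs2[3] into every active element of vd; with
 * x[rs1] >= vlmax every active element is written with 0.
 */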
|
2020-07-01 17:25:48 +02:00
|
|
|
|
|
|
|
/* Vector Compress Instruction */
|
2021-12-10 08:56:00 +01:00
|
|
|
#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H) \
|
2020-07-01 17:25:48 +02:00
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2020-07-01 17:25:48 +02:00
|
|
|
uint32_t num = 0, i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:55:58 +01:00
|
|
|
if (!vext_elem_mask(vs1, i)) { \
|
2020-07-01 17:25:48 +02:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
*((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \
|
|
|
|
num++; \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2020-07-01 17:25:48 +02:00
|
|
|
}
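/*
 * Compression sketch: elements of vs2 whose bit in vs1 is set are packed,
 * in order, into vd[0], vd[1], ... (num counts how many have been written).
 * Body elements of vd from num up to vl are left untouched by this loop;
 * only the tail from vl upwards is subject to the vta policy.
 */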
|
|
|
|
|
|
|
|
/* Compress into vd the elements of vs2 whose bit in vs1 is set */
|
2021-12-10 08:56:00 +01:00
|
|
|
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1)
|
|
|
|
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
|
|
|
|
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
|
|
|
|
GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
|
2021-12-10 08:56:26 +01:00
|
|
|
|
2021-12-10 08:56:52 +01:00
|
|
|
/* Vector Whole Register Move */
|
2022-03-25 09:59:02 +01:00
|
|
|
void HELPER(vmvr_v)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
|
|
|
|
{
|
2022-03-30 04:13:16 +02:00
|
|
|
/* EEW = SEW */
|
2022-03-25 09:59:02 +01:00
|
|
|
uint32_t maxsz = simd_maxsz(desc);
|
2022-03-30 04:13:16 +02:00
|
|
|
uint32_t sewb = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
|
|
|
|
uint32_t startb = env->vstart * sewb;
|
|
|
|
uint32_t i = startb;
|
2022-03-25 09:59:02 +01:00
|
|
|
|
|
|
|
memcpy((uint8_t *)vd + H1(i),
|
|
|
|
(uint8_t *)vs2 + H1(i),
|
2022-03-30 04:13:16 +02:00
|
|
|
maxsz - startb);
|
2021-12-10 08:56:52 +01:00
|
|
|
|
2022-03-25 09:59:02 +01:00
|
|
|
env->vstart = 0;
|
|
|
|
}
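/*
 * vmv<nr>r.v is a plain byte copy: the helper moves the maxsz - startb
 * bytes that remain, where startb skips the bytes already copied when the
 * helper is re-entered with a non-zero vstart (vstart counts SEW-sized
 * elements, hence the multiplication by sewb).
 */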
|
2021-12-10 08:56:52 +01:00
|
|
|
|
2021-12-10 08:56:26 +01:00
|
|
|
/* Vector Integer Extension */
|
|
|
|
#define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1) \
|
|
|
|
void HELPER(NAME)(void *vd, void *v0, void *vs2, \
|
|
|
|
CPURISCVState *env, uint32_t desc) \
|
|
|
|
{ \
|
|
|
|
uint32_t vl = env->vl; \
|
|
|
|
uint32_t vm = vext_vm(desc); \
|
2022-06-06 08:16:56 +02:00
|
|
|
uint32_t esz = sizeof(ETYPE); \
|
|
|
|
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
|
|
|
|
uint32_t vta = vext_vta(desc); \
|
2022-06-20 08:51:12 +02:00
|
|
|
uint32_t vma = vext_vma(desc); \
|
2021-12-10 08:56:26 +01:00
|
|
|
uint32_t i; \
|
|
|
|
\
|
2021-12-10 08:56:52 +01:00
|
|
|
for (i = env->vstart; i < vl; i++) { \
|
2021-12-10 08:56:26 +01:00
|
|
|
if (!vm && !vext_elem_mask(v0, i)) { \
|
2022-06-20 08:51:12 +02:00
|
|
|
/* set masked-off elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
|
2021-12-10 08:56:26 +01:00
|
|
|
continue; \
|
|
|
|
} \
|
|
|
|
*((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
|
|
|
|
} \
|
2021-12-10 08:56:52 +01:00
|
|
|
env->vstart = 0; \
|
2022-06-06 08:16:56 +02:00
|
|
|
/* set tail elements to 1s */ \
|
|
|
|
vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
|
2021-12-10 08:56:26 +01:00
|
|
|
}
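/*
 * The widening integer extensions rely on the C assignment to do the work:
 * DTYPE is the narrow source type and ETYPE the wide destination type, so
 * the store zero-extends for the unsigned (vzext) instantiations below and
 * sign-extends for the signed (vsext) ones.
 */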
|
|
|
|
|
|
|
|
GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
|
|
|
|
GEN_VEXT_INT_EXT(vzext_vf2_w, uint32_t, uint16_t, H4, H2)
|
|
|
|
GEN_VEXT_INT_EXT(vzext_vf2_d, uint64_t, uint32_t, H8, H4)
|
|
|
|
GEN_VEXT_INT_EXT(vzext_vf4_w, uint32_t, uint8_t, H4, H1)
|
|
|
|
GEN_VEXT_INT_EXT(vzext_vf4_d, uint64_t, uint16_t, H8, H2)
|
|
|
|
GEN_VEXT_INT_EXT(vzext_vf8_d, uint64_t, uint8_t, H8, H1)
|
|
|
|
|
|
|
|
GEN_VEXT_INT_EXT(vsext_vf2_h, int16_t, int8_t, H2, H1)
|
|
|
|
GEN_VEXT_INT_EXT(vsext_vf2_w, int32_t, int16_t, H4, H2)
|
|
|
|
GEN_VEXT_INT_EXT(vsext_vf2_d, int64_t, int32_t, H8, H4)
|
|
|
|
GEN_VEXT_INT_EXT(vsext_vf4_w, int32_t, int8_t, H4, H1)
|
|
|
|
GEN_VEXT_INT_EXT(vsext_vf4_d, int64_t, int16_t, H8, H2)
|
|
|
|
GEN_VEXT_INT_EXT(vsext_vf8_d, int64_t, int8_t, H8, H1)
|