Merge tag 'pull-riscv-to-apply-20211220-1' of github.com:alistair23/qemu into staging

First RISC-V PR for QEMU 7.0

 - Add support for ratified 1.0 Vector extension
 - Drop support for draft 0.7.1 Vector extension
 - Support Zfhmin and Zfh extensions
 - Improve kernel loading for non-Linux platforms

# gpg: Signature made Sun 19 Dec 2021 08:56:08 PM PST
# gpg:                using RSA key F6C4AC46D4934868D3B8CE8F21E10D29DF977054
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: F6C4 AC46 D493 4868 D3B8  CE8F 21E1 0D29 DF97 7054

* tag 'pull-riscv-to-apply-20211220-1' of github.com:alistair23/qemu: (88 commits)
  hw/riscv: Use load address rather than entry point for fw_dynamic next_addr
  target/riscv: Enable bitmanip Zb[abcs] instructions
  riscv: Set 5.4 as minimum kernel version for riscv32
  target/riscv: rvv-1.0: Add ELEN checks for widening and narrowing instructions
  target/riscv: rvv-1.0: update opivv_vadc_check() comment
  target/riscv: rvv-1.0: rename vmandnot.mm and vmornot.mm to vmandn.mm and vmorn.mm
  target/riscv: rvv-1.0: add vector unit-stride mask load/store insns
  target/riscv: rvv-1.0: add evl parameter to vext_ldst_us()
  target/riscv: rvv-1.0: add vsetivli instruction
  target/riscv: rvv-1.0: rename r2_zimm to r2_zimm11
  target/riscv: rvv-1.0: floating-point reciprocal estimate instruction
  target/riscv: rvv-1.0: floating-point reciprocal square-root estimate instruction
  target/riscv: gdb: support vector registers for rv64 & rv32
  target/riscv: rvv-1.0: trigger illegal instruction exception if frm is not valid
  target/riscv: rvv-1.0: implement vstart CSR
  target/riscv: rvv-1.0: relax RV_VLEN_MAX to 1024-bits
  target/riscv: rvv-1.0: narrowing floating-point/integer type-convert
  target/riscv: add "set round to odd" rounding mode helper function
  target/riscv: rvv-1.0: widening floating-point/integer type-convert
  target/riscv: rvv-1.0: floating-point/integer type-convert instructions
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit c7d773ae49
Richard Henderson, 2021-12-20 10:25:40 -08:00
16 changed files with 4999 additions and 3101 deletions
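
With these patches, the 1.0 vector extension and the new half-precision float support are exposed as ordinary CPU properties (see the riscv_cpu_properties[] hunk below). A plausible invocation, assuming the standard -cpu property syntax:

    qemu-system-riscv64 -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0,Zfh=true ...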


@ -151,12 +151,19 @@ target_ulong riscv_load_kernel(const char *kernel_filename,
target_ulong kernel_start_addr,
symbol_fn_t sym_cb)
{
uint64_t kernel_entry;
uint64_t kernel_load_base, kernel_entry;
/*
* NB: Use low address not ELF entry point to ensure that the fw_dynamic
* behaviour when loading an ELF matches the fw_payload, fw_jump and BBL
* behaviour, as well as fw_dynamic with a raw binary, all of which jump to
* the (expected) load address. This allows kernels to have
* separate SBI and ELF entry points (used by FreeBSD, for example).
*/
if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
&kernel_entry, NULL, NULL, NULL, 0,
NULL, &kernel_load_base, NULL, NULL, 0,
EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
return kernel_entry;
return kernel_load_base;
}
if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,


@ -45,10 +45,11 @@ struct target_pt_regs {
#ifdef TARGET_RISCV32
#define UNAME_MACHINE "riscv32"
#define UNAME_MINIMUM_RELEASE "5.4.0"
#else
#define UNAME_MACHINE "riscv64"
#endif
#define UNAME_MINIMUM_RELEASE "4.15.0"
#endif
#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1


@ -523,7 +523,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
ext |= RVH;
}
if (cpu->cfg.ext_v) {
int vext_version = VEXT_VERSION_0_07_1;
int vext_version = VEXT_VERSION_1_00_0;
ext |= RVV;
if (!is_power_of_2(cpu->cfg.vlen)) {
error_setg(errp,
@ -548,8 +548,8 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
return;
}
if (cpu->cfg.vext_spec) {
if (!g_strcmp0(cpu->cfg.vext_spec, "v0.7.1")) {
vext_version = VEXT_VERSION_0_07_1;
if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
vext_version = VEXT_VERSION_1_00_0;
} else {
error_setg(errp,
"Unsupported vector spec version '%s'",
@ -558,7 +558,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
}
} else {
qemu_log("vector version is not specified, "
"use the default value v0.7.1\n");
"use the default value v1.0\n");
}
set_vext_version(env, vext_version);
}
@ -626,25 +626,27 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
/* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("x-zba", RISCVCPU, cfg.ext_zba, false),
DEFINE_PROP_BOOL("x-zbb", RISCVCPU, cfg.ext_zbb, false),
DEFINE_PROP_BOOL("x-zbc", RISCVCPU, cfg.ext_zbc, false),
DEFINE_PROP_BOOL("x-zbs", RISCVCPU, cfg.ext_zbs, false),
DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false),
DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
/* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
/* ePMP 0.9.3 */
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
@ -673,6 +675,8 @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
if (strcmp(xmlname, "riscv-csr.xml") == 0) {
return cpu->dyn_csr_xml;
} else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
return cpu->dyn_vreg_xml;
}
return NULL;


@ -81,7 +81,7 @@ enum {
#define PRIV_VERSION_1_10_0 0x00011000
#define PRIV_VERSION_1_11_0 0x00011100
#define VEXT_VERSION_0_07_1 0x00000701
#define VEXT_VERSION_1_00_0 0x00010000
enum {
TRANSLATE_SUCCESS,
@ -100,12 +100,14 @@ typedef struct CPURISCVState CPURISCVState;
#include "pmp.h"
#endif
#define RV_VLEN_MAX 256
#define RV_VLEN_MAX 1024
FIELD(VTYPE, VLMUL, 0, 2)
FIELD(VTYPE, VSEW, 2, 3)
FIELD(VTYPE, VEDIV, 5, 2)
FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
struct CPURISCVState {
@ -289,6 +291,7 @@ struct RISCVCPU {
CPURISCVState env;
char *dyn_csr_xml;
char *dyn_vreg_xml;
/* Configuration Settings */
struct {
@ -312,6 +315,8 @@ struct RISCVCPU {
bool ext_counters;
bool ext_ifencei;
bool ext_icsr;
bool ext_zfh;
bool ext_zfhmin;
char *priv_spec;
char *user_spec;
@ -350,6 +355,7 @@ int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
@ -393,23 +399,27 @@ void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
#define TB_FLAGS_PRIV_MMU_MASK 3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
#define TB_FLAGS_MSTATUS_VS MSTATUS_VS
typedef CPURISCVState CPUArchState;
typedef RISCVCPU ArchCPU;
#include "exec/cpu-all.h"
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 3, 1)
FIELD(TB_FLAGS, LMUL, 4, 2)
FIELD(TB_FLAGS, LMUL, 3, 3)
FIELD(TB_FLAGS, SEW, 6, 3)
FIELD(TB_FLAGS, VILL, 9, 1)
/* Skip MSTATUS_VS (0x600) bits */
FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
FIELD(TB_FLAGS, VILL, 12, 1)
/* Skip MSTATUS_FS (0x6000) bits */
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 10, 1)
FIELD(TB_FLAGS, MSTATUS_HS_FS, 11, 2)
FIELD(TB_FLAGS, HLSX, 15, 1)
FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2)
FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 13, 2)
FIELD(TB_FLAGS, XL, 20, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_ENABLED, 15, 1)
FIELD(TB_FLAGS, PM_ENABLED, 22, 1)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
@ -421,18 +431,27 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
#endif
/*
* A simplification for VLMAX
* = (1 << LMUL) * VLEN / (8 * (1 << SEW))
* = (VLEN << LMUL) / (8 << SEW)
* = (VLEN << LMUL) >> (SEW + 3)
* = VLEN >> (SEW + 3 - LMUL)
* Encode LMUL to lmul as follows:
* LMUL vlmul lmul
* 1 000 0
* 2 001 1
* 4 010 2
* 8 011 3
* - 100 -
* 1/8 101 -3
* 1/4 110 -2
* 1/2 111 -1
*
* then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
* e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
* => VLMAX = vlen >> (1 + 3 - (-3))
* = 256 >> 7
* = 2
*/
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
uint8_t sew, lmul;
sew = FIELD_EX64(vtype, VTYPE, VSEW);
lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
return cpu->cfg.vlen >> (sew + 3 - lmul);
}
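
The comment above reduces VLMAX = (VLEN / SEW) * LMUL to a single shift once the 3-bit vlmul field is treated as a signed value. A minimal standalone sketch of the same arithmetic (my own illustration, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the 3-bit vlmul field to get lmul in [-3, 3]. */
    static int8_t sext3(uint8_t vlmul)
    {
        return (int8_t)(vlmul << 5) >> 5;
    }

    int main(void)
    {
        uint16_t vlen = 256;
        uint8_t vsew = 1;   /* SEW = 8 << vsew = 16 bits */
        uint8_t vlmul = 5;  /* encodes LMUL = 1/8, i.e. lmul = -3 */
        uint32_t vlmax = vlen >> (vsew + 3 - sext3(vlmul));
        printf("VLMAX = %u\n", vlmax);  /* prints 2, matching the example above */
        return 0;
    }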


@ -60,8 +60,16 @@
#define CSR_VSTART 0x008
#define CSR_VXSAT 0x009
#define CSR_VXRM 0x00a
#define CSR_VCSR 0x00f
#define CSR_VL 0xc20
#define CSR_VTYPE 0xc21
#define CSR_VLENB 0xc22
/* VCSR fields */
#define VCSR_VXSAT_SHIFT 0
#define VCSR_VXSAT (0x1 << VCSR_VXSAT_SHIFT)
#define VCSR_VXRM_SHIFT 1
#define VCSR_VXRM (0x3 << VCSR_VXRM_SHIFT)
/* User Timers and Counters */
#define CSR_CYCLE 0xc00
@ -375,6 +383,7 @@
#define MSTATUS_UBE 0x00000040
#define MSTATUS_MPIE 0x00000080
#define MSTATUS_SPP 0x00000100
#define MSTATUS_VS 0x00000600
#define MSTATUS_MPP 0x00001800
#define MSTATUS_FS 0x00006000
#define MSTATUS_XS 0x00018000
@ -408,6 +417,7 @@ typedef enum {
#define SSTATUS_UPIE 0x00000010
#define SSTATUS_SPIE 0x00000020
#define SSTATUS_SPP 0x00000100
#define SSTATUS_VS 0x00000600
#define SSTATUS_FS 0x00006000
#define SSTATUS_XS 0x00018000
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */


@ -75,12 +75,22 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
*cs_base = 0;
if (riscv_has_ext(env, RVV)) {
/*
* If env->vl equals VLMAX, we can use generic vector operation
* expanders (GVEC) to accelerate the vector operations.
* However, as LMUL can be a fractional number, the maximum vector
* size that can be operated on might be less than 8 bytes, which
* is not supported by GVEC. So we set the vl_eq_vlmax flag to true
* only when maxsz >= 8 bytes. For example, vlen = 128, SEW = 8 and
* LMUL = 1/8 give vlmax = 2 and maxsz = 2 bytes, too small for GVEC.
*/
uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
uint32_t maxsz = vlmax << sew;
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
(maxsz >= 8);
flags = FIELD_DP32(flags, TB_FLAGS, VILL,
FIELD_EX64(env->vtype, VTYPE, VILL));
flags = FIELD_DP32(flags, TB_FLAGS, SEW,
FIELD_EX64(env->vtype, VTYPE, VSEW));
flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
FIELD_EX64(env->vtype, VTYPE, VLMUL));
flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
@ -90,12 +100,17 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
#ifdef CONFIG_USER_ONLY
flags |= TB_FLAGS_MSTATUS_FS;
flags |= TB_FLAGS_MSTATUS_VS;
#else
flags |= cpu_mmu_index(env, 0);
if (riscv_cpu_fp_enabled(env)) {
flags |= env->mstatus & MSTATUS_FS;
}
if (riscv_cpu_vector_enabled(env)) {
flags |= env->mstatus & MSTATUS_VS;
}
if (riscv_has_ext(env, RVH)) {
if (env->priv == PRV_M ||
(env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
@ -106,6 +121,9 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
get_field(env->mstatus_hs, MSTATUS_FS));
flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
get_field(env->mstatus_hs, MSTATUS_VS));
}
if (riscv_has_ext(env, RVJ)) {
int priv = flags & TB_FLAGS_PRIV_MMU_MASK;
@ -189,11 +207,24 @@ bool riscv_cpu_fp_enabled(CPURISCVState *env)
return false;
}
/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
if (env->mstatus & MSTATUS_VS) {
if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
return false;
}
return true;
}
return false;
}
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
MSTATUS64_UXL;
MSTATUS64_UXL | MSTATUS_VS;
bool current_virt = riscv_cpu_virt_enabled(env);
g_assert(riscv_has_ext(env, RVH));


@ -38,10 +38,6 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
/* loose check condition for fcsr in vector extension */
if ((csrno == CSR_FCSR) && (env->misa_ext & RVV)) {
return RISCV_EXCP_NONE;
}
if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
return RISCV_EXCP_ILLEGAL_INST;
}
@ -52,6 +48,11 @@ static RISCVException fs(CPURISCVState *env, int csrno)
static RISCVException vs(CPURISCVState *env, int csrno)
{
if (env->misa_ext & RVV) {
#if !defined(CONFIG_USER_ONLY)
if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
return RISCV_EXCP_ILLEGAL_INST;
}
#endif
return RISCV_EXCP_NONE;
}
return RISCV_EXCP_ILLEGAL_INST;
@ -261,10 +262,6 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno,
{
*val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
| (env->frm << FSR_RD_SHIFT);
if (vs(env, csrno) >= 0) {
*val |= (env->vxrm << FSR_VXRM_SHIFT)
| (env->vxsat << FSR_VXSAT_SHIFT);
}
return RISCV_EXCP_NONE;
}
@ -275,10 +272,6 @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno,
env->mstatus |= MSTATUS_FS;
#endif
env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
if (vs(env, csrno) >= 0) {
env->vxrm = (val & FSR_VXRM) >> FSR_VXRM_SHIFT;
env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT;
}
riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
return RISCV_EXCP_NONE;
}
@ -297,6 +290,12 @@ static RISCVException read_vl(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env_archcpu(env)->cfg.vlen >> 3;
return RISCV_EXCP_NONE;
}
static RISCVException read_vxrm(CPURISCVState *env, int csrno,
target_ulong *val)
{
@ -307,6 +306,9 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno,
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
#endif
env->vxrm = val;
return RISCV_EXCP_NONE;
}
@ -321,6 +323,9 @@ static RISCVException read_vxsat(CPURISCVState *env, int csrno,
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
#endif
env->vxsat = val;
return RISCV_EXCP_NONE;
}
@ -335,7 +340,30 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno,
static RISCVException write_vstart(CPURISCVState *env, int csrno,
target_ulong val)
{
env->vstart = val;
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
#endif
/*
* The vstart CSR is defined to have only enough writable bits
* to hold the largest element index, i.e. lg2(VLEN) bits.
*/
env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
return RISCV_EXCP_NONE;
}
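
For the default vlen of 128, ctzl(128) is 7, so the mask above is 0x7f and any write to vstart is truncated to an element index in [0, 127]. A quick standalone check of that masking (assuming GCC/Clang's __builtin_ctzl in place of QEMU's ctzl):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long vlen = 128;  /* default "vlen" property */
        uint64_t mask = ~(~0ULL << __builtin_ctzl(vlen));
        printf("mask   = 0x%llx\n", (unsigned long long)mask);              /* 0x7f */
        printf("vstart = 0x%llx\n", (unsigned long long)(0x1ffULL & mask)); /* 0x7f */
        return 0;
    }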
static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
return RISCV_EXCP_NONE;
}
static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
#endif
env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
return RISCV_EXCP_NONE;
}
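
read_vcsr/write_vcsr simply pack vxrm and vxsat into the layout declared by the VCSR_* macros earlier (vxsat in bit 0, vxrm in bits 2:1). A minimal round-trip sketch of that packing (my illustration, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    #define VCSR_VXSAT_SHIFT 0
    #define VCSR_VXSAT       (0x1 << VCSR_VXSAT_SHIFT)
    #define VCSR_VXRM_SHIFT  1
    #define VCSR_VXRM        (0x3 << VCSR_VXRM_SHIFT)

    int main(void)
    {
        uint64_t vxrm = 2, vxsat = 1;
        uint64_t vcsr = (vxrm << VCSR_VXRM_SHIFT) | (vxsat << VCSR_VXSAT_SHIFT);
        assert(vcsr == 0x5);                                     /* packed value */
        assert(((vcsr & VCSR_VXRM) >> VCSR_VXRM_SHIFT) == vxrm); /* unpack round-trips */
        assert(((vcsr & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT) == vxsat);
        return 0;
    }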
@ -453,7 +481,7 @@ static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
(1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
SSTATUS_SUM | SSTATUS_MXR;
SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
@ -492,6 +520,7 @@ static RISCVException read_mhartid(CPURISCVState *env, int csrno,
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
if ((status & MSTATUS_FS) == MSTATUS_FS ||
(status & MSTATUS_VS) == MSTATUS_VS ||
(status & MSTATUS_XS) == MSTATUS_XS) {
switch (xl) {
case MXL_RV32:
@ -535,7 +564,7 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
MSTATUS_TW;
MSTATUS_TW | MSTATUS_VS;
if (riscv_cpu_mxl(env) != MXL_RV32) {
/*
@ -632,7 +661,7 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
val &= env->misa_ext_mask;
/* Mask extensions that are not supported by QEMU */
val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
/* 'D' depends on 'F', so clear 'D' if 'F' is not present */
if ((val & RVD) && !(val & RVF)) {
@ -1818,8 +1847,10 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
[CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
[CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
[CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
[CSR_VL] = { "vl", vs, read_vl },
[CSR_VTYPE] = { "vtype", vs, read_vtype },
[CSR_VLENB] = { "vlenb", vs, read_vlenb },
/* User Timers and Counters */
[CSR_CYCLE] = { "cycle", ctr, read_instret },
[CSR_INSTRET] = { "instret", ctr, read_instret },


@ -55,23 +55,23 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
{
int softrm;
if (rm == 7) {
if (rm == RISCV_FRM_DYN) {
rm = env->frm;
}
switch (rm) {
case 0:
case RISCV_FRM_RNE:
softrm = float_round_nearest_even;
break;
case 1:
case RISCV_FRM_RTZ:
softrm = float_round_to_zero;
break;
case 2:
case RISCV_FRM_RDN:
softrm = float_round_down;
break;
case 3:
case RISCV_FRM_RUP:
softrm = float_round_up;
break;
case 4:
case RISCV_FRM_RMM:
softrm = float_round_ties_away;
break;
default:
@ -81,6 +81,20 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
set_float_rounding_mode(softrm, &env->fp_status);
}
void helper_set_rod_rounding_mode(CPURISCVState *env)
{
set_float_rounding_mode(float_round_to_odd, &env->fp_status);
}
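
Round-to-odd ("jamming") ORs any discarded bits into the result's least significant bit; narrowing in two steps (e.g. f64 -> f32 with round-to-odd, then f32 -> f16 with the real rounding mode) then avoids double rounding. An integer sketch of the idea (my illustration, not QEMU code):

    #include <stdint.h>

    /* Shift right by n, jamming discarded bits into the LSB. */
    static uint64_t shr_round_to_odd(uint64_t x, unsigned n)
    {
        uint64_t discarded = x & ((1ULL << n) - 1);
        return (x >> n) | (discarded != 0);
    }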
static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
uint64_t rs3, int flags)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
float16 frs3 = check_nanbox_h(rs3);
return nanbox_h(float16_muladd(frs1, frs2, frs3, flags, &env->fp_status));
}
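
The check_nanbox_h/nanbox_h helpers used here follow the usual RISC-V NaN-boxing rule; a rough standalone sketch, assuming the half-precision variant mirrors the F-in-D scheme (a 16-bit value in a 64-bit register is valid only when bits 63:16 are all ones, otherwise it reads as the canonical NaN 0x7e00):

    #include <stdint.h>

    static uint64_t nanbox_h_sketch(uint16_t f)
    {
        return 0xffffffffffff0000ULL | f;
    }

    static uint16_t check_nanbox_h_sketch(uint64_t reg)
    {
        /* Improperly boxed values decay to the default qNaN. */
        return (reg >> 16) == 0xffffffffffffULL ? (uint16_t)reg : 0x7e00;
    }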
static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
uint64_t rs3, int flags)
{
@ -102,6 +116,12 @@ uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status);
}
uint64_t helper_fmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return do_fmadd_h(env, frs1, frs2, frs3, 0);
}
uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
@ -115,6 +135,12 @@ uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
&env->fp_status);
}
uint64_t helper_fmsub_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return do_fmadd_h(env, frs1, frs2, frs3, float_muladd_negate_c);
}
uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
@ -128,6 +154,12 @@ uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
&env->fp_status);
}
uint64_t helper_fnmsub_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return do_fmadd_h(env, frs1, frs2, frs3, float_muladd_negate_product);
}
uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
@ -142,6 +174,13 @@ uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
float_muladd_negate_product, &env->fp_status);
}
uint64_t helper_fnmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return do_fmadd_h(env, frs1, frs2, frs3,
float_muladd_negate_c | float_muladd_negate_product);
}
uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(rs1);
@ -374,3 +413,149 @@ target_ulong helper_fclass_d(uint64_t frs1)
{
return fclass_d(frs1);
}
uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_add(frs1, frs2, &env->fp_status));
}
uint64_t helper_fsub_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_sub(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmul_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_mul(frs1, frs2, &env->fp_status));
}
uint64_t helper_fdiv_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(float16_div(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
float16_minnum(frs1, frs2, &env->fp_status) :
float16_minimum_number(frs1, frs2, &env->fp_status));
}
uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
float16_maxnum(frs1, frs2, &env->fp_status) :
float16_maximum_number(frs1, frs2, &env->fp_status));
}
uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return nanbox_h(float16_sqrt(frs1, &env->fp_status));
}
target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return float16_le(frs1, frs2, &env->fp_status);
}
target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return float16_lt(frs1, frs2, &env->fp_status);
}
target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(rs1);
float16 frs2 = check_nanbox_h(rs2);
return float16_eq_quiet(frs1, frs2, &env->fp_status);
}
target_ulong helper_fclass_h(uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return fclass_h(frs1);
}
target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return float16_to_int32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_wu_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return (int32_t)float16_to_uint32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_l_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return float16_to_int64(frs1, &env->fp_status);
}
target_ulong helper_fcvt_lu_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return float16_to_uint64(frs1, &env->fp_status);
}
uint64_t helper_fcvt_h_w(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(int32_to_float16((int32_t)rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_wu(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(uint32_to_float16((uint32_t)rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_l(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(int64_to_float16(rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1)
{
return nanbox_h(uint64_to_float16(rs1, &env->fp_status));
}
uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(rs1);
return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
}
uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return nanbox_s(float16_to_float32(frs1, true, &env->fp_status));
}
uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
{
return nanbox_h(float64_to_float16(rs1, true, &env->fp_status));
}
uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(rs1);
return float16_to_float64(frs1, true, &env->fp_status);
}


@ -20,6 +20,32 @@
#include "exec/gdbstub.h"
#include "cpu.h"
struct TypeSize {
const char *gdb_type;
const char *id;
int size;
const char suffix;
};
static const struct TypeSize vec_lanes[] = {
/* quads */
{ "uint128", "quads", 128, 'q' },
/* 64 bit */
{ "uint64", "longs", 64, 'l' },
/* 32 bit */
{ "uint32", "words", 32, 'w' },
/* 16 bit */
{ "uint16", "shorts", 16, 's' },
/*
* TODO: currently there is no reliable way of telling
* if the remote gdb actually understands ieee_half so
* we don't expose it in the target description for now.
* { "ieee_half", 16, 'h', 'f' },
*/
/* bytes */
{ "uint8", "bytes", 8, 'b' },
};
int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@ -101,6 +127,96 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n)
return 0;
}
/*
* Convert the register index number passed by GDB to the corresponding
* vector CSR number. Vector CSRs are defined after the vector registers
* in the dynamically generated riscv-vector.xml, so the starting register
* index of the vector CSRs is 32.
* Returns 0 if the register index number is out of range.
*/
static int riscv_gdb_vector_csrno(int num_regs)
{
/*
* The order of vector CSRs in the switch statement
* should match the order defined in csr_ops[].
*/
switch (num_regs) {
case 32:
return CSR_VSTART;
case 33:
return CSR_VXSAT;
case 34:
return CSR_VXRM;
case 35:
return CSR_VCSR;
case 36:
return CSR_VL;
case 37:
return CSR_VTYPE;
case 38:
return CSR_VLENB;
default:
/* Unknown register. */
return 0;
}
}
static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
{
uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
if (n < 32) {
int i;
int cnt = 0;
for (i = 0; i < vlenb; i += 8) {
cnt += gdb_get_reg64(buf,
env->vreg[(n * vlenb + i) / 8]);
}
return cnt;
}
int csrno = riscv_gdb_vector_csrno(n);
if (!csrno) {
return 0;
}
target_ulong val = 0;
int result = riscv_csrrw_debug(env, csrno, &val, 0, 0);
if (result == 0) {
return gdb_get_regl(buf, val);
}
return 0;
}
static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n)
{
uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
if (n < 32) {
int i;
for (i = 0; i < vlenb; i += 8) {
env->vreg[(n * vlenb + i) / 8] = ldq_p(mem_buf + i);
}
return vlenb;
}
int csrno = riscv_gdb_vector_csrno(n);
if (!csrno) {
return 0;
}
target_ulong val = ldtul_p(mem_buf);
int result = riscv_csrrw_debug(env, csrno, NULL, val, -1);
if (result == 0) {
return sizeof(target_ulong);
}
return 0;
}
static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n)
{
if (n < CSR_TABLE_SIZE) {
@ -187,6 +303,68 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
return CSR_TABLE_SIZE;
}
static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg)
{
RISCVCPU *cpu = RISCV_CPU(cs);
GString *s = g_string_new(NULL);
g_autoptr(GString) ts = g_string_new("");
int reg_width = cpu->cfg.vlen;
int num_regs = 0;
int i;
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.vector\">");
/* First define the lane types and how many of each fit in a whole VLEN */
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
int count = reg_width / vec_lanes[i].size;
g_string_printf(ts, "%s", vec_lanes[i].id);
g_string_append_printf(s,
"<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
ts->str, vec_lanes[i].gdb_type, count);
}
/* Define unions */
g_string_append_printf(s, "<union id=\"riscv_vector\">");
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
g_string_append_printf(s, "<field name=\"%c\" type=\"%s\"/>",
vec_lanes[i].suffix,
vec_lanes[i].id);
}
g_string_append(s, "</union>");
/* Define vector registers */
for (i = 0; i < 32; i++) {
g_string_append_printf(s,
"<reg name=\"v%d\" bitsize=\"%d\""
" regnum=\"%d\" group=\"vector\""
" type=\"riscv_vector\"/>",
i, reg_width, base_reg++);
num_regs++;
}
/* Define vector CSRs */
const char *vector_csrs[7] = {
"vstart", "vxsat", "vxrm", "vcsr",
"vl", "vtype", "vlenb"
};
for (i = 0; i < 7; i++) {
g_string_append_printf(s,
"<reg name=\"%s\" bitsize=\"%d\""
" regnum=\"%d\" group=\"vector\""
" type=\"int\"/>",
vector_csrs[i], TARGET_LONG_BITS, base_reg++);
num_regs++;
}
g_string_append_printf(s, "</feature>");
cpu->dyn_vreg_xml = g_string_free(s, false);
return num_regs;
}
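
For vlen = 128 on a 64-bit target (where TARGET_LONG_BITS is 64), the generated description would look roughly as follows (excerpt; the code emits it as one unbroken string, and the regnum values depend on base_reg):

    <?xml version="1.0"?>
    <!DOCTYPE target SYSTEM "gdb-target.dtd">
    <feature name="org.gnu.gdb.riscv.vector">
      <vector id="quads" type="uint128" count="1"/>
      <vector id="longs" type="uint64" count="2"/>
      ...
      <union id="riscv_vector">
        <field name="q" type="quads"/>
        <field name="l" type="longs"/>
        ...
      </union>
      <reg name="v0" bitsize="128" regnum="..." group="vector" type="riscv_vector"/>
      ...
      <reg name="vstart" bitsize="64" regnum="..." group="vector" type="int"/>
    </feature>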
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@ -198,6 +376,12 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
36, "riscv-32bit-fpu.xml", 0);
}
if (env->misa_ext & RVV) {
gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector,
ricsv_gen_dynamic_vector_xml(cs,
cs->gdb_num_regs),
"riscv-vector.xml", 0);
}
#if defined(TARGET_RISCV32)
gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual,
1, "riscv-32bit-virtual.xml", 0);


@ -3,16 +3,21 @@ DEF_HELPER_2(raise_exception, noreturn, env, i32)
/* Floating Point - rounding mode */
DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_1(set_rod_rounding_mode, TCG_CALL_NO_WG, void, env)
/* Floating Point - fused */
DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
/* Floating Point - Single Precision */
DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
@ -62,6 +67,31 @@ DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
/* Floating Point - Half Precision */
DEF_HELPER_FLAGS_3(fadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fsqrt_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_s_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_h_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_d_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_h_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_w_h, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_h, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_l_h, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_lu_h, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_h_w, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_1(fclass_h, TCG_CALL_NO_RWG_SE, tl, i64)
/* Special functions */
DEF_HELPER_2(csrr, tl, env, int)
DEF_HELPER_3(csrw, void, env, int, tl)
@ -83,195 +113,89 @@ DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl)
/* Vector functions */
DEF_HELPER_3(vsetvl, tl, env, tl, tl)
DEF_HELPER_5(vlb_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_b_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlb_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlh_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlh_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlh_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlh_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlh_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlh_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlw_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlw_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlw_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlw_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_b_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_b_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbu_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhu_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhu_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhu_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhu_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhu_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhu_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwu_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwu_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwu_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwu_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_b_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsb_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsh_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsh_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsh_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsh_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsh_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsh_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsw_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsw_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsw_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsw_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_b_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_h_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_w_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse_v_d_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_6(vlsb_v_b, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsb_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsb_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsb_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsh_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsh_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsh_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsw_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsw_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse_v_b, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsbu_v_b, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsbu_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsbu_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlsbu_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlshu_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlshu_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlshu_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlswu_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlswu_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssb_v_b, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssb_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssb_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssb_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssh_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssh_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssh_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssw_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vssw_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse_v_b, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse_v_h, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse_v_w, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse_v_d, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhff_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vleff_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vleff_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vleff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vleff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbuff_v_b, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbuff_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbuff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlbuff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhuff_v_h, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32)
DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoandd_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_5(vle8_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle16_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle32_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle64_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle8_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle16_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle32_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle64_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse8_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse16_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse32_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse64_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse8_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse16_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse32_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vse64_v_mask, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vlm_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vsm_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_6(vlse8_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse16_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse32_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlse64_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32)
DEF_HELPER_6(vlxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vlxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_5(vle8ff_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32)
DEF_HELPER_4(vl1re8_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl1re16_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl1re32_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl1re64_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl2re8_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl2re16_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl2re32_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl2re64_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl4re8_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl4re16_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl4re32_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl4re64_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl8re8_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl8re16_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl8re32_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vl8re64_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vs1r_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vs2r_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vs4r_v, void, ptr, tl, env, i32)
DEF_HELPER_4(vs8r_v, void, ptr, tl, env, i32)
DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
@ -430,18 +354,18 @@ DEF_HELPER_6(vsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsrl_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsrl_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsrl_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsra_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsra_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsra_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnsrl_wx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsrl_wx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsrl_wx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsra_wx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsra_wx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnsra_wx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vmseq_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmseq_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
@ -727,18 +651,34 @@ DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaaddu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vasubu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vaaddu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasubu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasubu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasubu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vasubu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
@ -749,28 +689,6 @@ DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vwsmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
@ -788,18 +706,18 @@ DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclip_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclip_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vnclipu_wx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclipu_wx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclipu_wx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_wx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_wx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vnclip_wx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vfadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
@@ -925,6 +843,14 @@ DEF_HELPER_5(vfsqrt_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfsqrt_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfsqrt_v_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfrsqrt7_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfrsqrt7_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfrsqrt7_v_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfrec7_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfrec7_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfrec7_v_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vfmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
@@ -987,12 +913,6 @@ DEF_HELPER_6(vmfgt_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vmfge_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vmfge_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vmfge_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vmford_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmford_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmford_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmford_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vmford_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vmford_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_5(vfclass_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfclass_v_w, void, ptr, ptr, ptr, env, i32)
@@ -1019,23 +939,27 @@ DEF_HELPER_5(vfwcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_xu_v_b, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_x_v_b, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfwcvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_xu_f_w_b, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_xu_f_w_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_xu_f_w_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_x_f_w_b, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_x_f_w_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_x_f_w_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_xu_w_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_xu_w_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_x_w_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_x_w_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_f_w_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vfncvt_f_f_w_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
@@ -1092,16 +1016,16 @@ DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmandnot_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmandn_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmxor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmorn_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_4(vmpopc_m, tl, ptr, ptr, env, i32)
DEF_HELPER_4(vcpop_m, tl, ptr, ptr, env, i32)
DEF_HELPER_4(vmfirst_m, tl, ptr, ptr, env, i32)
DEF_HELPER_4(vfirst_m, tl, ptr, ptr, env, i32)
DEF_HELPER_5(vmsbf_m, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vmsif_m, void, ptr, ptr, ptr, env, i32)
@@ -1134,10 +1058,21 @@ DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1up_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1down_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1down_vf_w, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1down_vf_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgather_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgatherei16_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgatherei16_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgatherei16_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgatherei16_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vrgather_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vrgather_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vrgather_vx_w, void, ptr, ptr, tl, ptr, env, i32)
@@ -1147,3 +1082,22 @@ DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_4(vmv1r_v, void, ptr, ptr, env, i32)
DEF_HELPER_4(vmv2r_v, void, ptr, ptr, env, i32)
DEF_HELPER_4(vmv4r_v, void, ptr, ptr, env, i32)
DEF_HELPER_4(vmv8r_v, void, ptr, ptr, env, i32)
DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vzext_vf4_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vzext_vf4_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vzext_vf8_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf2_h, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf2_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf2_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32)
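
Each DEF_HELPER_N(name, ret, t1, ..., tN) entry above expands into the C prototype the translator emits calls against. As a reference for the types used here, this is roughly what one of the new integer-extension helpers declares, assuming QEMU's usual mapping of ptr to void *, tl to target_ulong, env to CPURISCVState *, and i32 to uint32_t:

/* sketch of the expansion of DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32) */
void helper_vzext_vf2_h(void *vd, void *v0, void *vs2,
                        CPURISCVState *env, uint32_t desc);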


@@ -49,7 +49,6 @@
&atomic aq rl rs2 rs1 rd
&rmrr vm rd rs1 rs2
&rmr vm rd rs2
&rwdvm vm wd rd rs1 rs2
&r2nfvm vm rd rs1 nf
&rnfvm vm rd rs1 rs2 nf
@@ -79,8 +78,8 @@
@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd
@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd
@r_wdvm ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
@r2_zimm . zimm:11 ..... ... ..... ....... %rs1 %rd
@r2_zimm11 . zimm:11 ..... ... ..... ....... %rs1 %rd
@r2_zimm10 .. zimm:10 ..... ... ..... ....... %rs1 %rd
@r2_s ....... ..... ..... ... ..... ....... %rs2 %rs1
@hfence_gvma ....... ..... ..... ... ..... ....... %rs2 %rs1
@@ -296,60 +295,69 @@ hlv_d 0110110 00000 ..... 100 ..... 1110011 @r2
hsv_d 0110111 ..... ..... 100 00000 1110011 @r2_s
# *** Vector loads and stores are encoded within LOADFP/STORE-FP ***
vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm
vlh_v ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm
vlw_v ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm
vle_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
vlbu_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
vlhu_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
vlwu_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
vsb_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
vsh_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
vsw_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
vse_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
# Vector unit-stride load/store insns.
vle8_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
vle16_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
vle32_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
vle64_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
vse8_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
vse16_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
vse32_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
vse64_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
vlsb_v ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm
vlsh_v ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm
vlsw_v ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm
vlse_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
vlsbu_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
vlshu_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
vlswu_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
vssb_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
vssh_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
vssw_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
vsse_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
# Vector unit-stride mask load/store insns.
vlm_v 000 000 1 01011 ..... 000 ..... 0000111 @r2
vsm_v 000 000 1 01011 ..... 000 ..... 0100111 @r2
# Vector strided insns.
vlse8_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
vlse16_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
vlse32_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
vlse64_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
vsse8_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
vsse16_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
vsse32_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
vsse64_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
# Vector ordered-indexed and unordered-indexed load insns.
vlxei8_v ... 0-1 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxei16_v ... 0-1 . ..... ..... 101 ..... 0000111 @r_nfvm
vlxei32_v ... 0-1 . ..... ..... 110 ..... 0000111 @r_nfvm
vlxei64_v ... 0-1 . ..... ..... 111 ..... 0000111 @r_nfvm
vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm
vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
# Vector ordered-indexed and unordered-indexed store insns.
vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm
vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
vsxei8_v ... 0-1 . ..... ..... 000 ..... 0100111 @r_nfvm
vsxei16_v ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm
vsxei32_v ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm
vsxei64_v ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm
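# (In the indexed patterns above, the dashed bit is the ordering bit: the
#  ordered and unordered variants share a single decode pattern at this stage.)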
# *** Vector AMO operations are encoded under the standard AMO major opcode ***
vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoaddw_v 00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoxorw_v 00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoandw_v 01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamoorw_v 01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamominw_v 10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamomaxw_v 10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamominuw_v 11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
vamomaxuw_v 11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
# Vector unit-stride fault-only-first load insns.
vle8ff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
vle16ff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
vle32ff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
vle64ff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
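# (Fault-only-first: a fault on any element after element 0 does not trap;
#  vl is instead truncated to the number of elements already loaded.)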
# Vector whole register insns
vl1re8_v 000 000 1 01000 ..... 000 ..... 0000111 @r2
vl1re16_v 000 000 1 01000 ..... 101 ..... 0000111 @r2
vl1re32_v 000 000 1 01000 ..... 110 ..... 0000111 @r2
vl1re64_v 000 000 1 01000 ..... 111 ..... 0000111 @r2
vl2re8_v 001 000 1 01000 ..... 000 ..... 0000111 @r2
vl2re16_v 001 000 1 01000 ..... 101 ..... 0000111 @r2
vl2re32_v 001 000 1 01000 ..... 110 ..... 0000111 @r2
vl2re64_v 001 000 1 01000 ..... 111 ..... 0000111 @r2
vl4re8_v 011 000 1 01000 ..... 000 ..... 0000111 @r2
vl4re16_v 011 000 1 01000 ..... 101 ..... 0000111 @r2
vl4re32_v 011 000 1 01000 ..... 110 ..... 0000111 @r2
vl4re64_v 011 000 1 01000 ..... 111 ..... 0000111 @r2
vl8re8_v 111 000 1 01000 ..... 000 ..... 0000111 @r2
vl8re16_v 111 000 1 01000 ..... 101 ..... 0000111 @r2
vl8re32_v 111 000 1 01000 ..... 110 ..... 0000111 @r2
vl8re64_v 111 000 1 01000 ..... 111 ..... 0000111 @r2
vs1r_v 000 000 1 01000 ..... 000 ..... 0100111 @r2
vs2r_v 001 000 1 01000 ..... 000 ..... 0100111 @r2
vs4r_v 011 000 1 01000 ..... 000 ..... 0100111 @r2
vs8r_v 111 000 1 01000 ..... 000 ..... 0100111 @r2
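# (For whole-register accesses the leading nf bits double as the register
#  count: 000/001/011/111 select the 1/2/4/8 registers of vl<n>re<eew>_v.)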
# *** new major opcode OP-V ***
vadd_vv 000000 . ..... ..... 000 ..... 1010111 @r_vm
@@ -375,16 +383,16 @@ vwsubu_wv 110110 . ..... ..... 010 ..... 1010111 @r_vm
vwsubu_wx 110110 . ..... ..... 110 ..... 1010111 @r_vm
vwsub_wv 110111 . ..... ..... 010 ..... 1010111 @r_vm
vwsub_wx 110111 . ..... ..... 110 ..... 1010111 @r_vm
vadc_vvm 010000 1 ..... ..... 000 ..... 1010111 @r_vm_1
vadc_vxm 010000 1 ..... ..... 100 ..... 1010111 @r_vm_1
vadc_vim 010000 1 ..... ..... 011 ..... 1010111 @r_vm_1
vmadc_vvm 010001 1 ..... ..... 000 ..... 1010111 @r_vm_1
vmadc_vxm 010001 1 ..... ..... 100 ..... 1010111 @r_vm_1
vmadc_vim 010001 1 ..... ..... 011 ..... 1010111 @r_vm_1
vsbc_vvm 010010 1 ..... ..... 000 ..... 1010111 @r_vm_1
vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r_vm_1
vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r_vm_1
vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r_vm_1
vadc_vvm 010000 0 ..... ..... 000 ..... 1010111 @r_vm_1
vadc_vxm 010000 0 ..... ..... 100 ..... 1010111 @r_vm_1
vadc_vim 010000 0 ..... ..... 011 ..... 1010111 @r_vm_1
vmadc_vvm 010001 . ..... ..... 000 ..... 1010111 @r_vm
vmadc_vxm 010001 . ..... ..... 100 ..... 1010111 @r_vm
vmadc_vim 010001 . ..... ..... 011 ..... 1010111 @r_vm
vsbc_vvm 010010 0 ..... ..... 000 ..... 1010111 @r_vm_1
vsbc_vxm 010010 0 ..... ..... 100 ..... 1010111 @r_vm_1
vmsbc_vvm 010011 . ..... ..... 000 ..... 1010111 @r_vm
vmsbc_vxm 010011 . ..... ..... 100 ..... 1010111 @r_vm
vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm
vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm
vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm
@@ -403,12 +411,12 @@ vsrl_vi 101000 . ..... ..... 011 ..... 1010111 @r_vm
vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm
vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm
vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm
vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm
vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm
vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm
vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm
vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm
vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm
vnsrl_wv 101100 . ..... ..... 000 ..... 1010111 @r_vm
vnsrl_wx 101100 . ..... ..... 100 ..... 1010111 @r_vm
vnsrl_wi 101100 . ..... ..... 011 ..... 1010111 @r_vm
vnsra_wv 101101 . ..... ..... 000 ..... 1010111 @r_vm
vnsra_wx 101101 . ..... ..... 100 ..... 1010111 @r_vm
vnsra_wi 101101 . ..... ..... 011 ..... 1010111 @r_vm
vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm
vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm
vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm
@@ -471,9 +479,9 @@ vwmaccu_vv 111100 . ..... ..... 010 ..... 1010111 @r_vm
vwmaccu_vx 111100 . ..... ..... 110 ..... 1010111 @r_vm
vwmacc_vv 111101 . ..... ..... 010 ..... 1010111 @r_vm
vwmacc_vx 111101 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccsu_vv 111110 . ..... ..... 010 ..... 1010111 @r_vm
vwmaccsu_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccus_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccsu_vv 111111 . ..... ..... 010 ..... 1010111 @r_vm
vwmaccsu_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm
vwmaccus_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm
vmv_v_v 010111 1 00000 ..... 000 ..... 1010111 @r2
vmv_v_x 010111 1 00000 ..... 100 ..... 1010111 @r2
vmv_v_i 010111 1 00000 ..... 011 ..... 1010111 @r2
@@ -490,32 +498,28 @@ vssubu_vv 100010 . ..... ..... 000 ..... 1010111 @r_vm
vssubu_vx 100010 . ..... ..... 100 ..... 1010111 @r_vm
vssub_vv 100011 . ..... ..... 000 ..... 1010111 @r_vm
vssub_vx 100011 . ..... ..... 100 ..... 1010111 @r_vm
vaadd_vv 100100 . ..... ..... 000 ..... 1010111 @r_vm
vaadd_vx 100100 . ..... ..... 100 ..... 1010111 @r_vm
vaadd_vi 100100 . ..... ..... 011 ..... 1010111 @r_vm
vasub_vv 100110 . ..... ..... 000 ..... 1010111 @r_vm
vasub_vx 100110 . ..... ..... 100 ..... 1010111 @r_vm
vaadd_vv 001001 . ..... ..... 010 ..... 1010111 @r_vm
vaadd_vx 001001 . ..... ..... 110 ..... 1010111 @r_vm
vaaddu_vv 001000 . ..... ..... 010 ..... 1010111 @r_vm
vaaddu_vx 001000 . ..... ..... 110 ..... 1010111 @r_vm
vasub_vv 001011 . ..... ..... 010 ..... 1010111 @r_vm
vasub_vx 001011 . ..... ..... 110 ..... 1010111 @r_vm
vasubu_vv 001010 . ..... ..... 010 ..... 1010111 @r_vm
vasubu_vx 001010 . ..... ..... 110 ..... 1010111 @r_vm
vsmul_vv 100111 . ..... ..... 000 ..... 1010111 @r_vm
vsmul_vx 100111 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm
vwsmaccu_vx 111100 . ..... ..... 100 ..... 1010111 @r_vm
vwsmacc_vv 111101 . ..... ..... 000 ..... 1010111 @r_vm
vwsmacc_vx 111101 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccsu_vv 111110 . ..... ..... 000 ..... 1010111 @r_vm
vwsmaccsu_vx 111110 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccus_vx 111111 . ..... ..... 100 ..... 1010111 @r_vm
vssrl_vv 101010 . ..... ..... 000 ..... 1010111 @r_vm
vssrl_vx 101010 . ..... ..... 100 ..... 1010111 @r_vm
vssrl_vi 101010 . ..... ..... 011 ..... 1010111 @r_vm
vssra_vv 101011 . ..... ..... 000 ..... 1010111 @r_vm
vssra_vx 101011 . ..... ..... 100 ..... 1010111 @r_vm
vssra_vi 101011 . ..... ..... 011 ..... 1010111 @r_vm
vnclipu_vv 101110 . ..... ..... 000 ..... 1010111 @r_vm
vnclipu_vx 101110 . ..... ..... 100 ..... 1010111 @r_vm
vnclipu_vi 101110 . ..... ..... 011 ..... 1010111 @r_vm
vnclip_vv 101111 . ..... ..... 000 ..... 1010111 @r_vm
vnclip_vx 101111 . ..... ..... 100 ..... 1010111 @r_vm
vnclip_vi 101111 . ..... ..... 011 ..... 1010111 @r_vm
vnclipu_wv 101110 . ..... ..... 000 ..... 1010111 @r_vm
vnclipu_wx 101110 . ..... ..... 100 ..... 1010111 @r_vm
vnclipu_wi 101110 . ..... ..... 011 ..... 1010111 @r_vm
vnclip_wv 101111 . ..... ..... 000 ..... 1010111 @r_vm
vnclip_wx 101111 . ..... ..... 100 ..... 1010111 @r_vm
vnclip_wi 101111 . ..... ..... 011 ..... 1010111 @r_vm
vfadd_vv 000000 . ..... ..... 001 ..... 1010111 @r_vm
vfadd_vf 000000 . ..... ..... 101 ..... 1010111 @r_vm
vfsub_vv 000010 . ..... ..... 001 ..... 1010111 @r_vm
@@ -560,7 +564,9 @@ vfwmsac_vv 111110 . ..... ..... 001 ..... 1010111 @r_vm
vfwmsac_vf 111110 . ..... ..... 101 ..... 1010111 @r_vm
vfwnmsac_vv 111111 . ..... ..... 001 ..... 1010111 @r_vm
vfwnmsac_vf 111111 . ..... ..... 101 ..... 1010111 @r_vm
vfsqrt_v 100011 . ..... 00000 001 ..... 1010111 @r2_vm
vfsqrt_v 010011 . ..... 00000 001 ..... 1010111 @r2_vm
vfrsqrt7_v 010011 . ..... 00100 001 ..... 1010111 @r2_vm
vfrec7_v 010011 . ..... 00101 001 ..... 1010111 @r2_vm
vfmin_vv 000100 . ..... ..... 001 ..... 1010111 @r_vm
vfmin_vf 000100 . ..... ..... 101 ..... 1010111 @r_vm
vfmax_vv 000110 . ..... ..... 001 ..... 1010111 @r_vm
@@ -571,6 +577,8 @@ vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm
vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm
vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm
vfslide1up_vf 001110 . ..... ..... 101 ..... 1010111 @r_vm
vfslide1down_vf 001111 . ..... ..... 101 ..... 1010111 @r_vm
vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm
vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm
vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm
@@ -581,25 +589,34 @@ vmfle_vv 011001 . ..... ..... 001 ..... 1010111 @r_vm
vmfle_vf 011001 . ..... ..... 101 ..... 1010111 @r_vm
vmfgt_vf 011101 . ..... ..... 101 ..... 1010111 @r_vm
vmfge_vf 011111 . ..... ..... 101 ..... 1010111 @r_vm
vmford_vv 011010 . ..... ..... 001 ..... 1010111 @r_vm
vmford_vf 011010 . ..... ..... 101 ..... 1010111 @r_vm
vfclass_v 100011 . ..... 10000 001 ..... 1010111 @r2_vm
vfclass_v 010011 . ..... 10000 001 ..... 1010111 @r2_vm
vfmerge_vfm 010111 0 ..... ..... 101 ..... 1010111 @r_vm_0
vfmv_v_f 010111 1 00000 ..... 101 ..... 1010111 @r2
vfcvt_xu_f_v 100010 . ..... 00000 001 ..... 1010111 @r2_vm
vfcvt_x_f_v 100010 . ..... 00001 001 ..... 1010111 @r2_vm
vfcvt_f_xu_v 100010 . ..... 00010 001 ..... 1010111 @r2_vm
vfcvt_f_x_v 100010 . ..... 00011 001 ..... 1010111 @r2_vm
vfwcvt_xu_f_v 100010 . ..... 01000 001 ..... 1010111 @r2_vm
vfwcvt_x_f_v 100010 . ..... 01001 001 ..... 1010111 @r2_vm
vfwcvt_f_xu_v 100010 . ..... 01010 001 ..... 1010111 @r2_vm
vfwcvt_f_x_v 100010 . ..... 01011 001 ..... 1010111 @r2_vm
vfwcvt_f_f_v 100010 . ..... 01100 001 ..... 1010111 @r2_vm
vfncvt_xu_f_v 100010 . ..... 10000 001 ..... 1010111 @r2_vm
vfncvt_x_f_v 100010 . ..... 10001 001 ..... 1010111 @r2_vm
vfncvt_f_xu_v 100010 . ..... 10010 001 ..... 1010111 @r2_vm
vfncvt_f_x_v 100010 . ..... 10011 001 ..... 1010111 @r2_vm
vfncvt_f_f_v 100010 . ..... 10100 001 ..... 1010111 @r2_vm
vfcvt_xu_f_v 010010 . ..... 00000 001 ..... 1010111 @r2_vm
vfcvt_x_f_v 010010 . ..... 00001 001 ..... 1010111 @r2_vm
vfcvt_f_xu_v 010010 . ..... 00010 001 ..... 1010111 @r2_vm
vfcvt_f_x_v 010010 . ..... 00011 001 ..... 1010111 @r2_vm
vfcvt_rtz_xu_f_v 010010 . ..... 00110 001 ..... 1010111 @r2_vm
vfcvt_rtz_x_f_v 010010 . ..... 00111 001 ..... 1010111 @r2_vm
vfwcvt_xu_f_v 010010 . ..... 01000 001 ..... 1010111 @r2_vm
vfwcvt_x_f_v 010010 . ..... 01001 001 ..... 1010111 @r2_vm
vfwcvt_f_xu_v 010010 . ..... 01010 001 ..... 1010111 @r2_vm
vfwcvt_f_x_v 010010 . ..... 01011 001 ..... 1010111 @r2_vm
vfwcvt_f_f_v 010010 . ..... 01100 001 ..... 1010111 @r2_vm
vfwcvt_rtz_xu_f_v 010010 . ..... 01110 001 ..... 1010111 @r2_vm
vfwcvt_rtz_x_f_v 010010 . ..... 01111 001 ..... 1010111 @r2_vm
vfncvt_xu_f_w 010010 . ..... 10000 001 ..... 1010111 @r2_vm
vfncvt_x_f_w 010010 . ..... 10001 001 ..... 1010111 @r2_vm
vfncvt_f_xu_w 010010 . ..... 10010 001 ..... 1010111 @r2_vm
vfncvt_f_x_w 010010 . ..... 10011 001 ..... 1010111 @r2_vm
vfncvt_f_f_w 010010 . ..... 10100 001 ..... 1010111 @r2_vm
vfncvt_rod_f_f_w 010010 . ..... 10101 001 ..... 1010111 @r2_vm
vfncvt_rtz_xu_f_w 010010 . ..... 10110 001 ..... 1010111 @r2_vm
vfncvt_rtz_x_f_w 010010 . ..... 10111 001 ..... 1010111 @r2_vm
vredsum_vs 000000 . ..... ..... 010 ..... 1010111 @r_vm
vredand_vs 000001 . ..... ..... 010 ..... 1010111 @r_vm
vredor_vs 000010 . ..... ..... 010 ..... 1010111 @r_vm
@@ -618,23 +635,23 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
vmandnot_mm 011000 - ..... ..... 010 ..... 1010111 @r
vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
vmornot_mm 011100 - ..... ..... 010 ..... 1010111 @r
vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r
vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
vmpopc_m 010100 . ..... ----- 010 ..... 1010111 @r2_vm
vmfirst_m 010101 . ..... ----- 010 ..... 1010111 @r2_vm
vmsbf_m 010110 . ..... 00001 010 ..... 1010111 @r2_vm
vmsif_m 010110 . ..... 00011 010 ..... 1010111 @r2_vm
vmsof_m 010110 . ..... 00010 010 ..... 1010111 @r2_vm
viota_m 010110 . ..... 10000 010 ..... 1010111 @r2_vm
vid_v 010110 . 00000 10001 010 ..... 1010111 @r1_vm
vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r
vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2
vfmv_f_s 001100 1 ..... 00000 001 ..... 1010111 @r2rd
vfmv_s_f 001101 1 00000 ..... 101 ..... 1010111 @r2
vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm
vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm
vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm
vmsif_m 010100 . ..... 00011 010 ..... 1010111 @r2_vm
vmsof_m 010100 . ..... 00010 010 ..... 1010111 @r2_vm
viota_m 010100 . ..... 10000 010 ..... 1010111 @r2_vm
vid_v 010100 . 00000 10001 010 ..... 1010111 @r1_vm
vmv_x_s 010000 1 ..... 00000 010 ..... 1010111 @r2rd
vmv_s_x 010000 1 00000 ..... 110 ..... 1010111 @r2
vfmv_f_s 010000 1 ..... 00000 001 ..... 1010111 @r2rd
vfmv_s_f 010000 1 00000 ..... 101 ..... 1010111 @r2
vslideup_vx 001110 . ..... ..... 100 ..... 1010111 @r_vm
vslideup_vi 001110 . ..... ..... 011 ..... 1010111 @r_vm
vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm
@@ -642,24 +659,27 @@ vslidedown_vx 001111 . ..... ..... 011 ..... 1010111 @r_vm
vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm
vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm
vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd
vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd
vmv8r_v 100111 1 ..... 00111 011 ..... 1010111 @r2rd
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
# Vector Integer Extension
vzext_vf2 010010 . ..... 00110 010 ..... 1010111 @r2_vm
vzext_vf4 010010 . ..... 00100 010 ..... 1010111 @r2_vm
vzext_vf8 010010 . ..... 00010 010 ..... 1010111 @r2_vm
vsext_vf2 010010 . ..... 00111 010 ..... 1010111 @r2_vm
vsext_vf4 010010 . ..... 00101 010 ..... 1010111 @r2_vm
vsext_vf8 010010 . ..... 00011 010 ..... 1010111 @r2_vm
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm11
vsetivli 11 .......... ..... 111 ..... 1010111 @r2_zimm10
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
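
vsetvli carries vtype as an 11-bit immediate, the new vsetivli packs a 10-bit vtype together with a 5-bit immediate AVL in the rs1 slot, and vsetvl takes vtype from a register. All three set vl the same way; a minimal sketch of the QEMU-style rule (names are illustrative; sew is the 0..3 encoding and lmul the signed exponent used elsewhere in this series):

static target_ulong do_set_vl(target_ulong avl, uint16_t vlen,
                              uint8_t sew, int8_t lmul)
{
    /* VLMAX = LMUL * VLEN / SEW, with SEW = (8 << sew) bits */
    target_ulong vlmax = vlen / (8u << sew);
    vlmax = lmul >= 0 ? vlmax << lmul : vlmax >> -lmul;
    return avl < vlmax ? avl : vlmax;    /* vl = min(AVL, VLMAX) */
}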
# *** Vector AMO operations (in addition to Zvamo) ***
vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
vamomaxud_v 11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
# *** RV32 Zba Standard Extension ***
sh1add 0010000 .......... 010 ..... 0110011 @r
sh2add 0010000 .......... 100 ..... 0110011 @r
@@ -726,3 +746,41 @@ binv 0110100 .......... 001 ..... 0110011 @r
binvi 01101. ........... 001 ..... 0010011 @sh
bset 0010100 .......... 001 ..... 0110011 @r
bseti 00101. ........... 001 ..... 0010011 @sh
# *** RV32 Zfh Extension ***
flh ............ ..... 001 ..... 0000111 @i
fsh ....... ..... ..... 001 ..... 0100111 @s
fmadd_h ..... 10 ..... ..... ... ..... 1000011 @r4_rm
fmsub_h ..... 10 ..... ..... ... ..... 1000111 @r4_rm
fnmsub_h ..... 10 ..... ..... ... ..... 1001011 @r4_rm
fnmadd_h ..... 10 ..... ..... ... ..... 1001111 @r4_rm
fadd_h 0000010 ..... ..... ... ..... 1010011 @r_rm
fsub_h 0000110 ..... ..... ... ..... 1010011 @r_rm
fmul_h 0001010 ..... ..... ... ..... 1010011 @r_rm
fdiv_h 0001110 ..... ..... ... ..... 1010011 @r_rm
fsqrt_h 0101110 00000 ..... ... ..... 1010011 @r2_rm
fsgnj_h 0010010 ..... ..... 000 ..... 1010011 @r
fsgnjn_h 0010010 ..... ..... 001 ..... 1010011 @r
fsgnjx_h 0010010 ..... ..... 010 ..... 1010011 @r
fmin_h 0010110 ..... ..... 000 ..... 1010011 @r
fmax_h 0010110 ..... ..... 001 ..... 1010011 @r
fcvt_h_s 0100010 00000 ..... ... ..... 1010011 @r2_rm
fcvt_s_h 0100000 00010 ..... ... ..... 1010011 @r2_rm
fcvt_h_d 0100010 00001 ..... ... ..... 1010011 @r2_rm
fcvt_d_h 0100001 00010 ..... ... ..... 1010011 @r2_rm
fcvt_w_h 1100010 00000 ..... ... ..... 1010011 @r2_rm
fcvt_wu_h 1100010 00001 ..... ... ..... 1010011 @r2_rm
fmv_x_h 1110010 00000 ..... 000 ..... 1010011 @r2
feq_h 1010010 ..... ..... 010 ..... 1010011 @r
flt_h 1010010 ..... ..... 001 ..... 1010011 @r
fle_h 1010010 ..... ..... 000 ..... 1010011 @r
fclass_h 1110010 00000 ..... 001 ..... 1010011 @r2
fcvt_h_w 1101010 00000 ..... ... ..... 1010011 @r2_rm
fcvt_h_wu 1101010 00001 ..... ... ..... 1010011 @r2_rm
fmv_h_x 1111010 00000 ..... 000 ..... 1010011 @r2
# *** RV64 Zfh Extension (in addition to RV32 Zfh) ***
fcvt_l_h 1100010 00010 ..... ... ..... 1010011 @r2_rm
fcvt_lu_h 1100010 00011 ..... ... ..... 1010011 @r2_rm
fcvt_h_l 1101010 00010 ..... ... ..... 1010011 @r2_rm
fcvt_h_lu 1101010 00011 ..... ... ..... 1010011 @r2_rm

(File diff suppressed because it is too large.)


@@ -0,0 +1,537 @@
/*
* RISC-V translation routines for the RV64Zfh Standard Extension.
*
* Copyright (c) 2020 Chih-Min Chao, chihmin.chao@sifive.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define REQUIRE_ZFH(ctx) do { \
if (!ctx->ext_zfh) { \
return false; \
} \
} while (0)
#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
if (!(ctx->ext_zfh || ctx->ext_zfhmin)) { \
return false; \
} \
} while (0)
static bool trans_flh(DisasContext *ctx, arg_flh *a)
{
TCGv_i64 dest;
TCGv t0;
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = temp_new(ctx);
tcg_gen_addi_tl(temp, t0, a->imm);
t0 = temp;
}
dest = cpu_fpr[a->rd];
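/* 16-bit target-endian load (MO_TEUW); gen_nanbox_h below then sets
 * bits 63:16 so the FPR ends up holding a legally nan-boxed half. */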
tcg_gen_qemu_ld_i64(dest, t0, ctx->mem_idx, MO_TEUW);
gen_nanbox_h(dest, dest);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
{
TCGv t0;
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = temp_new(ctx);
tcg_gen_addi_tl(temp, t0, a->imm);
t0 = temp;
}
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUW);
return true;
}
static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fnmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fnmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fadd_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fsub_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fmul_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fdiv_h(cpu_fpr[a->rd], cpu_env,
cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fsqrt_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
if (a->rs1 == a->rs2) { /* FMOV */
gen_check_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
} else {
TCGv_i64 rs1 = tcg_temp_new_i64();
TCGv_i64 rs2 = tcg_temp_new_i64();
gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
/* This formulation retains the nanboxing of rs2. */
tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 15);
tcg_temp_free_i64(rs1);
tcg_temp_free_i64(rs2);
}
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
{
TCGv_i64 rs1, rs2, mask;
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
rs1 = tcg_temp_new_i64();
gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
if (a->rs1 == a->rs2) { /* FNEG */
tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(15, 1));
} else {
rs2 = tcg_temp_new_i64();
gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
/*
* Replace bit 15 in rs1 with inverse in rs2.
* This formulation retains the nanboxing of rs1.
*/
mask = tcg_constant_i64(~MAKE_64BIT_MASK(15, 1));
tcg_gen_not_i64(rs2, rs2);
tcg_gen_andc_i64(rs2, rs2, mask);
tcg_gen_and_i64(rs1, mask, rs1);
tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
tcg_temp_free_i64(rs2);
}
mark_fs_dirty(ctx);
return true;
}
static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
{
TCGv_i64 rs1, rs2;
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
rs1 = tcg_temp_new_i64();
gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
if (a->rs1 == a->rs2) { /* FABS */
tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(15, 1));
} else {
rs2 = tcg_temp_new_i64();
gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
/*
* Xor bit 15 in rs1 with that in rs2.
* This formulation retains the nanboxing of rs1.
*/
tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(15, 1));
tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
tcg_temp_free_i64(rs2);
}
mark_fs_dirty(ctx);
return true;
}
static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_helper_fmin_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
gen_helper_fmax_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
cpu_fpr[a->rs2]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_s_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
REQUIRE_EXT(ctx, RVD);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_d_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
REQUIRE_EXT(ctx, RVD);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_helper_feq_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_helper_flt_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_helper_fle_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_helper_fclass_h(dest, cpu_fpr[a->rs1]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_w_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_wu_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_w(cpu_fpr[a->rd], cpu_env, t0);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
{
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_wu(cpu_fpr[a->rd], cpu_env, t0);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
#if defined(TARGET_RISCV64)
/* 16 bits -> 64 bits */
tcg_gen_ext16s_tl(dest, cpu_fpr[a->rs1]);
#else
/* 16 bits -> 32 bits */
tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
tcg_gen_ext16s_tl(dest, dest);
#endif
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
{
REQUIRE_FPU;
REQUIRE_ZFH_OR_ZFHMIN(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO);
tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0);
gen_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rd]);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_l_h(DisasContext *ctx, arg_fcvt_l_h *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_l_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fcvt_lu_h(DisasContext *ctx, arg_fcvt_lu_h *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_lu_h(dest, cpu_env, cpu_fpr[a->rs1]);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
static bool trans_fcvt_h_l(DisasContext *ctx, arg_fcvt_h_l *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_l(cpu_fpr[a->rd], cpu_env, t0);
mark_fs_dirty(ctx);
return true;
}
static bool trans_fcvt_h_lu(DisasContext *ctx, arg_fcvt_h_lu *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_FPU;
REQUIRE_ZFH(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
gen_helper_fcvt_h_lu(cpu_fpr[a->rd], cpu_env, t0);
mark_fs_dirty(ctx);
return true;
}


@@ -22,26 +22,30 @@
#include "hw/registerfields.h"
/* share data between vector helpers and decode code */
FIELD(VDATA, MLEN, 0, 8)
FIELD(VDATA, VM, 8, 1)
FIELD(VDATA, LMUL, 9, 2)
FIELD(VDATA, NF, 11, 4)
FIELD(VDATA, WD, 11, 1)
FIELD(VDATA, VM, 0, 1)
FIELD(VDATA, LMUL, 1, 3)
FIELD(VDATA, NF, 4, 4)
FIELD(VDATA, WD, 4, 1)
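
These VDATA fields are packed and unpacked with the standard hw/registerfields.h accessors. A sketch of how the decode side presumably builds the per-instruction descriptor word the vector helpers receive (variable names illustrative):

uint32_t data = 0;
data = FIELD_DP32(data, VDATA, VM, a->vm);     /* mask enable          */
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); /* signed LMUL exponent */
data = FIELD_DP32(data, VDATA, NF, a->nf);     /* segment count        */
/* ...and the helper reads it back with e.g. FIELD_EX32(desc, VDATA, VM). */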
/* floating-point classify helpers */
target_ulong fclass_h(uint64_t frs1);
target_ulong fclass_s(uint64_t frs1);
target_ulong fclass_d(uint64_t frs1);
#define SEW8 0
#define SEW16 1
#define SEW32 2
#define SEW64 3
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_riscv_cpu;
#endif
enum {
RISCV_FRM_RNE = 0, /* Round to Nearest, ties to Even */
RISCV_FRM_RTZ = 1, /* Round towards Zero */
RISCV_FRM_RDN = 2, /* Round Down */
RISCV_FRM_RUP = 3, /* Round Up */
RISCV_FRM_RMM = 4, /* Round to Nearest, ties to Max Magnitude */
RISCV_FRM_DYN = 7, /* Dynamic rounding mode */
RISCV_FRM_ROD = 8, /* Round to Odd */
};
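
RISCV_FRM_ROD is not encodable in the architectural frm CSR field; it exists only so the translator can request round-to-odd for vfncvt.rod.f.f.w. A sketch of the helper that consumes it, assuming it simply switches softfloat into its round-to-odd mode:

void helper_set_rod_rounding_mode(CPURISCVState *env)
{
    set_float_rounding_mode(float_round_to_odd, &env->fp_status);
}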
static inline uint64_t nanbox_s(float32 f)
{
return f | MAKE_64BIT_MASK(32, 32);
@@ -58,4 +62,20 @@ static inline float32 check_nanbox_s(uint64_t f)
}
}
static inline uint64_t nanbox_h(float16 f)
{
return f | MAKE_64BIT_MASK(16, 48);
}
static inline float16 check_nanbox_h(uint64_t f)
{
uint64_t mask = MAKE_64BIT_MASK(16, 48);
if (likely((f & mask) == mask)) {
return (uint16_t)f;
} else {
return 0x7E00u; /* default qnan */
}
}
#endif
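
A quick illustration of the half-precision nan-boxing contract above, as a standalone sketch assuming the two inlines are in scope:

uint64_t boxed = nanbox_h(0x3c00);           /* fp16 1.0             */
/* boxed == 0xffffffffffff3c00 */
float16 v = check_nanbox_h(boxed);           /* round-trips: 0x3c00  */
float16 bad = check_nanbox_h(0x3c00);        /* not boxed -> 0x7e00  */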


@@ -30,9 +30,10 @@
#include "exec/log.h"
#include "instmap.h"
#include "internals.h"
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv cpu_gpr[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
@@ -62,7 +63,9 @@ typedef struct DisasContext {
uint32_t misa_ext;
uint32_t opcode;
uint32_t mstatus_fs;
uint32_t mstatus_vs;
uint32_t mstatus_hs_fs;
uint32_t mstatus_hs_vs;
uint32_t mem_idx;
/* Remember the rounding mode encoded in the previous fp instruction,
which we have already installed into env->fp_status. Or -1 for
@@ -73,13 +76,28 @@ typedef struct DisasContext {
RISCVMXL ol;
bool virt_enabled;
bool ext_ifencei;
bool ext_zfh;
bool ext_zfhmin;
bool hlsx;
/* vector extension */
bool vill;
uint8_t lmul;
/*
* Encode LMUL to lmul as follows:
* LMUL vlmul lmul
* 1 000 0
* 2 001 1
* 4 010 2
* 8 011 3
* - 100 -
* 1/8 101 -3
* 1/4 110 -2
* 1/2 111 -1
*/
int8_t lmul;
uint8_t sew;
uint16_t vlen;
uint16_t mlen;
uint16_t elen;
target_ulong vstart;
bool vl_eq_vlmax;
uint8_t ntemp;
CPUState *cs;
@@ -134,6 +152,11 @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48));
}
/*
* A narrow n-bit operation, where n < FLEN, checks that input operands
* are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
@@ -142,6 +165,16 @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
*
* Here, the result is always nan-boxed, even the canonical nan.
*/
static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull);
TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull);
/* out = in when properly nan-boxed, else the canonical half qNaN,
 * mirroring what check_nanbox_h() does at run time. */
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
@@ -331,12 +364,54 @@ static void mark_fs_dirty(DisasContext *ctx)
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_vs are:
* 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
* We will have already diagnosed disabled state,
* and need to turn initial/clean into dirty.
*/
static void mark_vs_dirty(DisasContext *ctx)
{
TCGv tmp;
if (ctx->mstatus_vs != MSTATUS_VS) {
/* Remember the state change for the rest of the TB. */
ctx->mstatus_vs = MSTATUS_VS;
tmp = tcg_temp_new();
tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
tcg_temp_free(tmp);
}
if (ctx->virt_enabled && ctx->mstatus_hs_vs != MSTATUS_VS) {
/* Remember the state change for the rest of the TB. */
ctx->mstatus_hs_vs = MSTATUS_VS;
tmp = tcg_temp_new();
tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
tcg_temp_free(tmp);
}
}
#else
static inline void mark_vs_dirty(DisasContext *ctx) { }
#endif
static void gen_set_rm(DisasContext *ctx, int rm)
{
if (ctx->frm == rm) {
return;
}
ctx->frm = rm;
if (rm == RISCV_FRM_ROD) {
gen_helper_set_rod_rounding_mode(cpu_env);
return;
}
gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
}
@@ -574,6 +649,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "insn_trans/trans_rvh.c.inc"
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_privileged.c.inc"
/* Include the auto-generated decoder for 16 bit insn */
@@ -613,6 +689,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->pc_succ_insn = ctx->base.pc_first;
ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
ctx->mstatus_vs = tb_flags & TB_FLAGS_MSTATUS_VS;
ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVH)) {
@@ -626,13 +703,17 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->misa_ext = env->misa_ext;
ctx->frm = -1; /* unknown rounding mode */
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
ctx->ext_zfh = cpu->cfg.ext_zfh;
ctx->ext_zfhmin = cpu->cfg.ext_zfhmin;
ctx->vlen = cpu->cfg.vlen;
ctx->elen = cpu->cfg.elen;
ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS);
ctx->mstatus_hs_vs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_VS);
ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
ctx->vstart = env->vstart;
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->cs = cs;
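
The sextract32() above is what turns the raw 3-bit vlmul field into the signed exponent documented in the DisasContext comment, e.g.:

/* vlmul = 0b111 (LMUL = 1/2) -> lmul = -1; 0b011 (LMUL = 8) -> 3 */
int8_t lmul = sextract32(0x7, 0, 3);    /* == -1 */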
@@ -751,6 +832,8 @@ void riscv_translate_init(void)
cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
cpu_vstart = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vstart),
"vstart");
load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
"load_res");
load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),

(File diff suppressed because it is too large.)