2018-03-02 13:31:11 +01:00
|
|
|
/*
|
|
|
|
* RISC-V emulation for qemu: main translation routines.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms and conditions of the GNU General Public License,
|
|
|
|
* version 2 or later, as published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
|
|
* more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along with
|
|
|
|
* this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "qemu/log.h"
|
|
|
|
#include "cpu.h"
|
2020-01-01 12:23:00 +01:00
|
|
|
#include "tcg/tcg-op.h"
|
2018-03-02 13:31:11 +01:00
|
|
|
#include "disas/disas.h"
|
|
|
|
#include "exec/cpu_ldst.h"
|
|
|
|
#include "exec/exec-all.h"
|
|
|
|
#include "exec/helper-proto.h"
|
|
|
|
#include "exec/helper-gen.h"
|
|
|
|
|
2018-02-14 00:27:54 +01:00
|
|
|
#include "exec/translator.h"
|
2018-03-02 13:31:11 +01:00
|
|
|
#include "exec/log.h"
|
|
|
|
|
|
|
|
#include "instmap.h"
|
|
|
|
|
|
|
|
/* global register indices */
|
2020-07-01 17:24:49 +02:00
|
|
|
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
|
2018-03-02 13:31:11 +01:00
|
|
|
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
|
|
|
|
static TCGv load_res;
|
|
|
|
static TCGv load_val;
|
|
|
|
|
|
|
|
#include "exec/gen-icount.h"
|
|
|
|
|
|
|
|
/* Per-translation-block decode state, embedding the generic translator base. */
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    /* Privileged-spec version of the CPU, copied from env at TB start. */
    target_ulong priv_ver;
    /* True when executing with the hypervisor V=1 (guest virtualization). */
    bool virt_enabled;
    /* Raw instruction encoding currently being translated. */
    uint32_t opcode;
    /* Cached mstatus.FS field; 0 means FP is disabled. */
    uint32_t mstatus_fs;
    /* Cached misa register, used for ISA-extension checks (has_ext). */
    uint32_t misa;
    /* MMU index for memory accesses generated by this TB. */
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    /* True when the Zifencei extension (FENCE.I) is available. */
    bool ext_ifencei;
    /* vector extension */
    bool vill;          /* vtype.vill: vector config is illegal */
    uint8_t lmul;       /* vtype.vlmul: register group multiplier (log2) */
    uint8_t sew;        /* vtype.vsew: standard element width (log2) */
    uint16_t vlen;      /* VLEN in bits, from cpu->cfg */
    uint16_t mlen;      /* mask element length: SEW/LMUL in bits */
    bool vl_eq_vlmax;   /* true when vl == vlmax (fast-path condition) */
} DisasContext;
|
|
|
|
|
2019-02-13 16:54:02 +01:00
|
|
|
#ifdef TARGET_RISCV64
|
2018-03-02 13:31:11 +01:00
|
|
|
/* convert riscv funct3 to qemu memop for load/store */
|
|
|
|
static const int tcg_memop_lookup[8] = {
|
|
|
|
[0 ... 7] = -1,
|
|
|
|
[0] = MO_SB,
|
|
|
|
[1] = MO_TESW,
|
|
|
|
[2] = MO_TESL,
|
2019-10-30 01:23:18 +01:00
|
|
|
[3] = MO_TEQ,
|
2018-03-02 13:31:11 +01:00
|
|
|
[4] = MO_UB,
|
|
|
|
[5] = MO_TEUW,
|
|
|
|
[6] = MO_TEUL,
|
|
|
|
};
|
2019-02-13 16:54:02 +01:00
|
|
|
#endif
|
2018-03-02 13:31:11 +01:00
|
|
|
|
|
|
|
/*
 * Expand to both the base opcode case and its 32-bit "W" variant on RV64;
 * on RV32 only the base case exists.
 */
#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
|
|
|
|
|
2019-01-15 00:58:42 +01:00
|
|
|
/* Report whether ISA extension bit @ext is set in this context's misa. */
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return (ctx->misa & ext) != 0;
}
|
|
|
|
|
|
|
|
/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Force all upper 32 bits to 1, leaving the SP value in the low half. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A narrow n-bit operation, where n < FLEN, checks that input operands
|
|
|
|
* are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
|
|
|
|
* If so, the least-significant bits of the input are used, otherwise the
|
|
|
|
* input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
|
|
|
|
*
|
|
|
|
* Here, the result is always nan-boxed, even the canonical nan.
|
|
|
|
*/
|
|
|
|
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
|
|
|
|
{
|
|
|
|
TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
|
|
|
|
TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);
|
|
|
|
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
|
|
|
|
tcg_temp_free_i64(t_max);
|
|
|
|
tcg_temp_free_i64(t_nan);
|
2019-01-15 00:58:42 +01:00
|
|
|
}
|
|
|
|
|
2018-03-02 13:31:11 +01:00
|
|
|
/* Raise exception @excp at the current insn and terminate the TB. */
static void generate_exception(DisasContext *ctx, int excp)
{
    TCGv_i32 excp_num;

    /* Expose the faulting PC before calling the raise helper. */
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    excp_num = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, excp_num);
    tcg_temp_free_i32(excp_num);
    ctx->base.is_jmp = DISAS_NORETURN;
}
|
|
|
|
|
|
|
|
/*
 * Raise exception @excp, additionally recording the current PC as the
 * bad address (badaddr), then terminate the TB.
 */
static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    TCGv_i32 excp_num;

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    /* The faulting address for a misaligned fetch is the target PC itself. */
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    excp_num = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, excp_num);
    tcg_temp_free_i32(excp_num);
    ctx->base.is_jmp = DISAS_NORETURN;
}
|
|
|
|
|
|
|
|
static void gen_exception_debug(void)
|
|
|
|
{
|
|
|
|
TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
|
|
|
|
gen_helper_raise_exception(cpu_env, helper_tmp);
|
|
|
|
tcg_temp_free_i32(helper_tmp);
|
|
|
|
}
|
|
|
|
|
2019-03-25 12:45:54 +01:00
|
|
|
/* Wrapper around tcg_gen_exit_tb that handles single stepping */
|
|
|
|
static void exit_tb(DisasContext *ctx)
|
|
|
|
{
|
|
|
|
if (ctx->base.singlestep_enabled) {
|
|
|
|
gen_exception_debug();
|
|
|
|
} else {
|
|
|
|
tcg_gen_exit_tb(NULL, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
|
|
|
|
static void lookup_and_goto_ptr(DisasContext *ctx)
|
|
|
|
{
|
|
|
|
if (ctx->base.singlestep_enabled) {
|
|
|
|
gen_exception_debug();
|
|
|
|
} else {
|
|
|
|
tcg_gen_lookup_and_goto_ptr();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-02 13:31:11 +01:00
|
|
|
/* Raise an illegal-instruction exception at the current PC. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
|
|
|
|
|
|
|
|
/* Raise an instruction-address-misaligned exception, recording badaddr. */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
|
|
|
|
|
|
|
|
/*
 * Decide whether a direct goto_tb chain to @dest is permitted: never while
 * single stepping, and in system mode only when the destination is on the
 * same guest page as the current TB.
 */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
|
|
|
|
|
|
|
|
/* Emit a jump to @dest, chaining the TB directly when allowed. */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (!use_goto_tb(ctx, dest)) {
        /* Cross-page (or single-step) target: go through the TB lookup. */
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
        return;
    }

    /* chaining is only allowed when the jump is to the same page */
    tcg_gen_goto_tb(n);
    tcg_gen_movi_tl(cpu_pc, dest);

    /* No need to check for single stepping here as use_goto_tb() will
     * return false in case of single stepping.
     */
    tcg_gen_exit_tb(ctx->base.tb, n);
}
|
|
|
|
|
|
|
|
/* Wrapper for getting reg values - need to check of reg is zero since
|
|
|
|
* cpu_gpr[0] is not actually allocated
|
|
|
|
*/
|
|
|
|
static inline void gen_get_gpr(TCGv t, int reg_num)
|
|
|
|
{
|
|
|
|
if (reg_num == 0) {
|
|
|
|
tcg_gen_movi_tl(t, 0);
|
|
|
|
} else {
|
|
|
|
tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wrapper for setting reg values - need to check of reg is zero since
|
|
|
|
* cpu_gpr[0] is not actually allocated. this is more for safety purposes,
|
|
|
|
* since we usually avoid calling the OP_TYPE_gen function if we see a write to
|
|
|
|
* $zero
|
|
|
|
*/
|
|
|
|
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
|
|
|
|
{
|
|
|
|
if (reg_num_dst != 0) {
|
|
|
|
tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * MULHSU: high XLEN bits of signed(arg1) * unsigned(arg2).
 * Computed as the unsigned high product, corrected by subtracting arg2
 * when arg1 is negative.
 */
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv lo = tcg_temp_new();
    TCGv hi = tcg_temp_new();

    tcg_gen_mulu2_tl(lo, hi, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(lo, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(lo, lo, arg2);
    tcg_gen_sub_tl(ret, hi, lo);

    tcg_temp_free(lo);
    tcg_temp_free(hi);
}
|
|
|
|
|
2019-02-13 16:54:06 +01:00
|
|
|
/*
 * Signed division with RISC-V semantics: div-by-zero yields -1 and
 * INT_MIN / -1 yields INT_MIN (no trap), per the M-extension spec.
 */
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
            resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
|
2018-03-02 13:31:11 +01:00
|
|
|
|
2019-02-13 16:54:06 +01:00
|
|
|
/*
 * Unsigned division with RISC-V semantics: div-by-zero yields all-ones
 * (2^XLEN - 1) instead of trapping, per the M-extension spec.
 */
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();

    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    /* cond1 = (source2 == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    /* On div-by-zero: dividend becomes -1 and divisor becomes 1, so the
       division itself produces the required all-ones result. */
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
            resultopt1);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
|
2018-03-02 13:31:11 +01:00
|
|
|
|
2019-02-13 16:54:06 +01:00
|
|
|
/*
 * Signed remainder with RISC-V semantics: rem-by-zero returns the dividend,
 * and the INT_MIN % -1 overflow case returns 0, per the M-extension spec.
 */
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
            resultopt1);
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
|
2018-03-02 13:31:11 +01:00
|
|
|
|
2019-02-13 16:54:06 +01:00
|
|
|
/*
 * Unsigned remainder with RISC-V semantics: rem-by-zero returns the
 * original dividend, per the M-extension spec.
 */
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    /* Replace a zero divisor with 1 so the remu itself cannot trap. */
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
|
|
|
|
|
2019-01-15 00:58:42 +01:00
|
|
|
/*
 * JAL: jump to pc + imm, writing the return address to rd (if not x0).
 * Without the C extension, a target not aligned to 4 bytes raises an
 * instruction-address-misaligned exception.
 */
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!has_ext(ctx, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        /* Link register gets the address of the following instruction. */
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    /* Reuse next_pc rather than recomputing pc_next + imm. */
    gen_goto_tb(ctx, 0, next_pc); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}
|
|
|
|
|
2019-02-13 16:54:01 +01:00
|
|
|
#ifdef TARGET_RISCV64
/*
 * Legacy-decoder load for compressed insns: rd = mem[rs1 + imm].
 * Raises illegal-instruction for funct3 encodings with no defined width.
 */
static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        target_long imm)
{
    /* Look up the memop first so the illegal path allocates no temps
       (the original leaked t0/t1 on early return). */
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

/*
 * Legacy-decoder store for compressed insns: mem[rs1 + imm] = rs2.
 * Raises illegal-instruction for funct3 encodings with no defined width.
 */
static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
        target_long imm)
{
    /* As above: validate the encoding before allocating temporaries. */
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}
#endif
|
2018-03-02 13:31:11 +01:00
|
|
|
|
2019-01-15 00:57:59 +01:00
|
|
|
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    /* Already known dirty for this TB: nothing to do. */
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB. */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    /* Set FS to dirty and SD (state dirty summary bit) together. */
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    /* When running a guest (V=1), mirror the change into the HS-level
       mstatus copy as well. */
    if (ctx->virt_enabled) {
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }
    tcg_temp_free(tmp);
}
#else
/* User mode has no mstatus to track; FS dirtiness is irrelevant. */
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
|
|
|
|
|
2019-02-13 16:53:58 +01:00
|
|
|
#if !defined(TARGET_RISCV64)
|
2018-03-02 13:31:11 +01:00
|
|
|
/*
 * Legacy-decoder FP load (FLW/FLD): cpu_fpr[rd] = mem[rs1 + imm].
 * Requires FP enabled (mstatus.FS != 0) and the matching F/D extension;
 * marks the FP state dirty on success.
 */
static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values;
           use the shared helper instead of open-coding the mask. */
        gen_nanbox_s(cpu_fpr[rd], cpu_fpr[rd]);
        break;
    case OPC_RISC_FLD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
}
|
|
|
|
|
|
|
|
/*
 * Legacy-decoder FP store (FSW/FSD): mem[rs1 + imm] = cpu_fpr[rs2].
 * Requires FP enabled (mstatus.FS != 0) and the matching F/D extension.
 */
static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
        int rs2, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        /* Only the low 32 bits of the (NaN-boxed) register are stored. */
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}
|
2019-02-13 16:53:58 +01:00
|
|
|
#endif
|
2018-03-02 13:31:11 +01:00
|
|
|
|
|
|
|
/*
 * Install FP rounding mode @rm into env->fp_status via the helper,
 * skipping the call when the mode is already known to be installed.
 */
static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 rm_num;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    rm_num = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, rm_num);
    tcg_temp_free_i32(rm_num);
}
|
|
|
|
|
2020-02-25 13:47:05 +01:00
|
|
|
/*
 * Legacy fallback decoder for compressed quadrant-0 insns not handled by
 * the decodetree decoder (FP/doubleword load/store forms).  Other funct3
 * values are silently ignored here; they are handled elsewhere.
 */
static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode)
{
    uint8_t funct3 = extract16(opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(opcode);
    uint8_t rs1s = GET_C_RS1S(opcode);

    switch (funct3) {
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(opcode));
#endif
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                   GET_C_LW_IMM(opcode));
#endif
        break;
    }
}
|
|
|
|
|
2020-02-25 13:47:05 +01:00
|
|
|
/*
 * Legacy fallback decoder for compressed insns: dispatch on the low two
 * quadrant bits.  Only quadrant 0 still has a legacy path.
 */
static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode)
{
    uint8_t op = extract16(opcode, 0, 2);

    if (op == 0) {
        decode_RV32_64C0(ctx, opcode);
    }
}
|
|
|
|
|
2020-07-01 17:24:54 +02:00
|
|
|
/* Decodetree immediate transform: the nf field encodes (value - 1). */
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}
|
|
|
|
|
2019-02-13 16:53:41 +01:00
|
|
|
/*
 * Decodetree immediate transforms: scale an extracted field left by a
 * fixed shift amount (used for scaled load/store/branch offsets).
 */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
|
|
|
|
|
2019-02-13 16:53:48 +01:00
|
|
|
/*
 * Bail out of a trans_* function (returning false, i.e. "not decoded")
 * when ISA extension @ext is not available.
 */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)
|
|
|
|
|
2019-03-21 03:21:31 +01:00
|
|
|
/* Compressed 3-bit register fields map to x8..x15. */
static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}
|
|
|
|
|
2019-04-01 05:11:51 +02:00
|
|
|
/* Decode a compressed shift amount. */
static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}
|
|
|
|
|
2019-02-13 16:53:41 +01:00
|
|
|
/* Include the auto-generated decoder for 32 bit insn */
|
2020-08-07 12:10:23 +02:00
|
|
|
#include "decode-insn32.c.inc"
|
2019-02-13 16:54:03 +01:00
|
|
|
|
2019-04-01 05:11:54 +02:00
|
|
|
/*
 * Translate a reg-imm ALU op where @func takes the immediate directly
 * (as a target_long) rather than in a TCG register.
 */
static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, target_long))
{
    TCGv src = tcg_temp_new();

    gen_get_gpr(src, a->rs1);
    (*func)(src, src, a->imm);
    gen_set_gpr(a->rd, src);

    tcg_temp_free(src);
    return true;
}
|
|
|
|
|
|
|
|
/*
 * Translate a reg-imm ALU op where @func expects both operands in TCG
 * registers; the immediate is materialized into a temporary first.
 */
static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, TCGv))
{
    TCGv src = tcg_temp_new();
    TCGv imm = tcg_temp_new();

    gen_get_gpr(src, a->rs1);
    tcg_gen_movi_tl(imm, a->imm);
    (*func)(src, src, imm);
    gen_set_gpr(a->rd, src);

    tcg_temp_free(imm);
    tcg_temp_free(src);
    return true;
}
|
|
|
|
|
|
|
|
#ifdef TARGET_RISCV64
|
|
|
|
/* ADDW: 32-bit add, result sign-extended to 64 bits. */
static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
|
2019-02-13 16:54:04 +01:00
|
|
|
|
|
|
|
/* SUBW: 32-bit subtract, result sign-extended to 64 bits. */
static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
|
|
|
|
|
2019-02-13 16:54:06 +01:00
|
|
|
/* MULW: 32-bit multiply, result sign-extended to 64 bits. */
static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
|
|
|
|
|
|
|
|
/*
 * Translate a signed W-form divide/remainder (DIVW/REMW): operate on the
 * sign-extended low 32 bits of each source and sign-extend the result.
 */
static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
                            void(*func)(TCGv, TCGv, TCGv))
{
    TCGv s1 = tcg_temp_new();
    TCGv s2 = tcg_temp_new();

    gen_get_gpr(s1, a->rs1);
    gen_get_gpr(s2, a->rs2);
    tcg_gen_ext32s_tl(s1, s1);
    tcg_gen_ext32s_tl(s2, s2);

    (*func)(s1, s1, s2);

    /* W-form results are always sign-extended to XLEN. */
    tcg_gen_ext32s_tl(s1, s1);
    gen_set_gpr(a->rd, s1);
    tcg_temp_free(s2);
    tcg_temp_free(s1);
    return true;
}
|
|
|
|
|
target/riscv: Zero extend the inputs of divuw and remuw
While running the GCC test suite against 4.0.0-rc0, Kito found a
regression introduced by the decodetree conversion that caused divuw and
remuw to sign-extend their inputs. The ISA manual says they are
supposed to be zero extended:
DIVW and DIVUW instructions are only valid for RV64, and divide the
lower 32 bits of rs1 by the lower 32 bits of rs2, treating them as
signed and unsigned integers respectively, placing the 32-bit
quotient in rd, sign-extended to 64 bits. REMW and REMUW
instructions are only valid for RV64, and provide the corresponding
signed and unsigned remainder operations respectively. Both REMW
and REMUW always sign-extend the 32-bit result to 64 bits, including
on a divide by zero.
Here's Kito's reduced test case from the GCC test suite
unsigned calc_mp(unsigned mod)
{
unsigned a,b,c;
c=-1;
a=c/mod;
b=0-a*mod;
if (b > mod) { a += 1; b-=mod; }
return b;
}
int main(int argc, char *argv[])
{
unsigned x = 1234;
unsigned y = calc_mp(x);
if ((sizeof (y) == 4 && y != 680)
|| (sizeof (y) == 2 && y != 134))
abort ();
exit (0);
}
I haven't done any other testing on this, but it does fix the test case.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
2019-03-21 15:59:20 +01:00
|
|
|
/*
 * Translate an unsigned W-form divide/remainder (DIVUW/REMUW): operate on
 * the zero-extended low 32 bits of each source, then sign-extend the
 * 32-bit result to XLEN as the ISA requires.
 */
static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
                             void(*func)(TCGv, TCGv, TCGv))
{
    TCGv s1 = tcg_temp_new();
    TCGv s2 = tcg_temp_new();

    gen_get_gpr(s1, a->rs1);
    gen_get_gpr(s2, a->rs2);
    tcg_gen_ext32u_tl(s1, s1);
    tcg_gen_ext32u_tl(s2, s2);

    (*func)(s1, s1, s2);

    tcg_gen_ext32s_tl(s1, s1);
    gen_set_gpr(a->rd, s1);
    tcg_temp_free(s2);
    tcg_temp_free(s1);
    return true;
}
|
|
|
|
|
2019-02-13 16:54:03 +01:00
|
|
|
#endif
|
|
|
|
|
2019-02-13 16:54:07 +01:00
|
|
|
/* Translate a reg-reg ALU op: rd = func(rs1, rs2). */
static bool gen_arith(DisasContext *ctx, arg_r *a,
                      void(*func)(TCGv, TCGv, TCGv))
{
    TCGv s1 = tcg_temp_new();
    TCGv s2 = tcg_temp_new();

    gen_get_gpr(s1, a->rs1);
    gen_get_gpr(s2, a->rs2);

    (*func)(s1, s1, s2);
    gen_set_gpr(a->rd, s1);

    tcg_temp_free(s2);
    tcg_temp_free(s1);
    return true;
}
|
|
|
|
|
2019-02-13 16:54:05 +01:00
|
|
|
/*
 * Translate a reg-reg shift: only the low log2(XLEN) bits of rs2 are
 * used as the shift amount, per the RISC-V base ISA.
 */
static bool gen_shift(DisasContext *ctx, arg_r *a,
                      void(*func)(TCGv, TCGv, TCGv))
{
    TCGv val = tcg_temp_new();
    TCGv amount = tcg_temp_new();

    gen_get_gpr(val, a->rs1);
    gen_get_gpr(amount, a->rs2);

    tcg_gen_andi_tl(amount, amount, TARGET_LONG_BITS - 1);
    (*func)(val, val, amount);
    gen_set_gpr(a->rd, val);

    tcg_temp_free(amount);
    tcg_temp_free(val);
    return true;
}
|
|
|
|
|
2019-02-13 16:53:41 +01:00
|
|
|
/* Include insn module translation function */
|
2020-02-04 12:41:01 +01:00
|
|
|
#include "insn_trans/trans_rvi.c.inc"
|
|
|
|
#include "insn_trans/trans_rvm.c.inc"
|
|
|
|
#include "insn_trans/trans_rva.c.inc"
|
|
|
|
#include "insn_trans/trans_rvf.c.inc"
|
|
|
|
#include "insn_trans/trans_rvd.c.inc"
|
|
|
|
#include "insn_trans/trans_rvh.c.inc"
|
|
|
|
#include "insn_trans/trans_rvv.c.inc"
|
|
|
|
#include "insn_trans/trans_privileged.c.inc"
|
2019-02-13 16:53:41 +01:00
|
|
|
|
2019-08-09 17:24:57 +02:00
|
|
|
/* Include the auto-generated decoder for 16 bit insn */
|
2020-08-07 12:10:23 +02:00
|
|
|
#include "decode-insn16.c.inc"
|
2019-02-13 16:53:56 +01:00
|
|
|
|
2020-02-25 13:47:05 +01:00
|
|
|
/*
 * Decode and translate one instruction.  @opcode holds the first 16 bits
 * already fetched by the caller; for 32-bit encodings the upper half is
 * fetched here.  Updates ctx->pc_succ_insn to the following instruction.
 */
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                /* fall back to old decoder */
                decode_RV32_64C(ctx, opcode);
            }
        }
    } else {
        uint32_t opcode32 = opcode;
        /* Fetch the high halfword to form the full 32-bit encoding. */
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}
|
|
|
|
|
2018-04-06 19:42:27 +02:00
|
|
|
/*
 * Translator hook: populate DisasContext from the CPU state and the
 * TB flags captured at code-generation time.
 */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    /* V=1 only makes sense when the hypervisor extension exists. */
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    /* Vector-extension state, decoded from the TB flags. */
    ctx->vlen = cpu->cfg.vlen;
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew  + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
}
|
2018-03-02 13:31:11 +01:00
|
|
|
|
2018-04-06 19:42:27 +02:00
|
|
|
/* Translator hook called at the start of each TB; RISC-V needs no setup. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
|
2018-03-02 13:31:11 +01:00
|
|
|
|
2018-04-06 19:42:27 +02:00
|
|
|
/*
 * Translator hook called before each instruction: record the guest PC
 * so the TCG op stream can be unwound back to this instruction.
 */
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    /* ctx->base is the DisasContextBase itself, so read pc_next directly. */
    tcg_gen_insn_start(dcbase->pc_next);
}
|
|
|
|
|
|
|
|
/*
 * Translator hook: a guest breakpoint was hit at base.pc_next.
 * Emit code that synchronizes the guest PC and raises the debug
 * exception, then end the block.  Returns true to tell the generic
 * translator loop that the breakpoint was handled here.
 */
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Make the guest PC precise before raising the exception. */
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing. */
    ctx->base.pc_next += 4;
    return true;
}
|
|
|
|
|
|
|
|
/*
 * Translator hook: fetch and translate the instruction at base.pc_next,
 * then decide whether translation can continue within this TB.
 */
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    /* First halfword is enough for decode_opc to tell 16- from 32-bit. */
    uint16_t first_half = translator_lduw(env, ctx->base.pc_next);

    decode_opc(env, ctx, first_half);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        /* Stop the TB when translation would cross a page boundary. */
        target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;

        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
|
|
|
|
|
|
|
|
/*
 * Translator hook: emit the code that ends the TB, according to why
 * the translation loop stopped.
 */
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    if (ctx->base.is_jmp == DISAS_TOO_MANY) {
        /* Fell off the end of the block: chain to the next TB. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
    } else if (ctx->base.is_jmp == DISAS_NORETURN) {
        /* An exception or exit was already emitted; nothing to add. */
    } else {
        /* Every other DISAS_* state should have been resolved earlier. */
        g_assert_not_reached();
    }
}
|
|
|
|
|
|
|
|
/*
 * Translator hook: log the guest code of the TB being translated
 * (-d in_asm).  In system emulation also log the current privilege
 * and virtualization state, which affect how the code executes.
 */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    /* Wrapped to stay within QEMU's 80-column coding style limit. */
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n",
             env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
|
|
|
|
|
|
|
|
/*
 * Hooks the generic translator loop (translator_loop) invokes while
 * building a TB for a RISC-V guest.
 */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
|
|
|
|
|
2019-04-16 08:54:54 +02:00
|
|
|
/*
 * Entry point from the core TCG code: translate up to max_insns guest
 * instructions into the given TB by driving the generic translator
 * loop with the RISC-V hooks above.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
|
|
|
|
|
|
|
|
void riscv_translate_init(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
|
|
|
|
/* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
|
|
|
|
/* registers, unless you specifically block reads/writes to reg 0 */
|
|
|
|
cpu_gpr[0] = NULL;
|
|
|
|
|
|
|
|
for (i = 1; i < 32; i++) {
|
|
|
|
cpu_gpr[i] = tcg_global_mem_new(cpu_env,
|
|
|
|
offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < 32; i++) {
|
|
|
|
cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
|
|
|
|
offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
|
2020-07-01 17:24:49 +02:00
|
|
|
cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
|
2018-03-02 13:31:11 +01:00
|
|
|
load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
|
|
|
|
"load_res");
|
|
|
|
load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
|
|
|
|
"load_val");
|
|
|
|
}
|