target/riscv: Remove the hyp load and store functions

Remove the special Virtualisation load and store functions and just use
the standard TCG tcg_gen_qemu_ld_tl() and tcg_gen_qemu_st_tl() functions
instead.

As part of this change we still run an access check to make sure the
operation is allowed.

Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 189ac3e53ef2854824d18aad7074c6649f17de2c.1604464950.git.alistair.francis@wdc.com
Author: Alistair Francis <alistair.francis@wdc.com>
Date:   2020-11-03 20:43:31 -08:00
Commit: 743077b35b (parent 1c1c060aa8)
5 changed files with 59 additions and 166 deletions
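For context, this is the shape of the change, condensed from the translation hunks below into a before/after fragment. It is an illustrative excerpt, not additional code from the commit; t0 holds the address from rs1 and t1 the loaded value, exactly as in the patch.

/* Before: marshal the MMU index and MemOp into TCG temporaries and call the
 * hyp_load helper, which repeated the privilege check at run time. */
TCGv mem_idx = tcg_temp_new();
TCGv memop = tcg_temp_new();
tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
tcg_gen_movi_tl(memop, MO_SB);
gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);

/* After: check the access once at translation time, then emit a plain TCG
 * load with the hypervisor-access bit OR'd into the MMU index. */
check_access(ctx);
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);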


@@ -375,6 +375,8 @@ FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1)
FIELD(TB_FLAGS, LMUL, 3, 2)
FIELD(TB_FLAGS, SEW, 5, 3)
FIELD(TB_FLAGS, VILL, 8, 1)
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 9, 1)
/*
 * A simplification for VLMAX
@@ -421,7 +423,17 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }
    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }
    }
#endif
    *pflags = flags;
}
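Read as a standalone predicate, the HLSX computation above says that hypervisor loads and stores are permitted in M-mode, in HS-mode (S-mode with virtualisation disabled), or in U-mode with virtualisation disabled when hstatus.HU is set. A hypothetical helper expressing the same condition (not part of the patch; the identical test appears in the removed helper_hyp_load()/helper_hyp_store() bodies further down):

/* Hypothetical predicate mirroring the HLSX flag computation above. */
static bool hyp_ls_allowed(CPURISCVState *env)
{
    return env->priv == PRV_M ||
           (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
           (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
            get_field(env->hstatus, HSTATUS_HU));
}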


@@ -81,8 +81,6 @@ DEF_HELPER_1(tlb_flush, void, env)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(hyp_tlb_flush, void, env)
DEF_HELPER_1(hyp_gvma_tlb_flush, void, env)
DEF_HELPER_4(hyp_load, tl, env, tl, tl, tl)
DEF_HELPER_5(hyp_store, void, env, tl, tl, tl, tl)
DEF_HELPER_4(hyp_x_load, tl, env, tl, tl, tl)
#endif


@@ -16,26 +16,34 @@
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CONFIG_USER_ONLY
static void check_access(DisasContext *ctx) {
    if (!ctx->hlsx) {
        if (ctx->virt_enabled) {
            generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
        } else {
            generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
        }
    }
}
#endif

static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a)
{
    REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_SB);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -48,20 +56,16 @@ static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TESW);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -74,20 +78,16 @@ static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TESL);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -100,20 +100,16 @@ static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_UB);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_UB);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -126,20 +122,15 @@ static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TEUW);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUW);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -152,20 +143,16 @@ static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    gen_get_gpr(dat, a->rs2);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_SB);
    gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -178,20 +165,16 @@ static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    gen_get_gpr(dat, a->rs2);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TESW);
    gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -204,20 +187,16 @@ static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    gen_get_gpr(dat, a->rs2);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TESL);
    gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -231,20 +210,16 @@ static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TEUL);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUL);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -257,20 +232,16 @@ static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TEQ);
    gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
    gen_set_gpr(a->rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;
@@ -283,20 +254,16 @@ static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
#ifndef CONFIG_USER_ONLY
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    TCGv mem_idx = tcg_temp_new();
    TCGv memop = tcg_temp_new();
    check_access(ctx);
    gen_get_gpr(t0, a->rs1);
    gen_get_gpr(dat, a->rs2);
    tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
    tcg_gen_movi_tl(memop, MO_TEQ);
    gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
    tcg_temp_free(mem_idx);
    tcg_temp_free(memop);
    return true;
#else
    return false;


@@ -227,92 +227,6 @@ void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
    helper_hyp_tlb_flush(env);
}

target_ulong helper_hyp_load(CPURISCVState *env, target_ulong address,
                             target_ulong attrs, target_ulong memop)
{
    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
        (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
            get_field(env->hstatus, HSTATUS_HU))) {
        target_ulong pte;
        int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
        switch (memop) {
        case MO_SB:
            pte = cpu_ldsb_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_UB:
            pte = cpu_ldub_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TESW:
            pte = cpu_ldsw_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEUW:
            pte = cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TESL:
            pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEUL:
            pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        case MO_TEQ:
            pte = cpu_ldq_mmuidx_ra(env, address, mmu_idx, GETPC());
            break;
        default:
            g_assert_not_reached();
        }
        return pte;
    }
    if (riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    return 0;
}

void helper_hyp_store(CPURISCVState *env, target_ulong address,
                      target_ulong val, target_ulong attrs, target_ulong memop)
{
    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
        (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
            get_field(env->hstatus, HSTATUS_HU))) {
        int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
        switch (memop) {
        case MO_SB:
        case MO_UB:
            cpu_stb_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        case MO_TESW:
        case MO_TEUW:
            cpu_stw_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        case MO_TESL:
        case MO_TEUL:
            cpu_stl_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        case MO_TEQ:
            cpu_stq_mmuidx_ra(env, address, val, mmu_idx, GETPC());
            break;
        default:
            g_assert_not_reached();
        }
        return;
    }
    if (riscv_cpu_virt_enabled(env)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
}

target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
                               target_ulong attrs, target_ulong memop)
{


@@ -56,6 +56,7 @@ typedef struct DisasContext {
       to reset this known value. */
    int frm;
    bool ext_ifencei;
    bool hlsx;
    /* vector extension */
    bool vill;
    uint8_t lmul;
@@ -807,6 +808,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
    ctx->frm = -1; /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);