Merge tag 'pull-riscv-to-apply-20230614' of https://github.com/alistair23/qemu into staging

Second RISC-V PR for 8.1

* Skip Vector set tail when vta is zero
* Move zc* out of the experimental properties
* Mask the implicitly enabled extensions in isa_string based on priv version
* Rework CPU extension validation and validate MISA changes
* Fixup PMP TLB caching errors
* Writing to pmpaddr and MML/MMWP correctly triggers TLB flushes
* Fixup PMP bypass checks
* Deny access if access is partially inside a PMP entry
* Correct OpenTitanState parent type/size
* Fix QEMU crash when NUMA nodes exceed available CPUs
* Fix pointer mask transformation for vector address
* Updates and improvements for Smstateen
* Support disas for Zcm* extensions
* Support disas for Z*inx extensions
* Remove unused decomp_rv32/64 value for vector instructions
* Enable PC-relative translation
* Assume M-mode FW in pflash0 only when "-bios none"
* Support using pflash via -blockdev option
* Add vector registers to log
* Clean up reference of Vector MTYPE
* Remove the check for extra Vector tail elements
* Smepmp: Return error when access permission not allowed in PMP
* Fixes for smsiaddrcfg and smsiaddrcfgh in AIA

# -----BEGIN PGP SIGNATURE-----
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmSJFRoACgkQr3yVEwxT
# gBMUkg/8Cuhqpx+zy7MeouVkyhEjUuhtCWyr0WVZBJzDkVEOrlY6TyR0hb5/o1Js
# LZf6ZMF6JQDN78bmUct8yFBZBGafey5tyonDCsnD7CNQuLPf2NSjTHhu9n5hKFqF
# F8Mpn9iFu6k1pr0iF7FbCccVWuDb3P4h2PaM0iFhmf4uz42BCMYdgJThhvv38xlt
# jr6A3dcjTpp8yB+iRCuhL2IU2XVee0XBiDUECqRXd0gmtOtqJNST8L+l8YkLy1VO
# WUMe8RCO6NMP7BLJ383WwCDeiFTo0mJebZQ0eR/G1xEhy7c8BBMh/CgQmq2F3wDZ
# Q0biaeozADgAaCC7aOAHI+1sAoMhOm1v2WhIVmh+XXUqT9856cKwc7DUPBmzb9Sj
# N5Zh+t9WCnZG7qpfxvkDF0Y/aRODMHZ1BW5L/ky9yBtyuRwXOJ6VycZTFyRkSwnN
# Gd/s9IClDOP1IP5s4TSMGGdelk4lH97x7fZE/2hxn59lp761JtMxbaEceBtqaBh8
# zNMTNN/KHs8LeiIBI2ZZ+nQav452Y6XYBivQ7OdsI8xkjnjG9gfgXXjvX1TIh0ow
# Hy5ZxtAtjXty49Gmjkx5VcBx4auJcnRDlLTzoZjTxq1te+gEWpw6O1EsEKasVLZe
# uN6PxTOxS3nHvRvPgQc1xNUdhDRqBaYsju6b9YmMxz1uefAjGM0=
# =fOTc
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 14 Jun 2023 03:17:14 AM CEST
# gpg: using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65 9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20230614' of https://github.com/alistair23/qemu: (60 commits)
  hw/intc: If mmsiaddrcfgh.L == 1, smsiaddrcfg and smsiaddrcfgh are read-only.
  target/riscv: Smepmp: Return error when access permission not allowed in PMP
  target/riscv/vector_helper.c: Remove the check for extra tail elements
  target/riscv/vector_helper.c: clean up reference of MTYPE
  target/riscv: Fix initialized value for cur_pmmask
  util/log: Add vector registers to log
  docs/system: riscv: Add pflash usage details
  riscv/virt: Support using pflash via -blockdev option
  hw/riscv: virt: Assume M-mode FW in pflash0 only when "-bios none"
  target/riscv: Remove pc_succ_insn from DisasContext
  target/riscv: Enable PC-relative translation
  target/riscv: Use true diff for gen_pc_plus_diff
  target/riscv: Change gen_set_pc_imm to gen_update_pc
  target/riscv: Change gen_goto_tb to work on displacements
  target/riscv: Introduce cur_insn_len into DisasContext
  target/riscv: Fix target address to update badaddr
  disas/riscv.c: Remove redundant parentheses
  disas/riscv.c: Fix lines with over 80 characters
  disas/riscv.c: Remove unused decomp_rv32/64 value for vector instructions
  disas/riscv.c: Support disas for Z*inx extensions
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
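As a quick illustration of the pflash changes in this pull (a sketch lifted from the documentation added further down; <edk2_code> and <edk2_vars> are placeholder image paths), S-mode firmware such as edk2 can now be supplied through -blockdev:

    $ qemu-system-riscv64 \
        -blockdev node-name=pflash0,driver=file,read-only=on,filename=<edk2_code> \
        -blockdev node-name=pflash1,driver=file,filename=<edk2_vars> \
        -M virt,pflash0=pflash0,pflash1=pflash1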
commit 7efd65423a
@@ -313,6 +313,9 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
 #if defined(TARGET_I386)
                 flags |= CPU_DUMP_CCOP;
 #endif
+                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
+                    flags |= CPU_DUMP_VPU;
+                }
                 cpu_dump_state(cpu, logfile, flags);
                 qemu_log_unlock(logfile);
             }
disas/riscv.c: diff suppressed because it is too large (1194 lines changed)
@@ -53,6 +53,37 @@ with the default OpenSBI firmware image as the -bios. It also supports
 the recommended RISC-V bootflow: U-Boot SPL (M-mode) loads OpenSBI fw_dynamic
 firmware and U-Boot proper (S-mode), using the standard -bios functionality.
 
+Using flash devices
+-------------------
+
+By default, the first flash device (pflash0) is expected to contain
+S-mode firmware code. It can be configured as read-only, with the
+second flash device (pflash1) available to store configuration data.
+
+For example, booting edk2 looks like
+
+.. code-block:: bash
+
+  $ qemu-system-riscv64 \
+     -blockdev node-name=pflash0,driver=file,read-only=on,filename=<edk2_code> \
+     -blockdev node-name=pflash1,driver=file,filename=<edk2_vars> \
+     -M virt,pflash0=pflash0,pflash1=pflash1 \
+     ... other args ....
+
+For TCG guests only, it is also possible to boot M-mode firmware from
+the first flash device (pflash0) by additionally passing ``-bios
+none``, as in
+
+.. code-block:: bash
+
+  $ qemu-system-riscv64 \
+     -bios none \
+     -blockdev node-name=pflash0,driver=file,read-only=on,filename=<m_mode_code> \
+     -M virt,pflash0=pflash0 \
+     ... other args ....
+
+Firmware images used for pflash must be exactly 32 MiB in size.
+
 Machine-specific options
 ------------------------
 
@@ -688,13 +688,13 @@ static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value,
          * domains).
          */
         if (aplic->num_children &&
-            !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
+            !(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
             aplic->smsicfgaddr = value;
         }
     } else if (aplic->mmode && aplic->msimode &&
                (addr == APLIC_SMSICFGADDRH)) {
         if (aplic->num_children &&
-            !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
+            !(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
             aplic->smsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK;
         }
     } else if ((APLIC_SETIP_BASE <= addr) &&
@@ -207,6 +207,12 @@ int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx)
 {
     int64_t nidx = 0;
 
+    if (ms->numa_state->num_nodes > ms->smp.cpus) {
+        error_report("Number of NUMA nodes (%d)"
+                     " cannot exceed the number of available CPUs (%d).",
+                     ms->numa_state->num_nodes, ms->smp.max_cpus);
+        exit(EXIT_FAILURE);
+    }
     if (ms->numa_state->num_nodes) {
         nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes);
         if (ms->numa_state->num_nodes <= nidx) {
@ -75,11 +75,11 @@ static const MemMapEntry ibex_memmap[] = {
|
||||
[IBEX_DEV_FLASH_VIRTUAL] = { 0x80000000, 0x80000 },
|
||||
};
|
||||
|
||||
static void opentitan_board_init(MachineState *machine)
|
||||
static void opentitan_machine_init(MachineState *machine)
|
||||
{
|
||||
MachineClass *mc = MACHINE_GET_CLASS(machine);
|
||||
OpenTitanState *s = OPENTITAN_MACHINE(machine);
|
||||
const MemMapEntry *memmap = ibex_memmap;
|
||||
OpenTitanState *s = g_new0(OpenTitanState, 1);
|
||||
MemoryRegion *sys_mem = get_system_memory();
|
||||
|
||||
if (machine->ram_size != mc->default_ram_size) {
|
||||
@ -108,18 +108,18 @@ static void opentitan_board_init(MachineState *machine)
|
||||
}
|
||||
}
|
||||
|
||||
static void opentitan_machine_init(MachineClass *mc)
|
||||
static void opentitan_machine_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
MachineClass *mc = MACHINE_CLASS(oc);
|
||||
|
||||
mc->desc = "RISC-V Board compatible with OpenTitan";
|
||||
mc->init = opentitan_board_init;
|
||||
mc->init = opentitan_machine_init;
|
||||
mc->max_cpus = 1;
|
||||
mc->default_cpu_type = TYPE_RISCV_CPU_IBEX;
|
||||
mc->default_ram_id = "riscv.lowrisc.ibex.ram";
|
||||
mc->default_ram_size = ibex_memmap[IBEX_DEV_RAM].size;
|
||||
}
|
||||
|
||||
DEFINE_MACHINE("opentitan", opentitan_machine_init)
|
||||
|
||||
static void lowrisc_ibex_soc_init(Object *obj)
|
||||
{
|
||||
LowRISCIbexSoCState *s = RISCV_IBEX_SOC(obj);
|
||||
@ -320,17 +320,19 @@ static void lowrisc_ibex_soc_class_init(ObjectClass *oc, void *data)
|
||||
dc->user_creatable = false;
|
||||
}
|
||||
|
||||
static const TypeInfo lowrisc_ibex_soc_type_info = {
|
||||
.name = TYPE_RISCV_IBEX_SOC,
|
||||
.parent = TYPE_DEVICE,
|
||||
.instance_size = sizeof(LowRISCIbexSoCState),
|
||||
.instance_init = lowrisc_ibex_soc_init,
|
||||
.class_init = lowrisc_ibex_soc_class_init,
|
||||
static const TypeInfo open_titan_types[] = {
|
||||
{
|
||||
.name = TYPE_RISCV_IBEX_SOC,
|
||||
.parent = TYPE_DEVICE,
|
||||
.instance_size = sizeof(LowRISCIbexSoCState),
|
||||
.instance_init = lowrisc_ibex_soc_init,
|
||||
.class_init = lowrisc_ibex_soc_class_init,
|
||||
}, {
|
||||
.name = TYPE_OPENTITAN_MACHINE,
|
||||
.parent = TYPE_MACHINE,
|
||||
.instance_size = sizeof(OpenTitanState),
|
||||
.class_init = opentitan_machine_class_init,
|
||||
}
|
||||
};
|
||||
|
||||
static void lowrisc_ibex_soc_register_types(void)
|
||||
{
|
||||
type_register_static(&lowrisc_ibex_soc_type_info);
|
||||
}
|
||||
|
||||
type_init(lowrisc_ibex_soc_register_types)
|
||||
DEFINE_TYPES(open_titan_types)
|
||||
|
@ -1245,7 +1245,8 @@ static void virt_machine_done(Notifier *notifier, void *data)
|
||||
target_ulong firmware_end_addr, kernel_start_addr;
|
||||
const char *firmware_name = riscv_default_firmware_name(&s->soc[0]);
|
||||
uint32_t fdt_load_addr;
|
||||
uint64_t kernel_entry;
|
||||
uint64_t kernel_entry = 0;
|
||||
BlockBackend *pflash_blk0;
|
||||
|
||||
/*
|
||||
* Only direct boot kernel is currently supported for KVM VM,
|
||||
@ -1266,42 +1267,32 @@ static void virt_machine_done(Notifier *notifier, void *data)
|
||||
firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
|
||||
start_addr, NULL);
|
||||
|
||||
if (drive_get(IF_PFLASH, 0, 1)) {
|
||||
/*
|
||||
* S-mode FW like EDK2 will be kept in second plash (unit 1).
|
||||
* When both kernel, initrd and pflash options are provided in the
|
||||
* command line, the kernel and initrd will be copied to the fw_cfg
|
||||
* table and opensbi will jump to the flash address which is the
|
||||
* entry point of S-mode FW. It is the job of the S-mode FW to load
|
||||
* the kernel and initrd using fw_cfg table.
|
||||
*
|
||||
* If only pflash is given but not -kernel, then it is the job of
|
||||
* of the S-mode firmware to locate and load the kernel.
|
||||
* In either case, the next_addr for opensbi will be the flash address.
|
||||
*/
|
||||
riscv_setup_firmware_boot(machine);
|
||||
kernel_entry = virt_memmap[VIRT_FLASH].base +
|
||||
virt_memmap[VIRT_FLASH].size / 2;
|
||||
} else if (machine->kernel_filename) {
|
||||
pflash_blk0 = pflash_cfi01_get_blk(s->flash[0]);
|
||||
if (pflash_blk0) {
|
||||
if (machine->firmware && !strcmp(machine->firmware, "none") &&
|
||||
!kvm_enabled()) {
|
||||
/*
|
||||
* Pflash was supplied but bios is none and not KVM guest,
|
||||
* let's overwrite the address we jump to after reset to
|
||||
* the base of the flash.
|
||||
*/
|
||||
start_addr = virt_memmap[VIRT_FLASH].base;
|
||||
} else {
|
||||
/*
|
||||
* Pflash was supplied but either KVM guest or bios is not none.
|
||||
* In this case, base of the flash would contain S-mode payload.
|
||||
*/
|
||||
riscv_setup_firmware_boot(machine);
|
||||
kernel_entry = virt_memmap[VIRT_FLASH].base;
|
||||
}
|
||||
}
|
||||
|
||||
if (machine->kernel_filename && !kernel_entry) {
|
||||
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
|
||||
firmware_end_addr);
|
||||
|
||||
kernel_entry = riscv_load_kernel(machine, &s->soc[0],
|
||||
kernel_start_addr, true, NULL);
|
||||
} else {
|
||||
/*
|
||||
* If dynamic firmware is used, it doesn't know where is the next mode
|
||||
* if kernel argument is not set.
|
||||
*/
|
||||
kernel_entry = 0;
|
||||
}
|
||||
|
||||
if (drive_get(IF_PFLASH, 0, 0)) {
|
||||
/*
|
||||
* Pflash was supplied, let's overwrite the address we jump to after
|
||||
* reset to the base of the flash.
|
||||
*/
|
||||
start_addr = virt_memmap[VIRT_FLASH].base;
|
||||
}
|
||||
|
||||
fdt_load_addr = riscv_compute_fdt_addr(memmap[VIRT_DRAM].base,
|
||||
@ -1510,8 +1501,6 @@ static void virt_machine_init(MachineState *machine)
|
||||
sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
|
||||
qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));
|
||||
|
||||
virt_flash_create(s);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
|
||||
/* Map legacy -drive if=pflash to machine properties */
|
||||
pflash_cfi01_legacy_drive(s->flash[i],
|
||||
@ -1538,6 +1527,8 @@ static void virt_machine_instance_init(Object *obj)
|
||||
{
|
||||
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
|
||||
|
||||
virt_flash_create(s);
|
||||
|
||||
s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
|
||||
s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
|
||||
s->acpi = ON_OFF_AUTO_AUTO;
|
||||
|
@@ -397,7 +397,7 @@ typedef struct disassemble_info {
   char * disassembler_options;
 
   /* Field intended to be used by targets in any way they deem suitable. */
-  int64_t target_info;
+  void *target_info;
 
   /* Options for Capstone disassembly. */
   int cap_arch;
@@ -544,11 +544,13 @@ GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
  * @CPU_DUMP_CODE:
  * @CPU_DUMP_FPU: dump FPU register state, not just integer
  * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
+ * @CPU_DUMP_VPU: dump VPU registers
  */
 enum CPUDumpFlags {
     CPU_DUMP_CODE = 0x00010000,
     CPU_DUMP_FPU = 0x00020000,
     CPU_DUMP_CCOP = 0x00040000,
+    CPU_DUMP_VPU = 0x00080000,
 };
 
 /**
@@ -24,6 +24,7 @@
 #include "hw/char/ibex_uart.h"
 #include "hw/timer/ibex_timer.h"
 #include "hw/ssi/ibex_spi_host.h"
+#include "hw/boards.h"
 #include "qom/object.h"
 
 #define TYPE_RISCV_IBEX_SOC "riscv.lowrisc.ibex.soc"
@@ -53,9 +54,12 @@ struct LowRISCIbexSoCState {
     MemoryRegion flash_alias;
 };
 
+#define TYPE_OPENTITAN_MACHINE MACHINE_TYPE_NAME("opentitan")
+OBJECT_DECLARE_SIMPLE_TYPE(OpenTitanState, OPENTITAN_MACHINE)
+
 typedef struct OpenTitanState {
     /*< private >*/
-    SysBusDevice parent_obj;
+    MachineState parent_obj;
 
     /*< public >*/
     LowRISCIbexSoCState soc;
@@ -35,6 +35,7 @@ bool qemu_log_separate(void);
 /* LOG_STRACE is used for user-mode strace logging. */
 #define LOG_STRACE (1 << 19)
 #define LOG_PER_THREAD (1 << 20)
+#define CPU_LOG_TB_VPU (1 << 21)
 
 /* Lock/unlock output. */
 
@ -119,6 +119,7 @@ static const struct isa_ext_data isa_edata_arr[] = {
|
||||
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
|
||||
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
|
||||
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
|
||||
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
|
||||
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
|
||||
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
|
||||
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
|
||||
@ -247,16 +248,6 @@ static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
|
||||
env->misa_ext_mask = env->misa_ext = ext;
|
||||
}
|
||||
|
||||
static void set_priv_version(CPURISCVState *env, int priv_ver)
|
||||
{
|
||||
env->priv_ver = priv_ver;
|
||||
}
|
||||
|
||||
static void set_vext_version(CPURISCVState *env, int vext_ver)
|
||||
{
|
||||
env->vext_ver = vext_ver;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
static uint8_t satp_mode_from_str(const char *satp_mode_str)
|
||||
{
|
||||
@ -342,7 +333,8 @@ static void set_satp_mode_default_map(RISCVCPU *cpu)
|
||||
|
||||
static void riscv_any_cpu_init(Object *obj)
|
||||
{
|
||||
CPURISCVState *env = &RISCV_CPU(obj)->env;
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
#if defined(TARGET_RISCV32)
|
||||
set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
|
||||
#elif defined(TARGET_RISCV64)
|
||||
@ -355,7 +347,13 @@ static void riscv_any_cpu_init(Object *obj)
|
||||
VM_1_10_SV32 : VM_1_10_SV57);
|
||||
#endif
|
||||
|
||||
set_priv_version(env, PRIV_VERSION_1_12_0);
|
||||
env->priv_ver = PRIV_VERSION_LATEST;
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.mmu = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
#if defined(TARGET_RISCV64)
|
||||
@ -366,7 +364,7 @@ static void rv64_base_cpu_init(Object *obj)
|
||||
set_misa(env, MXL_RV64, 0);
|
||||
riscv_cpu_add_user_properties(obj);
|
||||
/* Set latest version of privileged specification */
|
||||
set_priv_version(env, PRIV_VERSION_1_12_0);
|
||||
env->priv_ver = PRIV_VERSION_LATEST;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
|
||||
#endif
|
||||
@ -374,12 +372,19 @@ static void rv64_base_cpu_init(Object *obj)
|
||||
|
||||
static void rv64_sifive_u_cpu_init(Object *obj)
|
||||
{
|
||||
CPURISCVState *env = &RISCV_CPU(obj)->env;
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_10_0);
|
||||
env->priv_ver = PRIV_VERSION_1_10_0;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
|
||||
#endif
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.mmu = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
static void rv64_sifive_e_cpu_init(Object *obj)
|
||||
@ -388,11 +393,15 @@ static void rv64_sifive_e_cpu_init(Object *obj)
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
|
||||
set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_10_0);
|
||||
cpu->cfg.mmu = false;
|
||||
env->priv_ver = PRIV_VERSION_1_10_0;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
|
||||
#endif
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
static void rv64_thead_c906_cpu_init(Object *obj)
|
||||
@ -401,7 +410,7 @@ static void rv64_thead_c906_cpu_init(Object *obj)
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
|
||||
set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_11_0);
|
||||
env->priv_ver = PRIV_VERSION_1_11_0;
|
||||
|
||||
cpu->cfg.ext_zfh = true;
|
||||
cpu->cfg.mmu = true;
|
||||
@ -420,6 +429,9 @@ static void rv64_thead_c906_cpu_init(Object *obj)
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(cpu, VM_1_10_SV39);
|
||||
#endif
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
static void rv64_veyron_v1_cpu_init(Object *obj)
|
||||
@ -472,7 +484,7 @@ static void rv128_base_cpu_init(Object *obj)
|
||||
set_misa(env, MXL_RV128, 0);
|
||||
riscv_cpu_add_user_properties(obj);
|
||||
/* Set latest version of privileged specification */
|
||||
set_priv_version(env, PRIV_VERSION_1_12_0);
|
||||
env->priv_ver = PRIV_VERSION_LATEST;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
|
||||
#endif
|
||||
@ -485,7 +497,7 @@ static void rv32_base_cpu_init(Object *obj)
|
||||
set_misa(env, MXL_RV32, 0);
|
||||
riscv_cpu_add_user_properties(obj);
|
||||
/* Set latest version of privileged specification */
|
||||
set_priv_version(env, PRIV_VERSION_1_12_0);
|
||||
env->priv_ver = PRIV_VERSION_LATEST;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
|
||||
#endif
|
||||
@ -493,12 +505,19 @@ static void rv32_base_cpu_init(Object *obj)
|
||||
|
||||
static void rv32_sifive_u_cpu_init(Object *obj)
|
||||
{
|
||||
CPURISCVState *env = &RISCV_CPU(obj)->env;
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_10_0);
|
||||
env->priv_ver = PRIV_VERSION_1_10_0;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
|
||||
#endif
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.mmu = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
static void rv32_sifive_e_cpu_init(Object *obj)
|
||||
@ -507,11 +526,15 @@ static void rv32_sifive_e_cpu_init(Object *obj)
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
|
||||
set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_10_0);
|
||||
cpu->cfg.mmu = false;
|
||||
env->priv_ver = PRIV_VERSION_1_10_0;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
|
||||
#endif
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
static void rv32_ibex_cpu_init(Object *obj)
|
||||
@ -520,12 +543,16 @@ static void rv32_ibex_cpu_init(Object *obj)
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
|
||||
set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_11_0);
|
||||
cpu->cfg.mmu = false;
|
||||
env->priv_ver = PRIV_VERSION_1_11_0;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
|
||||
#endif
|
||||
cpu->cfg.epmp = true;
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
|
||||
static void rv32_imafcu_nommu_cpu_init(Object *obj)
|
||||
@ -534,11 +561,15 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj)
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
|
||||
set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
|
||||
set_priv_version(env, PRIV_VERSION_1_10_0);
|
||||
cpu->cfg.mmu = false;
|
||||
env->priv_ver = PRIV_VERSION_1_10_0;
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
|
||||
#endif
|
||||
|
||||
/* inherited from parent obj via riscv_cpu_init() */
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.pmp = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -690,16 +721,18 @@ static vaddr riscv_cpu_get_pc(CPUState *cs)
|
||||
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
|
||||
const TranslationBlock *tb)
|
||||
{
|
||||
RISCVCPU *cpu = RISCV_CPU(cs);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
|
||||
if (!(tb_cflags(tb) & CF_PCREL)) {
|
||||
RISCVCPU *cpu = RISCV_CPU(cs);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
|
||||
|
||||
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
|
||||
tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
|
||||
|
||||
if (xl == MXL_RV32) {
|
||||
env->pc = (int32_t) tb->pc;
|
||||
} else {
|
||||
env->pc = tb->pc;
|
||||
if (xl == MXL_RV32) {
|
||||
env->pc = (int32_t) tb->pc;
|
||||
} else {
|
||||
env->pc = tb->pc;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -725,11 +758,18 @@ static void riscv_restore_state_to_opc(CPUState *cs,
|
||||
RISCVCPU *cpu = RISCV_CPU(cs);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
|
||||
target_ulong pc;
|
||||
|
||||
if (tb_cflags(tb) & CF_PCREL) {
|
||||
pc = (env->pc & TARGET_PAGE_MASK) | data[0];
|
||||
} else {
|
||||
pc = data[0];
|
||||
}
|
||||
|
||||
if (xl == MXL_RV32) {
|
||||
env->pc = (int32_t)data[0];
|
||||
env->pc = (int32_t)pc;
|
||||
} else {
|
||||
env->pc = data[0];
|
||||
env->pc = pc;
|
||||
}
|
||||
env->bins = data[1];
|
||||
}
|
||||
@ -818,6 +858,7 @@ static void riscv_cpu_reset_hold(Object *obj)
|
||||
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
|
||||
{
|
||||
RISCVCPU *cpu = RISCV_CPU(s);
|
||||
info->target_info = &cpu->cfg;
|
||||
|
||||
switch (riscv_cpu_mxl(&cpu->env)) {
|
||||
case MXL_RV32:
|
||||
@ -834,13 +875,127 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
|
||||
}
|
||||
}
|
||||
|
||||
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
|
||||
Error **errp)
|
||||
{
|
||||
int vext_version = VEXT_VERSION_1_00_0;
|
||||
|
||||
if (!is_power_of_2(cfg->vlen)) {
|
||||
error_setg(errp, "Vector extension VLEN must be power of 2");
|
||||
return;
|
||||
}
|
||||
if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
|
||||
error_setg(errp,
|
||||
"Vector extension implementation only supports VLEN "
|
||||
"in the range [128, %d]", RV_VLEN_MAX);
|
||||
return;
|
||||
}
|
||||
if (!is_power_of_2(cfg->elen)) {
|
||||
error_setg(errp, "Vector extension ELEN must be power of 2");
|
||||
return;
|
||||
}
|
||||
if (cfg->elen > 64 || cfg->elen < 8) {
|
||||
error_setg(errp,
|
||||
"Vector extension implementation only supports ELEN "
|
||||
"in the range [8, 64]");
|
||||
return;
|
||||
}
|
||||
if (cfg->vext_spec) {
|
||||
if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
|
||||
vext_version = VEXT_VERSION_1_00_0;
|
||||
} else {
|
||||
error_setg(errp, "Unsupported vector spec version '%s'",
|
||||
cfg->vext_spec);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
qemu_log("vector version is not specified, "
|
||||
"use the default value v1.0\n");
|
||||
}
|
||||
env->vext_ver = vext_version;
|
||||
}
|
||||
|
||||
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
|
||||
{
|
||||
CPURISCVState *env = &cpu->env;
|
||||
int priv_version = -1;
|
||||
|
||||
if (cpu->cfg.priv_spec) {
|
||||
if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
|
||||
priv_version = PRIV_VERSION_1_12_0;
|
||||
} else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
|
||||
priv_version = PRIV_VERSION_1_11_0;
|
||||
} else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
|
||||
priv_version = PRIV_VERSION_1_10_0;
|
||||
} else {
|
||||
error_setg(errp,
|
||||
"Unsupported privilege spec version '%s'",
|
||||
cpu->cfg.priv_spec);
|
||||
return;
|
||||
}
|
||||
|
||||
env->priv_ver = priv_version;
|
||||
}
|
||||
}
|
||||
|
||||
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
|
||||
{
|
||||
CPURISCVState *env = &cpu->env;
|
||||
int i;
|
||||
|
||||
/* Force disable extensions if priv spec version does not match */
|
||||
for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
|
||||
if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
|
||||
(env->priv_ver < isa_edata_arr[i].min_version)) {
|
||||
isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
|
||||
" because privilege spec version does not match",
|
||||
isa_edata_arr[i].name, env->mhartid);
|
||||
#else
|
||||
warn_report("disabling %s extension because "
|
||||
"privilege spec version does not match",
|
||||
isa_edata_arr[i].name);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
|
||||
{
|
||||
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
|
||||
CPUClass *cc = CPU_CLASS(mcc);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
|
||||
/* Validate that MISA_MXL is set properly. */
|
||||
switch (env->misa_mxl_max) {
|
||||
#ifdef TARGET_RISCV64
|
||||
case MXL_RV64:
|
||||
case MXL_RV128:
|
||||
cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
|
||||
break;
|
||||
#endif
|
||||
case MXL_RV32:
|
||||
cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
if (env->misa_mxl_max != env->misa_mxl) {
|
||||
error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check consistency between chosen extensions while setting
|
||||
* cpu->cfg accordingly.
|
||||
*/
|
||||
static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
|
||||
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
|
||||
{
|
||||
CPURISCVState *env = &cpu->env;
|
||||
Error *local_err = NULL;
|
||||
|
||||
/* Do some ISA extension error checking */
|
||||
if (riscv_has_ext(env, RVG) &&
|
||||
@ -853,7 +1008,7 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
|
||||
env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
|
||||
env->misa_ext_mask = env->misa_ext;
|
||||
env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
|
||||
}
|
||||
|
||||
if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
|
||||
@ -909,8 +1064,14 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
/* The V vector extension depends on the Zve64d extension */
|
||||
if (riscv_has_ext(env, RVV)) {
|
||||
riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
|
||||
if (local_err != NULL) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
/* The V vector extension depends on the Zve64d extension */
|
||||
cpu->cfg.ext_zve64d = true;
|
||||
}
|
||||
|
||||
@ -1046,45 +1207,11 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
|
||||
cpu->cfg.ext_zksh = true;
|
||||
}
|
||||
|
||||
if (riscv_has_ext(env, RVV)) {
|
||||
int vext_version = VEXT_VERSION_1_00_0;
|
||||
if (!is_power_of_2(cpu->cfg.vlen)) {
|
||||
error_setg(errp,
|
||||
"Vector extension VLEN must be power of 2");
|
||||
return;
|
||||
}
|
||||
if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
|
||||
error_setg(errp,
|
||||
"Vector extension implementation only supports VLEN "
|
||||
"in the range [128, %d]", RV_VLEN_MAX);
|
||||
return;
|
||||
}
|
||||
if (!is_power_of_2(cpu->cfg.elen)) {
|
||||
error_setg(errp,
|
||||
"Vector extension ELEN must be power of 2");
|
||||
return;
|
||||
}
|
||||
if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
|
||||
error_setg(errp,
|
||||
"Vector extension implementation only supports ELEN "
|
||||
"in the range [8, 64]");
|
||||
return;
|
||||
}
|
||||
if (cpu->cfg.vext_spec) {
|
||||
if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
|
||||
vext_version = VEXT_VERSION_1_00_0;
|
||||
} else {
|
||||
error_setg(errp,
|
||||
"Unsupported vector spec version '%s'",
|
||||
cpu->cfg.vext_spec);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
qemu_log("vector version is not specified, "
|
||||
"use the default value v1.0\n");
|
||||
}
|
||||
set_vext_version(env, vext_version);
|
||||
}
|
||||
/*
|
||||
* Disable isa extensions based on priv spec after we
|
||||
* validated and set everything we need.
|
||||
*/
|
||||
riscv_cpu_disable_priv_spec_isa_exts(cpu);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
@ -1183,8 +1310,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
|
||||
RISCVCPU *cpu = RISCV_CPU(dev);
|
||||
CPURISCVState *env = &cpu->env;
|
||||
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
|
||||
CPUClass *cc = CPU_CLASS(mcc);
|
||||
int i, priv_version = -1;
|
||||
Error *local_err = NULL;
|
||||
|
||||
cpu_exec_realizefn(cs, &local_err);
|
||||
@ -1193,23 +1318,16 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
if (cpu->cfg.priv_spec) {
|
||||
if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
|
||||
priv_version = PRIV_VERSION_1_12_0;
|
||||
} else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
|
||||
priv_version = PRIV_VERSION_1_11_0;
|
||||
} else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
|
||||
priv_version = PRIV_VERSION_1_10_0;
|
||||
} else {
|
||||
error_setg(errp,
|
||||
"Unsupported privilege spec version '%s'",
|
||||
cpu->cfg.priv_spec);
|
||||
return;
|
||||
}
|
||||
riscv_cpu_validate_misa_mxl(cpu, &local_err);
|
||||
if (local_err != NULL) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
if (priv_version >= PRIV_VERSION_1_10_0) {
|
||||
set_priv_version(env, priv_version);
|
||||
riscv_cpu_validate_priv_spec(cpu, &local_err);
|
||||
if (local_err != NULL) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
riscv_cpu_validate_misa_priv(env, &local_err);
|
||||
@ -1218,23 +1336,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Force disable extensions if priv spec version does not match */
|
||||
for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
|
||||
if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
|
||||
(env->priv_ver < isa_edata_arr[i].min_version)) {
|
||||
isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
|
||||
" because privilege spec version does not match",
|
||||
isa_edata_arr[i].name, env->mhartid);
|
||||
#else
|
||||
warn_report("disabling %s extension because "
|
||||
"privilege spec version does not match",
|
||||
isa_edata_arr[i].name);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
if (cpu->cfg.epmp && !cpu->cfg.pmp) {
|
||||
/*
|
||||
* Enhanced PMP should only be available
|
||||
@ -1244,29 +1345,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
if (cpu->cfg.ext_sstc) {
|
||||
riscv_timer_init(cpu);
|
||||
}
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
/* Validate that MISA_MXL is set properly. */
|
||||
switch (env->misa_mxl_max) {
|
||||
#ifdef TARGET_RISCV64
|
||||
case MXL_RV64:
|
||||
case MXL_RV128:
|
||||
cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
|
||||
break;
|
||||
#endif
|
||||
case MXL_RV32:
|
||||
cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
assert(env->misa_mxl_max == env->misa_mxl);
|
||||
|
||||
riscv_cpu_validate_set_extensions(cpu, &local_err);
|
||||
if (local_err != NULL) {
|
||||
error_propagate(errp, local_err);
|
||||
@ -1274,6 +1352,12 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
cs->tcg_cflags |= CF_PCREL;
|
||||
|
||||
if (cpu->cfg.ext_sstc) {
|
||||
riscv_timer_init(cpu);
|
||||
}
|
||||
|
||||
if (cpu->cfg.pmu_num) {
|
||||
if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
|
||||
cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
||||
@ -1410,11 +1494,6 @@ static void riscv_cpu_init(Object *obj)
|
||||
{
|
||||
RISCVCPU *cpu = RISCV_CPU(obj);
|
||||
|
||||
cpu->cfg.ext_ifencei = true;
|
||||
cpu->cfg.ext_icsr = true;
|
||||
cpu->cfg.mmu = true;
|
||||
cpu->cfg.pmp = true;
|
||||
|
||||
cpu_set_cpustate_pointers(cpu);
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
@ -1535,8 +1614,8 @@ static Property riscv_cpu_extensions[] = {
|
||||
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
|
||||
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
|
||||
|
||||
DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
|
||||
DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
|
||||
|
||||
DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
|
||||
DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
|
||||
DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
|
||||
@ -1571,6 +1650,14 @@ static Property riscv_cpu_extensions[] = {
|
||||
|
||||
DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
|
||||
|
||||
DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
|
||||
DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
|
||||
DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
|
||||
DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
|
||||
DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
|
||||
DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
|
||||
DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
|
||||
|
||||
/* Vendor-specific custom extensions */
|
||||
DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
|
||||
DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
|
||||
@ -1588,14 +1675,6 @@ static Property riscv_cpu_extensions[] = {
|
||||
/* These are experimental so mark with 'x-' */
|
||||
DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
|
||||
|
||||
DEFINE_PROP_BOOL("x-zca", RISCVCPU, cfg.ext_zca, false),
|
||||
DEFINE_PROP_BOOL("x-zcb", RISCVCPU, cfg.ext_zcb, false),
|
||||
DEFINE_PROP_BOOL("x-zcd", RISCVCPU, cfg.ext_zcd, false),
|
||||
DEFINE_PROP_BOOL("x-zce", RISCVCPU, cfg.ext_zce, false),
|
||||
DEFINE_PROP_BOOL("x-zcf", RISCVCPU, cfg.ext_zcf, false),
|
||||
DEFINE_PROP_BOOL("x-zcmp", RISCVCPU, cfg.ext_zcmp, false),
|
||||
DEFINE_PROP_BOOL("x-zcmt", RISCVCPU, cfg.ext_zcmt, false),
|
||||
|
||||
/* ePMP 0.9.3 */
|
||||
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
|
||||
DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
|
||||
@ -1761,7 +1840,8 @@ static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
|
||||
if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
|
||||
if (cpu->env.priv_ver >= isa_edata_arr[i].min_version &&
|
||||
isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
|
||||
new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
|
||||
g_free(old);
|
||||
old = new;
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "qom/object.h"
|
||||
#include "qemu/int128.h"
|
||||
#include "cpu_bits.h"
|
||||
#include "cpu_cfg.h"
|
||||
#include "qapi/qapi-types-common.h"
|
||||
#include "cpu-qom.h"
|
||||
|
||||
@ -61,6 +62,8 @@ enum {
|
||||
PRIV_VERSION_1_10_0 = 0,
|
||||
PRIV_VERSION_1_11_0,
|
||||
PRIV_VERSION_1_12_0,
|
||||
|
||||
PRIV_VERSION_LATEST = PRIV_VERSION_1_12_0,
|
||||
};
|
||||
|
||||
#define VEXT_VERSION_1_00_0 0x00010000
|
||||
@ -368,119 +371,6 @@ struct CPUArchState {
|
||||
uint64_t kvm_timer_frequency;
|
||||
};
|
||||
|
||||
/*
|
||||
* map is a 16-bit bitmap: the most significant set bit in map is the maximum
|
||||
* satp mode that is supported. It may be chosen by the user and must respect
|
||||
* what qemu implements (valid_1_10_32/64) and what the hw is capable of
|
||||
* (supported bitmap below).
|
||||
*
|
||||
* init is a 16-bit bitmap used to make sure the user selected a correct
|
||||
* configuration as per the specification.
|
||||
*
|
||||
* supported is a 16-bit bitmap used to reflect the hw capabilities.
|
||||
*/
|
||||
typedef struct {
|
||||
uint16_t map, init, supported;
|
||||
} RISCVSATPMap;
|
||||
|
||||
struct RISCVCPUConfig {
|
||||
bool ext_zba;
|
||||
bool ext_zbb;
|
||||
bool ext_zbc;
|
||||
bool ext_zbkb;
|
||||
bool ext_zbkc;
|
||||
bool ext_zbkx;
|
||||
bool ext_zbs;
|
||||
bool ext_zca;
|
||||
bool ext_zcb;
|
||||
bool ext_zcd;
|
||||
bool ext_zce;
|
||||
bool ext_zcf;
|
||||
bool ext_zcmp;
|
||||
bool ext_zcmt;
|
||||
bool ext_zk;
|
||||
bool ext_zkn;
|
||||
bool ext_zknd;
|
||||
bool ext_zkne;
|
||||
bool ext_zknh;
|
||||
bool ext_zkr;
|
||||
bool ext_zks;
|
||||
bool ext_zksed;
|
||||
bool ext_zksh;
|
||||
bool ext_zkt;
|
||||
bool ext_ifencei;
|
||||
bool ext_icsr;
|
||||
bool ext_icbom;
|
||||
bool ext_icboz;
|
||||
bool ext_zicond;
|
||||
bool ext_zihintpause;
|
||||
bool ext_smstateen;
|
||||
bool ext_sstc;
|
||||
bool ext_svadu;
|
||||
bool ext_svinval;
|
||||
bool ext_svnapot;
|
||||
bool ext_svpbmt;
|
||||
bool ext_zdinx;
|
||||
bool ext_zawrs;
|
||||
bool ext_zfh;
|
||||
bool ext_zfhmin;
|
||||
bool ext_zfinx;
|
||||
bool ext_zhinx;
|
||||
bool ext_zhinxmin;
|
||||
bool ext_zve32f;
|
||||
bool ext_zve64f;
|
||||
bool ext_zve64d;
|
||||
bool ext_zmmul;
|
||||
bool ext_zvfh;
|
||||
bool ext_zvfhmin;
|
||||
bool ext_smaia;
|
||||
bool ext_ssaia;
|
||||
bool ext_sscofpmf;
|
||||
bool rvv_ta_all_1s;
|
||||
bool rvv_ma_all_1s;
|
||||
|
||||
uint32_t mvendorid;
|
||||
uint64_t marchid;
|
||||
uint64_t mimpid;
|
||||
|
||||
/* Vendor-specific custom extensions */
|
||||
bool ext_xtheadba;
|
||||
bool ext_xtheadbb;
|
||||
bool ext_xtheadbs;
|
||||
bool ext_xtheadcmo;
|
||||
bool ext_xtheadcondmov;
|
||||
bool ext_xtheadfmemidx;
|
||||
bool ext_xtheadfmv;
|
||||
bool ext_xtheadmac;
|
||||
bool ext_xtheadmemidx;
|
||||
bool ext_xtheadmempair;
|
||||
bool ext_xtheadsync;
|
||||
bool ext_XVentanaCondOps;
|
||||
|
||||
uint8_t pmu_num;
|
||||
char *priv_spec;
|
||||
char *user_spec;
|
||||
char *bext_spec;
|
||||
char *vext_spec;
|
||||
uint16_t vlen;
|
||||
uint16_t elen;
|
||||
uint16_t cbom_blocksize;
|
||||
uint16_t cboz_blocksize;
|
||||
bool mmu;
|
||||
bool pmp;
|
||||
bool epmp;
|
||||
bool debug;
|
||||
bool misa_w;
|
||||
|
||||
bool short_isa_string;
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
RISCVSATPMap satp_mode;
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef struct RISCVCPUConfig RISCVCPUConfig;
|
||||
|
||||
/*
|
||||
* RISCVCPU:
|
||||
* @env: #CPURISCVState
|
||||
@ -546,6 +436,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
||||
bool probe, uintptr_t retaddr);
|
||||
char *riscv_isa_string(RISCVCPU *cpu);
|
||||
void riscv_cpu_list(void);
|
||||
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);
|
||||
|
||||
#define cpu_list riscv_cpu_list
|
||||
#define cpu_mmu_index riscv_cpu_mmu_index
|
||||
|
target/riscv/cpu_cfg.h (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
/*
|
||||
* QEMU RISC-V CPU CFG
|
||||
*
|
||||
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
|
||||
* Copyright (c) 2017-2018 SiFive, Inc.
|
||||
* Copyright (c) 2021-2023 PLCT Lab
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2 or later, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef RISCV_CPU_CFG_H
|
||||
#define RISCV_CPU_CFG_H
|
||||
|
||||
/*
|
||||
* map is a 16-bit bitmap: the most significant set bit in map is the maximum
|
||||
* satp mode that is supported. It may be chosen by the user and must respect
|
||||
* what qemu implements (valid_1_10_32/64) and what the hw is capable of
|
||||
* (supported bitmap below).
|
||||
*
|
||||
* init is a 16-bit bitmap used to make sure the user selected a correct
|
||||
* configuration as per the specification.
|
||||
*
|
||||
* supported is a 16-bit bitmap used to reflect the hw capabilities.
|
||||
*/
|
||||
typedef struct {
|
||||
uint16_t map, init, supported;
|
||||
} RISCVSATPMap;
|
||||
|
||||
struct RISCVCPUConfig {
|
||||
bool ext_zba;
|
||||
bool ext_zbb;
|
||||
bool ext_zbc;
|
||||
bool ext_zbkb;
|
||||
bool ext_zbkc;
|
||||
bool ext_zbkx;
|
||||
bool ext_zbs;
|
||||
bool ext_zca;
|
||||
bool ext_zcb;
|
||||
bool ext_zcd;
|
||||
bool ext_zce;
|
||||
bool ext_zcf;
|
||||
bool ext_zcmp;
|
||||
bool ext_zcmt;
|
||||
bool ext_zk;
|
||||
bool ext_zkn;
|
||||
bool ext_zknd;
|
||||
bool ext_zkne;
|
||||
bool ext_zknh;
|
||||
bool ext_zkr;
|
||||
bool ext_zks;
|
||||
bool ext_zksed;
|
||||
bool ext_zksh;
|
||||
bool ext_zkt;
|
||||
bool ext_ifencei;
|
||||
bool ext_icsr;
|
||||
bool ext_icbom;
|
||||
bool ext_icboz;
|
||||
bool ext_zicond;
|
||||
bool ext_zihintpause;
|
||||
bool ext_smstateen;
|
||||
bool ext_sstc;
|
||||
bool ext_svadu;
|
||||
bool ext_svinval;
|
||||
bool ext_svnapot;
|
||||
bool ext_svpbmt;
|
||||
bool ext_zdinx;
|
||||
bool ext_zawrs;
|
||||
bool ext_zfh;
|
||||
bool ext_zfhmin;
|
||||
bool ext_zfinx;
|
||||
bool ext_zhinx;
|
||||
bool ext_zhinxmin;
|
||||
bool ext_zve32f;
|
||||
bool ext_zve64f;
|
||||
bool ext_zve64d;
|
||||
bool ext_zmmul;
|
||||
bool ext_zvfh;
|
||||
bool ext_zvfhmin;
|
||||
bool ext_smaia;
|
||||
bool ext_ssaia;
|
||||
bool ext_sscofpmf;
|
||||
bool rvv_ta_all_1s;
|
||||
bool rvv_ma_all_1s;
|
||||
|
||||
uint32_t mvendorid;
|
||||
uint64_t marchid;
|
||||
uint64_t mimpid;
|
||||
|
||||
/* Vendor-specific custom extensions */
|
||||
bool ext_xtheadba;
|
||||
bool ext_xtheadbb;
|
||||
bool ext_xtheadbs;
|
||||
bool ext_xtheadcmo;
|
||||
bool ext_xtheadcondmov;
|
||||
bool ext_xtheadfmemidx;
|
||||
bool ext_xtheadfmv;
|
||||
bool ext_xtheadmac;
|
||||
bool ext_xtheadmemidx;
|
||||
bool ext_xtheadmempair;
|
||||
bool ext_xtheadsync;
|
||||
bool ext_XVentanaCondOps;
|
||||
|
||||
uint8_t pmu_num;
|
||||
char *priv_spec;
|
||||
char *user_spec;
|
||||
char *bext_spec;
|
||||
char *vext_spec;
|
||||
uint16_t vlen;
|
||||
uint16_t elen;
|
||||
uint16_t cbom_blocksize;
|
||||
uint16_t cboz_blocksize;
|
||||
bool mmu;
|
||||
bool pmp;
|
||||
bool epmp;
|
||||
bool debug;
|
||||
bool misa_w;
|
||||
|
||||
bool short_isa_string;
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
RISCVSATPMap satp_mode;
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef struct RISCVCPUConfig RISCVCPUConfig;
|
||||
#endif
|
@ -120,6 +120,12 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
|
||||
vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
|
||||
}
|
||||
|
||||
/* With Zfinx, floating point is enabled/disabled by Smstateen. */
|
||||
if (!riscv_has_ext(env, RVF)) {
|
||||
fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
|
||||
? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
|
||||
}
|
||||
|
||||
if (cpu->cfg.debug && !icount_enabled()) {
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
|
||||
}
|
||||
@ -128,7 +134,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
|
||||
if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
|
||||
if (env->cur_pmmask != 0) {
|
||||
flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
|
||||
}
|
||||
if (env->cur_pmbase != 0) {
|
||||
@ -140,7 +146,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
|
||||
|
||||
void riscv_cpu_update_mask(CPURISCVState *env)
|
||||
{
|
||||
target_ulong mask = -1, base = 0;
|
||||
target_ulong mask = 0, base = 0;
|
||||
/*
|
||||
* TODO: Current RVJ spec does not specify
|
||||
* how the extension interacts with XLEN.
|
||||
@ -688,39 +694,30 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
|
||||
*
|
||||
* @env: CPURISCVState
|
||||
* @prot: The returned protection attributes
|
||||
* @tlb_size: TLB page size containing addr. It could be modified after PMP
|
||||
* permission checking. NULL if not set TLB page for addr.
|
||||
* @addr: The physical address to be checked permission
|
||||
* @access_type: The type of MMU access
|
||||
* @mode: Indicates current privilege level.
|
||||
*/
|
||||
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
|
||||
target_ulong *tlb_size, hwaddr addr,
|
||||
static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
|
||||
int size, MMUAccessType access_type,
|
||||
int mode)
|
||||
{
|
||||
pmp_priv_t pmp_priv;
|
||||
int pmp_index = -1;
|
||||
bool pmp_has_privs;
|
||||
|
||||
if (!riscv_cpu_cfg(env)->pmp) {
|
||||
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
return TRANSLATE_SUCCESS;
|
||||
}
|
||||
|
||||
pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type,
|
||||
&pmp_priv, mode);
|
||||
if (pmp_index < 0) {
|
||||
pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
|
||||
&pmp_priv, mode);
|
||||
if (!pmp_has_privs) {
|
||||
*prot = 0;
|
||||
return TRANSLATE_PMP_FAIL;
|
||||
}
|
||||
|
||||
*prot = pmp_priv_to_page_prot(pmp_priv);
|
||||
if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) {
|
||||
target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
|
||||
target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
|
||||
|
||||
*tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea);
|
||||
}
|
||||
|
||||
return TRANSLATE_SUCCESS;
|
||||
}
|
||||
@ -909,7 +906,7 @@ restart:
|
||||
}
|
||||
|
||||
int pmp_prot;
|
||||
int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
|
||||
int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
|
||||
sizeof(target_ulong),
|
||||
MMU_DATA_LOAD, PRV_S);
|
||||
if (pmp_ret != TRANSLATE_SUCCESS) {
|
||||
@ -1305,8 +1302,9 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
||||
prot &= prot2;
|
||||
|
||||
if (ret == TRANSLATE_SUCCESS) {
|
||||
ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
|
||||
ret = get_physical_address_pmp(env, &prot_pmp, pa,
|
||||
size, access_type, mode);
|
||||
tlb_size = pmp_get_tlb_size(env, pa);
|
||||
|
||||
qemu_log_mask(CPU_LOG_MMU,
|
||||
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
|
||||
@ -1338,8 +1336,9 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
||||
__func__, address, ret, pa, prot);
|
||||
|
||||
if (ret == TRANSLATE_SUCCESS) {
|
||||
ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
|
||||
ret = get_physical_address_pmp(env, &prot_pmp, pa,
|
||||
size, access_type, mode);
|
||||
tlb_size = pmp_get_tlb_size(env, pa);
|
||||
|
||||
qemu_log_mask(CPU_LOG_MMU,
|
||||
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
|
||||
|
@ -82,6 +82,10 @@ static RISCVException fs(CPURISCVState *env, int csrno)
|
||||
!riscv_cpu_cfg(env)->ext_zfinx) {
|
||||
return RISCV_EXCP_ILLEGAL_INST;
|
||||
}
|
||||
|
||||
if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
|
||||
return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
|
||||
}
|
||||
#endif
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
@ -1324,8 +1328,15 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
|
||||
mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
|
||||
}
|
||||
env->mstatus = mstatus;
|
||||
env->xl = cpu_recompute_xl(env);
|
||||
|
||||
/*
|
||||
* Except in debug mode, UXL/SXL can only be modified by higher
|
||||
* privilege mode. So xl will not be changed in normal mode.
|
||||
*/
|
||||
if (env->debugger) {
|
||||
env->xl = cpu_recompute_xl(env);
|
||||
riscv_cpu_update_mask(env);
|
||||
}
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
@ -1387,39 +1398,18 @@ static RISCVException read_misa(CPURISCVState *env, int csrno,
|
||||
static RISCVException write_misa(CPURISCVState *env, int csrno,
|
||||
target_ulong val)
|
||||
{
|
||||
RISCVCPU *cpu = env_archcpu(env);
|
||||
uint32_t orig_misa_ext = env->misa_ext;
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (!riscv_cpu_cfg(env)->misa_w) {
|
||||
/* drop write to misa */
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
/* 'I' or 'E' must be present */
|
||||
if (!(val & (RVI | RVE))) {
|
||||
/* It is not, drop write to misa */
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
/* 'E' excludes all other extensions */
|
||||
if (val & RVE) {
|
||||
/*
|
||||
* when we support 'E' we can do "val = RVE;" however
|
||||
* for now we just drop writes if 'E' is present.
|
||||
*/
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
/*
|
||||
* misa.MXL writes are not supported by QEMU.
|
||||
* Drop writes to those bits.
|
||||
*/
|
||||
|
||||
/* Mask extensions that are not supported by this hart */
|
||||
val &= env->misa_ext_mask;
|
||||
|
||||
/* 'D' depends on 'F', so clear 'D' if 'F' is not present */
|
||||
if ((val & RVD) && !(val & RVF)) {
|
||||
val &= ~RVD;
|
||||
}
|
||||
|
||||
/*
|
||||
* Suppress 'C' if next instruction is not aligned
|
||||
* TODO: this should check next_pc
|
||||
@ -1428,18 +1418,36 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
|
||||
val &= ~RVC;
|
||||
}
|
||||
|
||||
/* Disable RVG if any of its dependencies are disabled */
|
||||
if (!(val & RVI && val & RVM && val & RVA &&
|
||||
val & RVF && val & RVD)) {
|
||||
val &= ~RVG;
|
||||
}
|
||||
|
||||
/* If nothing changed, do nothing. */
|
||||
if (val == env->misa_ext) {
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
if (!(val & RVF)) {
|
||||
env->misa_ext = val;
|
||||
riscv_cpu_validate_set_extensions(cpu, &local_err);
|
||||
if (local_err != NULL) {
|
||||
/* Rollback on validation error */
|
||||
qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
|
||||
"0x%x, keeping existing MISA ext 0x%x\n",
|
||||
env->misa_ext, orig_misa_ext);
|
||||
|
||||
env->misa_ext = orig_misa_ext;
|
||||
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
|
||||
if (!(env->misa_ext & RVF)) {
|
||||
env->mstatus &= ~MSTATUS_FS;
|
||||
}
|
||||
|
||||
/* flush translation cache */
|
||||
tb_flush(env_cpu(env));
|
||||
env->misa_ext = val;
|
||||
env->xl = riscv_cpu_mxl(env);
|
||||
return RISCV_EXCP_NONE;
|
||||
}
|
||||
@ -2100,6 +2108,9 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
|
||||
target_ulong new_val)
|
||||
{
|
||||
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
|
||||
if (!riscv_has_ext(env, RVF)) {
|
||||
wr_mask |= SMSTATEEN0_FCSR;
|
||||
}
|
||||
|
||||
return write_mstateen(env, csrno, wr_mask, new_val);
|
||||
}
|
||||
@ -2173,6 +2184,10 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
|
||||
{
|
||||
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
|
||||
|
||||
if (!riscv_has_ext(env, RVF)) {
|
||||
wr_mask |= SMSTATEEN0_FCSR;
|
||||
}
|
||||
|
||||
return write_hstateen(env, csrno, wr_mask, new_val);
|
||||
}
|
||||
|
||||
@ -2259,6 +2274,10 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
|
||||
{
|
||||
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
|
||||
|
||||
if (!riscv_has_ext(env, RVF)) {
|
||||
wr_mask |= SMSTATEEN0_FCSR;
|
||||
}
|
||||
|
||||
return write_sstateen(env, csrno, wr_mask, new_val);
|
||||
}
|
||||
|
||||
|
@@ -108,7 +108,7 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
 {
 #ifndef CONFIG_USER_ONLY
     decode_save_opc(ctx);
-    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
+    gen_update_pc(ctx, ctx->cur_insn_len);
     gen_helper_wfi(cpu_env);
     return true;
 #else
@ -31,9 +31,11 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define REQUIRE_ZCD(ctx) do { \
|
||||
if (!ctx->cfg_ptr->ext_zcd) { \
|
||||
return false; \
|
||||
#define REQUIRE_ZCD_OR_DC(ctx) do { \
|
||||
if (!ctx->cfg_ptr->ext_zcd) { \
|
||||
if (!has_ext(ctx, RVD) || !has_ext(ctx, RVC)) { \
|
||||
return false; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
@ -67,13 +69,13 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
|
||||
|
||||
static bool trans_c_fld(DisasContext *ctx, arg_fld *a)
|
||||
{
|
||||
REQUIRE_ZCD(ctx);
|
||||
REQUIRE_ZCD_OR_DC(ctx);
|
||||
return trans_fld(ctx, a);
|
||||
}
|
||||
|
||||
static bool trans_c_fsd(DisasContext *ctx, arg_fsd *a)
|
||||
{
|
||||
REQUIRE_ZCD(ctx);
|
||||
REQUIRE_ZCD_OR_DC(ctx);
|
||||
return trans_fsd(ctx, a);
|
||||
}
|
||||
|
||||
|
@ -19,9 +19,10 @@
|
||||
*/
|
||||
|
||||
#define REQUIRE_FPU do {\
|
||||
if (ctx->mstatus_fs == EXT_STATUS_DISABLED) \
|
||||
if (!ctx->cfg_ptr->ext_zfinx) \
|
||||
return false; \
|
||||
if (ctx->mstatus_fs == EXT_STATUS_DISABLED) { \
|
||||
ctx->virt_inst_excp = ctx->virt_enabled && ctx->cfg_ptr->ext_zfinx; \
|
||||
return false; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define REQUIRE_ZFINX_OR_F(ctx) do {\
|
||||
@ -30,10 +31,12 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define REQUIRE_ZCF(ctx) do { \
|
||||
if (!ctx->cfg_ptr->ext_zcf) { \
|
||||
return false; \
|
||||
} \
|
||||
#define REQUIRE_ZCF_OR_FC(ctx) do { \
|
||||
if (!ctx->cfg_ptr->ext_zcf) { \
|
||||
if (!has_ext(ctx, RVF) || !has_ext(ctx, RVC)) { \
|
||||
return false; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static bool trans_flw(DisasContext *ctx, arg_flw *a)
|
||||
@ -69,13 +72,13 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
|
||||
|
||||
static bool trans_c_flw(DisasContext *ctx, arg_flw *a)
|
||||
{
|
||||
REQUIRE_ZCF(ctx);
|
||||
REQUIRE_ZCF_OR_FC(ctx);
|
||||
return trans_flw(ctx, a);
|
||||
}
|
||||
|
||||
static bool trans_c_fsw(DisasContext *ctx, arg_fsw *a)
|
||||
{
|
||||
REQUIRE_ZCF(ctx);
|
||||
REQUIRE_ZCF_OR_FC(ctx);
|
||||
return trans_fsw(ctx, a);
|
||||
}
|
||||
|
||||
|
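Stated positively, the two renamed guards admit the compressed FP load/store encodings when either the dedicated Zc* flag is present or the classic C+D (respectively C+F) combination is. A sketch of the equivalent condition, using the same helpers as above (illustrative only):

    /* Equivalent form of the new checks: */
    bool c_fld_ok = ctx->cfg_ptr->ext_zcd || (has_ext(ctx, RVC) && has_ext(ctx, RVD));
    bool c_flw_ok = ctx->cfg_ptr->ext_zcf || (has_ext(ctx, RVC) && has_ext(ctx, RVF));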
@@ -38,7 +38,9 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a)

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
TCGv target_pc = dest_gpr(ctx, a->rd);
gen_pc_plus_diff(target_pc, ctx, a->imm);
gen_set_gpr(ctx, a->rd, target_pc);
return true;
}

@@ -51,25 +53,33 @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
TCGLabel *misaligned = NULL;
TCGv target_pc = tcg_temp_new();
TCGv succ_pc = dest_gpr(ctx, a->rd);

tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

gen_set_pc(ctx, cpu_pc);
if (!ctx->cfg_ptr->ext_zca) {
if (get_xl(ctx) == MXL_RV32) {
tcg_gen_ext32s_tl(target_pc, target_pc);
}

if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
TCGv t0 = tcg_temp_new();

misaligned = gen_new_label();
tcg_gen_andi_tl(t0, cpu_pc, 0x2);
tcg_gen_andi_tl(t0, target_pc, 0x2);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
}

gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, a->rd, succ_pc);

tcg_gen_mov_tl(cpu_pc, target_pc);
lookup_and_goto_ptr(ctx);

if (misaligned) {
gen_set_label(misaligned);
gen_exception_inst_addr_mis(ctx);
gen_exception_inst_addr_mis(ctx, target_pc);
}
ctx->base.is_jmp = DISAS_NORETURN;

@@ -153,6 +163,7 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
TCGLabel *l = gen_new_label();
TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
target_ulong orig_pc_save = ctx->pc_save;

if (get_xl(ctx) == MXL_RV128) {
TCGv src1h = get_gprh(ctx, a->rs1);
@@ -165,16 +176,21 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
} else {
tcg_gen_brcond_tl(cond, src1, src2, l);
}
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
gen_goto_tb(ctx, 1, ctx->cur_insn_len);
ctx->pc_save = orig_pc_save;

gen_set_label(l); /* branch taken */

if (!ctx->cfg_ptr->ext_zca && ((ctx->base.pc_next + a->imm) & 0x3)) {
if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
(a->imm & 0x3)) {
/* misaligned */
gen_exception_inst_addr_mis(ctx);
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, a->imm);
gen_exception_inst_addr_mis(ctx, target_pc);
} else {
gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
gen_goto_tb(ctx, 0, a->imm);
}
ctx->pc_save = -1;
ctx->base.is_jmp = DISAS_NORETURN;

return true;
@@ -767,7 +783,7 @@ static bool trans_pause(DisasContext *ctx, arg_pause *a)
* PAUSE is a no-op in QEMU,
* end the TB and return to main loop
*/
gen_set_pc_imm(ctx, ctx->pc_succ_insn);
gen_update_pc(ctx, ctx->cur_insn_len);
exit_tb(ctx);
ctx->base.is_jmp = DISAS_NORETURN;

@@ -791,7 +807,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
* FENCE_I is a no-op in QEMU,
* however we need to end the translation block
*/
gen_set_pc_imm(ctx, ctx->pc_succ_insn);
gen_update_pc(ctx, ctx->cur_insn_len);
exit_tb(ctx);
ctx->base.is_jmp = DISAS_NORETURN;
return true;
@@ -802,7 +818,7 @@ static bool do_csr_post(DisasContext *ctx)
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
decode_save_opc(ctx);
/* We may have changed important cpu state -- exit to main loop. */
gen_set_pc_imm(ctx, ctx->pc_succ_insn);
gen_update_pc(ctx, ctx->cur_insn_len);
exit_tb(ctx);
ctx->base.is_jmp = DISAS_NORETURN;
return true;

@@ -169,7 +169,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
gen_set_gpr(s, rd, dst);
mark_vs_dirty(s);

gen_set_pc_imm(s, s->pc_succ_insn);
gen_update_pc(s, s->cur_insn_len);
lookup_and_goto_ptr(s);
s->base.is_jmp = DISAS_NORETURN;
return true;
@@ -188,7 +188,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
gen_helper_vsetvl(dst, cpu_env, s1, s2);
gen_set_gpr(s, rd, dst);
mark_vs_dirty(s);
gen_set_pc_imm(s, s->pc_succ_insn);
gen_update_pc(s, s->cur_insn_len);
lookup_and_goto_ptr(s);
s->base.is_jmp = DISAS_NORETURN;

@@ -33,7 +33,7 @@ static bool trans_wrs(DisasContext *ctx)
/* Clear the load reservation (if any). */
tcg_gen_movi_tl(load_res, -1);

gen_set_pc_imm(ctx, ctx->pc_succ_insn);
gen_update_pc(ctx, ctx->cur_insn_len);
tcg_gen_exit_tb(NULL, 0);
ctx->base.is_jmp = DISAS_NORETURN;

@@ -202,8 +202,8 @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val)
}

if (ret) {
TCGv ret_addr = get_gpr(ctx, xRA, EXT_NONE);
gen_set_pc(ctx, ret_addr);
TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN);
tcg_gen_mov_tl(cpu_pc, ret_addr);
tcg_gen_lookup_and_goto_ptr();
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -297,12 +297,14 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
* Update pc to current for the non-unwinding exception
* that might come from cpu_ld*_code() in the helper.
*/
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
gen_update_pc(ctx, 0);
gen_helper_cm_jalt(cpu_pc, cpu_env, tcg_constant_i32(a->index));

/* c.jt vs c.jalt depends on the index. */
if (a->index >= 32) {
gen_set_gpri(ctx, xRA, ctx->pc_succ_insn);
TCGv succ_pc = dest_gpr(ctx, xRA);
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, xRA, succ_pc);
}

tcg_gen_lookup_and_goto_ptr();

@@ -999,7 +999,7 @@ static void gen_th_sync_local(DisasContext *ctx)
* Emulate out-of-order barriers with pipeline flush
* by exiting the translation block.
*/
gen_set_pc_imm(ctx, ctx->pc_succ_insn);
gen_update_pc(ctx, ctx->cur_insn_len);
tcg_gen_exit_tb(NULL, 0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -26,10 +26,9 @@
#include "trace.h"
#include "exec/exec-all.h"

static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
* Accessor method to extract address matching type 'a field' from cfg reg
@@ -83,7 +82,7 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
* Accessor to set the cfg reg for a specific PMP/HART
* Bounds checks and relevant lock bit.
*/
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
bool locked = true;
@@ -119,14 +118,17 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)

if (locked) {
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
} else {
} else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule(env, pmp_index);
pmp_update_rule_addr(env, pmp_index);
return true;
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpcfg write - out of bounds\n");
}

return false;
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa,
@@ -206,18 +208,6 @@ void pmp_update_rule_nums(CPURISCVState *env)
}
}

/*
* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea'
* end address values.
* This function is called relatively infrequently whereas the check that
* an address is within a pmp rule is called often, so optimise that one
*/
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
pmp_update_rule_addr(env, pmp_index);
pmp_update_rule_nums(env);
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
target_ulong addr)
{
@@ -236,37 +226,34 @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
/*
* Check if the address has required RWX privs when no PMP entry is matched.
*/
static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs,
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
pmp_priv_t *allowed_privs,
target_ulong mode)
{
bool ret;

if (riscv_cpu_cfg(env)->epmp) {
if (MSECCFG_MMWP_ISSET(env)) {
/*
* The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
* so we default to deny all, even for M-mode.
*/
if (MSECCFG_MMWP_ISSET(env)) {
/*
* The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
* so we default to deny all, even for M-mode.
*/
*allowed_privs = 0;
return false;
} else if (MSECCFG_MML_ISSET(env)) {
/*
* The Machine Mode Lockdown (mseccfg.MML) bit is set
* so we can only execute code in M-mode with an applicable
* rule. Other modes are disabled.
*/
if (mode == PRV_M && !(privs & PMP_EXEC)) {
ret = true;
*allowed_privs = PMP_READ | PMP_WRITE;
} else {
ret = false;
*allowed_privs = 0;
return false;
} else if (MSECCFG_MML_ISSET(env)) {
/*
* The Machine Mode Lockdown (mseccfg.MML) bit is set
* so we can only execute code in M-mode with an applicable
* rule. Other modes are disabled.
*/
if (mode == PRV_M && !(privs & PMP_EXEC)) {
ret = true;
*allowed_privs = PMP_READ | PMP_WRITE;
} else {
ret = false;
*allowed_privs = 0;
}

return ret;
}

return ret;
}

if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
@@ -296,26 +283,21 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,

/*
* Check if the address has required RWX privs to complete desired operation
* Return PMP rule index if a pmp rule match
* Return MAX_RISCV_PMPS if default match
* Return negtive value if no match
* Return true if a pmp rule match or default match
* Return false if no match
*/
int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs, target_ulong mode)
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs, target_ulong mode)
{
int i = 0;
int ret = -1;
int pmp_size = 0;
target_ulong s = 0;
target_ulong e = 0;

/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
if (pmp_hart_has_privs_default(env, addr, size, privs,
allowed_privs, mode)) {
ret = MAX_RISCV_PMPS;
}
return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

if (size == 0) {
@@ -344,8 +326,8 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
if ((s + e) == 1) {
qemu_log_mask(LOG_GUEST_ERROR,
"pmp violation - access is partially inside\n");
ret = -1;
break;
*allowed_privs = 0;
return false;
}

/* fully inside */
@@ -452,20 +434,12 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
* defined with PMP must be used. We shouldn't fallback on
* finding default privileges.
*/
ret = i;
break;
return (privs & *allowed_privs) == privs;
}
}

/* No rule matched */
if (ret == -1) {
if (pmp_hart_has_privs_default(env, addr, size, privs,
allowed_privs, mode)) {
ret = MAX_RISCV_PMPS;
}
}

return ret;
return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
@@ -477,16 +451,20 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
int i;
uint8_t cfg_val;
int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
bool modified = false;

trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

for (i = 0; i < pmpcfg_nums; i++) {
cfg_val = (val >> 8 * i) & 0xff;
pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
}

/* If PMP permission of any addr has been changed, flush TLB pages. */
tlb_flush(env_cpu(env));
if (modified) {
pmp_update_rule_nums(env);
tlb_flush(env_cpu(env));
}
}

@@ -517,6 +495,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val)
{
trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
bool is_next_cfg_tor = false;

if (addr_index < MAX_RISCV_PMPS) {
/*
@@ -525,9 +504,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
*/
if (addr_index + 1 < MAX_RISCV_PMPS) {
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

if (pmp_cfg & PMP_LOCK &&
PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - pmpcfg + 1 locked\n");
return;
@@ -535,8 +514,14 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
}

if (!pmp_is_locked(env, addr_index)) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
if (env->pmp_state.pmp[addr_index].addr_reg != val) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule_addr(env, addr_index);
if (is_next_cfg_tor) {
pmp_update_rule_addr(env, addr_index + 1);
}
tlb_flush(env_cpu(env));
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - locked\n");
@@ -585,8 +570,15 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
}
}

/* Sticky bits */
val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
if (riscv_cpu_cfg(env)->epmp) {
/* Sticky bits */
val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
tlb_flush(env_cpu(env));
}
} else {
val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
}

env->mseccfg = val;
}
@@ -601,28 +593,67 @@ target_ulong mseccfg_csr_read(CPURISCVState *env)
}

/*
* Calculate the TLB size if the start address or the end address of
* PMP entry is presented in the TLB page.
* Calculate the TLB size.
* It's possible that PMP regions only cover partial of the TLB page, and
* this may split the page into regions with different permissions.
* For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
* ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
* the other regions in this page have RWX permissions.
* A write access to 0x80000000 will match PMP1. However we cannot cache the
* translation result in the TLB since this will make the write access to
* 0x80000008 bypass the check of PMP0.
* To avoid this we return a size of 1 (which means no caching) if the PMP
* region only covers partial of the TLB page.
*/
target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
target_ulong tlb_sa, target_ulong tlb_ea)
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
{
target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
target_ulong pmp_sa;
target_ulong pmp_ea;
target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
int i;

if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
/*
* If PMP is not supported or there are no PMP rules, the TLB page will not
* be split into regions with different permissions by PMP so we set the
* size to TARGET_PAGE_SIZE.
*/
if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
return TARGET_PAGE_SIZE;
} else {
/*
* At this point we have a tlb_size that is the smallest possible size
* That fits within a TARGET_PAGE_SIZE and the PMP region.
*
* If the size is less then TARGET_PAGE_SIZE we drop the size to 1.
* This means the result isn't cached in the TLB and is only used for
* a single translation.
*/
return 1;
}

for (i = 0; i < MAX_RISCV_PMPS; i++) {
if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
continue;
}

pmp_sa = env->pmp_state.addr[i].sa;
pmp_ea = env->pmp_state.addr[i].ea;

/*
* Only the first PMP entry that covers (whole or partial of) the TLB
* page really matters:
* If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
* since the following PMP entries have lower priority and will not
* affect the permissions of the page.
* If it only covers partial of the TLB page, set the size to 1 since
* the allowed permissions of the region may be different from other
* region of the page.
*/
if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
return TARGET_PAGE_SIZE;
} else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
(pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
return 1;
}
}

/*
* If no PMP entry matches the TLB page, the TLB page will also not be
* split into regions with different permissions by PMP so we set the size
* to TARGET_PAGE_SIZE.
*/
return TARGET_PAGE_SIZE;
}

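The example from the comment above, worked through against the new interface (illustrative values, assuming 4 KiB target pages):

    /*
     * Worked example (values taken from the comment above):
     *   PMP0: 0x80000008 - 0x8000000F, R
     *   PMP1: 0x80000000 - 0x80000FFF, RWX
     *   pmp_get_tlb_size(env, 0x80000000):
     *     tlb_sa = 0x80000000 & ~(TARGET_PAGE_SIZE - 1) = 0x80000000
     *     tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1        = 0x80000FFF
     *   PMP0 is the first entry that is not OFF; it overlaps the page but
     *   does not cover it completely, so the function returns 1 and the
     *   translation is not cached.  A later write to 0x80000008 therefore
     *   still goes through the PMP0 check instead of reusing a page-wide
     *   PMP1 result cached in the TLB.
     */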
/*

@@ -72,12 +72,11 @@ target_ulong mseccfg_csr_read(CPURISCVState *env);
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs,
target_ulong mode);
target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
target_ulong tlb_sa, target_ulong tlb_ea);
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs,
target_ulong mode);
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
@@ -59,8 +59,8 @@ typedef enum {

typedef struct DisasContext {
DisasContextBase base;
/* pc_succ_insn points to the instruction following base.pc_next */
target_ulong pc_succ_insn;
target_ulong cur_insn_len;
target_ulong pc_save;
target_ulong priv_ver;
RISCVMXL misa_mxl_max;
RISCVMXL xl;
@@ -224,26 +224,34 @@ static void decode_save_opc(DisasContext *ctx)
ctx->insn_start = NULL;
}

static void gen_set_pc_imm(DisasContext *ctx, target_ulong dest)
static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
target_long diff)
{
if (get_xl(ctx) == MXL_RV32) {
dest = (int32_t)dest;
target_ulong dest = ctx->base.pc_next + diff;

assert(ctx->pc_save != -1);
if (tb_cflags(ctx->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(target, cpu_pc, dest - ctx->pc_save);
if (get_xl(ctx) == MXL_RV32) {
tcg_gen_ext32s_tl(target, target);
}
} else {
if (get_xl(ctx) == MXL_RV32) {
dest = (int32_t)dest;
}
tcg_gen_movi_tl(target, dest);
}
tcg_gen_movi_tl(cpu_pc, dest);
}

static void gen_set_pc(DisasContext *ctx, TCGv dest)
static void gen_update_pc(DisasContext *ctx, target_long diff)
{
if (get_xl(ctx) == MXL_RV32) {
tcg_gen_ext32s_tl(cpu_pc, dest);
} else {
tcg_gen_mov_tl(cpu_pc, dest);
}
gen_pc_plus_diff(cpu_pc, ctx, diff);
ctx->pc_save = ctx->base.pc_next + diff;
}

static void generate_exception(DisasContext *ctx, int excp)
{
gen_set_pc_imm(ctx, ctx->base.pc_next);
gen_update_pc(ctx, 0);
gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -259,9 +267,9 @@ static void gen_exception_illegal(DisasContext *ctx)
}
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
static void gen_exception_inst_addr_mis(DisasContext *ctx, TCGv target)
{
tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
tcg_gen_st_tl(target, cpu_env, offsetof(CPURISCVState, badaddr));
generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

@@ -285,18 +293,33 @@ static void exit_tb(DisasContext *ctx)
tcg_gen_exit_tb(NULL, 0);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
{
target_ulong dest = ctx->base.pc_next + diff;

/*
* Under itrigger, instruction executes one by one like singlestep,
* direct block chain benefits will be small.
*/
if (translator_use_goto_tb(&ctx->base, dest) && !ctx->itrigger) {
tcg_gen_goto_tb(n);
gen_set_pc_imm(ctx, dest);
/*
* For pcrel, the pc must always be up-to-date on entry to
* the linked TB, so that it can use simple additions for all
* further adjustments. For !pcrel, the linked TB is compiled
* to know its full virtual address, so we can delay the
* update to pc to the unlinked path. A long chain of links
* can thus avoid many updates to the PC.
*/
if (tb_cflags(ctx->base.tb) & CF_PCREL) {
gen_update_pc(ctx, diff);
tcg_gen_goto_tb(n);
} else {
tcg_gen_goto_tb(n);
gen_update_pc(ctx, diff);
}
tcg_gen_exit_tb(ctx->base.tb, n);
} else {
gen_set_pc_imm(ctx, dest);
gen_update_pc(ctx, diff);
lookup_and_goto_ptr(ctx);
}
}
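With CF_PCREL set, the translator no longer bakes absolute guest addresses into the generated code: gen_pc_plus_diff() produces every target as an offset from the last value written to cpu_pc, which is tracked in pc_save. A small worked example of that arithmetic (illustrative values, 64-bit, no RV32 truncation):

    /*
     * Assume pc_save = 0x80000100 (cpu_pc currently holds this value) and the
     * instruction at pc_next = 0x80000104 jumps by diff = +0x20:
     *
     *   dest   = pc_next + diff   = 0x80000124
     *   offset = dest - pc_save   = 0x24
     *   target = cpu_pc + 0x24    (emitted with tcg_gen_addi_tl)
     *
     * Only the relative value 0x24 ends up in the translated code, which is
     * what allows the block to be reused when the same code runs at a
     * different virtual address.
     */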
@@ -547,19 +570,22 @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)

static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
target_ulong next_pc;
TCGv succ_pc = dest_gpr(ctx, rd);

/* check misaligned: */
next_pc = ctx->base.pc_next + imm;
if (!ctx->cfg_ptr->ext_zca) {
if ((next_pc & 0x3) != 0) {
gen_exception_inst_addr_mis(ctx);
if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
if ((imm & 0x3) != 0) {
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, imm);
gen_exception_inst_addr_mis(ctx, target_pc);
return;
}
}

gen_set_gpri(ctx, rd, ctx->pc_succ_insn);
gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, rd, succ_pc);

gen_goto_tb(ctx, 0, imm); /* must use this for safety */
ctx->base.is_jmp = DISAS_NORETURN;
}

@@ -1117,15 +1143,16 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
};

ctx->virt_inst_excp = false;
ctx->cur_insn_len = insn_len(opcode);
/* Check for compressed insn */
if (insn_len(opcode) == 2) {
if (ctx->cur_insn_len == 2) {
ctx->opcode = opcode;
ctx->pc_succ_insn = ctx->base.pc_next + 2;
/*
* The Zca extension is added as way to refer to instructions in the C
* extension that do not include the floating-point loads and stores
*/
if (ctx->cfg_ptr->ext_zca && decode_insn16(ctx, opcode)) {
if ((has_ext(ctx, RVC) || ctx->cfg_ptr->ext_zca) &&
decode_insn16(ctx, opcode)) {
return;
}
} else {
@@ -1134,7 +1161,6 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
translator_lduw(env, &ctx->base,
ctx->base.pc_next + 2));
ctx->opcode = opcode32;
ctx->pc_succ_insn = ctx->base.pc_next + 4;

for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
if (decoders[i].guard_func(ctx) &&
@@ -1154,7 +1180,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
uint32_t tb_flags = ctx->base.tb->flags;

ctx->pc_succ_insn = ctx->base.pc_first;
ctx->pc_save = ctx->base.pc_first;
ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV);
ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
ctx->mstatus_fs = FIELD_EX32(tb_flags, TB_FLAGS, FS);
@@ -1189,8 +1215,13 @@ static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
target_ulong pc_next = ctx->base.pc_next;

tcg_gen_insn_start(ctx->base.pc_next, 0);
if (tb_cflags(dcbase->tb) & CF_PCREL) {
pc_next &= ~TARGET_PAGE_MASK;
}

tcg_gen_insn_start(pc_next, 0);
ctx->insn_start = tcg_last_op();
}

@@ -1202,7 +1233,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)

ctx->ol = ctx->xl;
decode_opc(env, ctx, opcode16);
ctx->base.pc_next = ctx->pc_succ_insn;
ctx->base.pc_next += ctx->cur_insn_len;

/* Only the first insn within a TB is allowed to cross a page boundary. */
if (ctx->base.is_jmp == DISAS_NEXT) {
@@ -1229,7 +1260,7 @@ static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)

switch (ctx->base.is_jmp) {
case DISAS_TOO_MANY:
gen_goto_tb(ctx, 0, ctx->base.pc_next);
gen_goto_tb(ctx, 0, 0);
break;
case DISAS_NORETURN:
break;
@@ -169,7 +169,7 @@ static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,

static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
{
return (addr & env->cur_pmmask) | env->cur_pmbase;
return (addr & ~env->cur_pmmask) | env->cur_pmbase;
}

/*
@@ -264,26 +264,21 @@ GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)

static void vext_set_tail_elems_1s(CPURISCVState *env, target_ulong vl,
void *vd, uint32_t desc, uint32_t nf,
static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
uint32_t desc, uint32_t nf,
uint32_t esz, uint32_t max_elems)
{
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
uint32_t vta = vext_vta(desc);
uint32_t registers_used;
int k;

if (vta == 0) {
return;
}

for (k = 0; k < nf; ++k) {
vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz,
(k * max_elems + max_elems) * esz);
}

if (nf * max_elems % total_elems != 0) {
registers_used = ((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
registers_used * vlenb);
}
}

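The early return encodes the tail policy: with vta == 0 (tail undisturbed) the destination's tail elements must keep their previous values, so there is nothing to write and the helper exits immediately; the vext_set_elems_1s() call only applies to the tail-agnostic case (vta == 1), where QEMU fills the tail with all-ones. An illustrative call (hypothetical parameters nf = 1, vl = 2, esz = 4, max_elems = 4):

    /* Fills bytes 8..15 of vd with 1s when vta == 1; skipped when vta == 0. */
    vext_set_elems_1s(vd, 1, (0 * 4 + 2) * 4, (0 * 4 + 4) * 4);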
/*
@@ -319,7 +314,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
}
env->vstart = 0;

vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems);
vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}

#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
@@ -378,12 +373,12 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
}
env->vstart = 0;

vext_set_tail_elems_1s(env, evl, vd, desc, nf, esz, max_elems);
vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
}

/*
* masked unit-stride load and store operation will be a special case of
* stride, stride = NF * sizeof (MTYPE)
* stride, stride = NF * sizeof (ETYPE)
*/

#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
@@ -499,7 +494,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
}
env->vstart = 0;

vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems);
vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}

#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
@@ -629,7 +624,7 @@ ProbeSuccess:
}
env->vstart = 0;

vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems);
vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}

#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
@@ -655,10 +650,6 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))

/* Unsigned min/max */
#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)

/*
* load and store whole register instructions
*/
@@ -495,6 +495,8 @@ const QEMULogItem qemu_log_items[] = {
"log every user-mode syscall, its input, and its result" },
{ LOG_PER_THREAD, "tid",
"open a separate log file per thread; filename must contain '%d'" },
{ CPU_LOG_TB_VPU, "vpu",
"include VPU registers in the 'cpu' logging" },
{ 0, NULL, NULL },
};
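With the new 'vpu' item, the vector registers can be requested together with the normal CPU state on the command line, for example (assuming a RISC-V guest): qemu-system-riscv64 -d cpu,vpu ... . As the item description says, it extends the existing 'cpu' register dump rather than adding a separate log stream.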