Merge tag 'pull-riscv-to-apply-20240209' of https://github.com/alistair23/qemu into staging

RISC-V PR for 9.0

* Check for 'A' extension on all atomic instructions
* Add support for 'B' extension
* Internally deprecate riscv_cpu_options
* Implement optional CSR mcontext of debug Sdtrig extension
* Internally add cpu->cfg.vlenb and remove cpu->cfg.vlen
* Support vlenb and vregs[] in KVM
* RISC-V gdbstub and TCG plugin improvements
* Remove vxrm and vxsat from FCSR
* Use RISCVException as return type for all csr ops
* Use g_autofree more and fix a memory leak
* Add support for Zaamo and Zalrsc
* Support new isa extension detection devicetree properties
* SMBIOS support for RISC-V virt machine
* Enable xtheadsync under user mode
* Add rv32i, rv32e and rv64e CPUs

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmXGBRAACgkQr3yVEwxT
# gBPqVA//etMiwP8+lQb2E4pw+QwBIzpm3qFyBlqgSCFrekj1u2kYNd4CH3CKurWE
# ysoQ6OAMeb0MUbRHdjrejjzD/wOg7JNA9h7ynM1VbupveBrJY3GWC6qQWSG+A1j/
# LSgmr/dDya74chDxjxa+7ld3xqloHi5OtdGaeORfdPXl7mjCCKKCoSKYCex1ykup
# uuB7bsjeWeWEbuUsntmeuHJLZJuhpnbuZJmp17tEo+3vWXqjxV00Lik+XMwh3gua
# KOLiAqHjGr2NEhA3Mg1JLcQ+6JLTDM9ugZpQeNGQwMkfuB/RAU7jO/1Di3flbadF
# 8l2xOHu3mydDbfdxTGZNJjcIrMTX/YEewAYZLRYpNsyPOMntgq8HEegwCdWGvK7C
# M5Tc59MNSuBt+zkZkHd21qLYusa2ThP4YT/schh7IA+2F1TSKdhlptEzi2oebIc7
# ilLSgZ9Of72QlAH2OPJNSAL9Nbc06MHEM0JiHIJa5u+XdcVRhZus5h1YIOKXisqF
# YPP22RnI5Jj5d5csa/0ONAZGFh5SRMTJtpjKoKSkzoYJWDjCQ2MiUAOmLscchMZd
# wbK0vjeRf6kRG4U4z7nTmHS9kzH8RXUZDecVcOITuMpKih9LhUiCZ+xPunFYPycJ
# WNFa9/pENcCXJweXvtk4NHwx933rX56678lF6KY2hwUwwaiBOv4=
# =yuRM
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 09 Feb 2024 10:57:20 GMT
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65  9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20240209' of https://github.com/alistair23/qemu: (61 commits)
  target/riscv: add rv32i, rv32e and rv64e CPUs
  target/riscv/cpu.c: add riscv_bare_cpu_init()
  target/riscv: Enable xtheadsync under user mode
  qemu-options: enable -smbios option on RISC-V
  target/riscv: SMBIOS support for RISC-V virt machine
  smbios: function to set default processor family
  smbios: add processor-family option
  target/riscv: support new isa extension detection devicetree properties
  target/riscv: use misa_mxl_max to populate isa string rather than TARGET_LONG_BITS
  target/riscv: Expose Zaamo and Zalrsc extensions
  target/riscv: Check 'A' and split extensions for atomic instructions
  target/riscv: Add Zaamo and Zalrsc extension infrastructure
  hw/riscv/virt.c: use g_autofree in create_fdt_*
  hw/riscv/virt.c: use g_autofree in virt_machine_init()
  hw/riscv/virt.c: use g_autofree in create_fdt_virtio()
  hw/riscv/virt.c: use g_autofree in create_fdt_sockets()
  hw/riscv/virt.c: use g_autofree in create_fdt_socket_cpus()
  hw/riscv/numa.c: use g_autofree in socket_fdt_write_distance_matrix()
  hw/riscv/virt-acpi-build.c: fix leak in build_rhct()
  target/riscv: Use RISCVException as return type for all csr ops
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit df50424b4d by Peter Maydell, 2024-02-09 16:15:01 +00:00
30 changed files with 1507 additions and 698 deletions

View File

@ -41,6 +41,7 @@ config RISCV_VIRT
select RISCV_IMSIC
select SIFIVE_PLIC
select SIFIVE_TEST
select SMBIOS
select VIRTIO_MMIO
select FW_CFG_DMA
select PLATFORM_BUS

View File

@ -36,7 +36,8 @@
bool riscv_is_32bit(RISCVHartArrayState *harts)
{
return harts->harts[0].env.misa_mxl_max == MXL_RV32;
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(&harts->harts[0]);
return mcc->misa_mxl_max == MXL_RV32;
}
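
misa_mxl_max now lives on RISCVCPUClass rather than in CPUArchState (see the target/riscv/cpu.h hunk later on this page), so board helpers like riscv_is_32bit() and the new riscv_cpu_max_xlen() read it from the class. As a standalone sketch of how the enum value maps to an XLEN, assuming the usual QEMU encoding (MXL_RV32 = 1, MXL_RV64 = 2, MXL_RV128 = 3, mirroring the misa.MXL field):

    #include <assert.h>

    /* Sketch only: the shift matches the old gdbstub expression
     * "16 << misa_mxl_max" that riscv_cpu_max_xlen() replaces. */
    enum { MXL_RV32 = 1, MXL_RV64 = 2, MXL_RV128 = 3 };

    static int max_xlen(int misa_mxl_max)
    {
        return 16 << misa_mxl_max;
    }

    int main(void)
    {
        assert(max_xlen(MXL_RV32) == 32);
        assert(max_xlen(MXL_RV64) == 64);
        assert(max_xlen(MXL_RV128) == 128);
        return 0;
    }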
/*

View File

@ -167,7 +167,8 @@ void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name,
void riscv_socket_fdt_write_distance_matrix(const MachineState *ms)
{
int i, j, idx;
uint32_t *dist_matrix, dist_matrix_size;
g_autofree uint32_t *dist_matrix = NULL;
uint32_t dist_matrix_size;
if (numa_enabled(ms) && ms->numa_state->have_numa_distance) {
dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms);
@ -189,7 +190,6 @@ void riscv_socket_fdt_write_distance_matrix(const MachineState *ms)
"numa-distance-map-v1");
qemu_fdt_setprop(ms->fdt, "/distance-map", "distance-matrix",
dist_matrix, dist_matrix_size);
g_free(dist_matrix);
}
}
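
Many hunks on this page apply the same cleanup shown here: marking the buffer g_autofree makes GLib free it when the variable goes out of scope, which removes the explicit g_free() and also plugs leaks on early-return paths (the build_rhct() fix below is exactly that case). A minimal standalone sketch with a hypothetical helper name, not taken from this series:

    #include <glib.h>

    /* Hypothetical helper: the string is freed on every return path
     * without an explicit g_free(). */
    static gboolean node_is_cluster(int socket)
    {
        g_autofree char *name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);

        if (socket < 0) {
            return FALSE;                         /* 'name' freed here */
        }
        return g_str_has_prefix(name, "/cpus");   /* ...and freed here too */
    }

    int main(void)
    {
        return node_is_cluster(0) ? 0 : 1;
    }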

View File

@ -171,7 +171,6 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap,
int cpu_phandle = phandle++;
nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
char *isa;
qemu_fdt_add_subnode(fdt, nodename);
/* cpu 0 is the management hart that does not have mmu */
if (cpu != 0) {
@ -180,11 +179,10 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap,
} else {
qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
}
isa = riscv_isa_string(&s->soc.u_cpus.harts[cpu - 1]);
riscv_isa_write_fdt(&s->soc.u_cpus.harts[cpu - 1], fdt, nodename);
} else {
isa = riscv_isa_string(&s->soc.e_cpus.harts[0]);
riscv_isa_write_fdt(&s->soc.e_cpus.harts[0], fdt, nodename);
}
qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
@ -194,7 +192,6 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
g_free(isa);
g_free(intc);
g_free(nodename);
}
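
This hunk (and the spike and virt ones that follow) replaces the open-coded riscv_isa_string() / qemu_fdt_setprop_string() / g_free() sequence with the new riscv_isa_write_fdt() helper declared in the cpu.h hunk further down. A hedged sketch of its likely shape; the actual implementation is in target/riscv (not fully shown on this page) and, per the cover letter, is also expected to emit the newer isa-extension detection properties:

    /* Sketch only, not the real QEMU implementation. */
    void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
    {
        g_autofree char *isa = riscv_isa_string(cpu);

        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
        /* The real helper presumably also writes the new
         * "riscv,isa-base" / "riscv,isa-extensions" properties. */
    }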

View File

@ -59,7 +59,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap,
MachineState *ms = MACHINE(s);
uint32_t *clint_cells;
uint32_t cpu_phandle, intc_phandle, phandle = 1;
char *name, *mem_name, *clint_name, *clust_name;
char *mem_name, *clint_name, *clust_name;
char *core_name, *cpu_name, *intc_name;
static const char * const clint_compat[2] = {
"sifive,clint0", "riscv,clint0"
@ -113,9 +113,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap,
} else {
qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48");
}
name = riscv_isa_string(&s->soc[socket].harts[cpu]);
qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
g_free(name);
riscv_isa_write_fdt(&s->soc[socket].harts[cpu], fdt, cpu_name);
qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
qemu_fdt_setprop_cell(fdt, cpu_name, "reg",

View File

@ -196,7 +196,7 @@ static void build_rhct(GArray *table_data,
RISCVCPU *cpu = &s->soc[0].harts[0];
uint32_t mmu_offset = 0;
uint8_t satp_mode_max;
char *isa;
g_autofree char *isa = NULL;
AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };

View File

@ -36,6 +36,7 @@
#include "hw/riscv/boot.h"
#include "hw/riscv/numa.h"
#include "kvm/kvm_riscv.h"
#include "hw/firmware/smbios.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/riscv_aplic.h"
#include "hw/intc/sifive_plic.h"
@ -215,12 +216,15 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
int cpu;
uint32_t cpu_phandle;
MachineState *ms = MACHINE(s);
char *name, *cpu_name, *core_name, *intc_name, *sv_name;
bool is_32_bit = riscv_is_32bit(&s->soc[0]);
uint8_t satp_mode_max;
for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu];
g_autofree char *cpu_name = NULL;
g_autofree char *core_name = NULL;
g_autofree char *intc_name = NULL;
g_autofree char *sv_name = NULL;
cpu_phandle = (*phandle)++;
@ -233,12 +237,9 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
sv_name = g_strdup_printf("riscv,%s",
satp_mode_str(satp_mode_max, is_32_bit));
qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name);
g_free(sv_name);
}
name = riscv_isa_string(cpu_ptr);
qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name);
g_free(name);
riscv_isa_write_fdt(cpu_ptr, ms->fdt, cpu_name);
if (cpu_ptr->cfg.ext_zicbom) {
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size",
@ -277,17 +278,13 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
qemu_fdt_add_subnode(ms->fdt, core_name);
qemu_fdt_setprop_cell(ms->fdt, core_name, "cpu", cpu_phandle);
g_free(core_name);
g_free(intc_name);
g_free(cpu_name);
}
}
static void create_fdt_socket_memory(RISCVVirtState *s,
const MemMapEntry *memmap, int socket)
{
char *mem_name;
g_autofree char *mem_name = NULL;
uint64_t addr, size;
MachineState *ms = MACHINE(s);
@ -299,7 +296,6 @@ static void create_fdt_socket_memory(RISCVVirtState *s,
addr >> 32, addr, size >> 32, size);
qemu_fdt_setprop_string(ms->fdt, mem_name, "device_type", "memory");
riscv_socket_fdt_write_id(ms, mem_name, socket);
g_free(mem_name);
}
static void create_fdt_socket_clint(RISCVVirtState *s,
@ -307,8 +303,8 @@ static void create_fdt_socket_clint(RISCVVirtState *s,
uint32_t *intc_phandles)
{
int cpu;
char *clint_name;
uint32_t *clint_cells;
g_autofree char *clint_name = NULL;
g_autofree uint32_t *clint_cells = NULL;
unsigned long clint_addr;
MachineState *ms = MACHINE(s);
static const char * const clint_compat[2] = {
@ -335,9 +331,6 @@ static void create_fdt_socket_clint(RISCVVirtState *s,
qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended",
clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
riscv_socket_fdt_write_id(ms, clint_name, socket);
g_free(clint_name);
g_free(clint_cells);
}
static void create_fdt_socket_aclint(RISCVVirtState *s,
@ -348,9 +341,9 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
char *name;
unsigned long addr, size;
uint32_t aclint_cells_size;
uint32_t *aclint_mswi_cells;
uint32_t *aclint_sswi_cells;
uint32_t *aclint_mtimer_cells;
g_autofree uint32_t *aclint_mswi_cells = NULL;
g_autofree uint32_t *aclint_sswi_cells = NULL;
g_autofree uint32_t *aclint_mtimer_cells = NULL;
MachineState *ms = MACHINE(s);
aclint_mswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
@ -422,10 +415,6 @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
riscv_socket_fdt_write_id(ms, name, socket);
g_free(name);
}
g_free(aclint_mswi_cells);
g_free(aclint_mtimer_cells);
g_free(aclint_sswi_cells);
}
static void create_fdt_socket_plic(RISCVVirtState *s,
@ -434,8 +423,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
uint32_t *plic_phandles)
{
int cpu;
char *plic_name;
uint32_t *plic_cells;
g_autofree char *plic_name = NULL;
g_autofree uint32_t *plic_cells;
unsigned long plic_addr;
MachineState *ms = MACHINE(s);
static const char * const plic_compat[2] = {
@ -495,10 +484,6 @@ static void create_fdt_socket_plic(RISCVVirtState *s,
memmap[VIRT_PLATFORM_BUS].size,
VIRT_PLATFORM_BUS_IRQ);
}
g_free(plic_name);
g_free(plic_cells);
}
uint32_t imsic_num_bits(uint32_t count)
@ -517,11 +502,12 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
bool m_mode, uint32_t imsic_guest_bits)
{
int cpu, socket;
char *imsic_name;
g_autofree char *imsic_name = NULL;
MachineState *ms = MACHINE(s);
int socket_count = riscv_socket_count(ms);
uint32_t imsic_max_hart_per_socket;
uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
uint32_t imsic_max_hart_per_socket, imsic_addr, imsic_size;
g_autofree uint32_t *imsic_cells = NULL;
g_autofree uint32_t *imsic_regs = NULL;
imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2);
imsic_regs = g_new0(uint32_t, socket_count * 4);
@ -573,10 +559,6 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
IMSIC_MMIO_GROUP_MIN_SHIFT);
}
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
g_free(imsic_name);
g_free(imsic_regs);
g_free(imsic_cells);
}
static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
@ -608,12 +590,10 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
bool m_mode, int num_harts)
{
int cpu;
char *aplic_name;
uint32_t *aplic_cells;
g_autofree char *aplic_name = NULL;
g_autofree uint32_t *aplic_cells = g_new0(uint32_t, num_harts * 2);
MachineState *ms = MACHINE(s);
aplic_cells = g_new0(uint32_t, num_harts * 2);
for (cpu = 0; cpu < num_harts; cpu++) {
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
@ -648,9 +628,6 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
riscv_socket_fdt_write_id(ms, aplic_name, socket);
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle);
g_free(aplic_name);
g_free(aplic_cells);
}
static void create_fdt_socket_aplic(RISCVVirtState *s,
@ -662,7 +639,7 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
uint32_t *aplic_phandles,
int num_harts)
{
char *aplic_name;
g_autofree char *aplic_name = NULL;
unsigned long aplic_addr;
MachineState *ms = MACHINE(s);
uint32_t aplic_m_phandle, aplic_s_phandle;
@ -697,23 +674,18 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
VIRT_PLATFORM_BUS_IRQ);
}
g_free(aplic_name);
aplic_phandles[socket] = aplic_s_phandle;
}
static void create_fdt_pmu(RISCVVirtState *s)
{
char *pmu_name;
g_autofree char *pmu_name = g_strdup_printf("/pmu");
MachineState *ms = MACHINE(s);
RISCVCPU hart = s->soc[0].harts[0];
pmu_name = g_strdup_printf("/pmu");
qemu_fdt_add_subnode(ms->fdt, pmu_name);
qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name);
g_free(pmu_name);
}
static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
@ -723,11 +695,11 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t *irq_virtio_phandle,
uint32_t *msi_pcie_phandle)
{
char *clust_name;
int socket, phandle_pos;
MachineState *ms = MACHINE(s);
uint32_t msi_m_phandle = 0, msi_s_phandle = 0;
uint32_t *intc_phandles, xplic_phandles[MAX_NODES];
uint32_t xplic_phandles[MAX_NODES];
g_autofree uint32_t *intc_phandles = NULL;
int socket_count = riscv_socket_count(ms);
qemu_fdt_add_subnode(ms->fdt, "/cpus");
@ -741,6 +713,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
phandle_pos = ms->smp.cpus;
for (socket = (socket_count - 1); socket >= 0; socket--) {
g_autofree char *clust_name = NULL;
phandle_pos -= s->soc[socket].num_harts;
clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
@ -751,8 +724,6 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
create_fdt_socket_memory(s, memmap, socket);
g_free(clust_name);
if (tcg_enabled()) {
if (s->have_aclint) {
create_fdt_socket_aclint(s, memmap, socket,
@ -795,8 +766,6 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
}
}
g_free(intc_phandles);
if (kvm_enabled() && virt_use_kvm_aia(s)) {
*irq_mmio_phandle = xplic_phandles[0];
*irq_virtio_phandle = xplic_phandles[0];
@ -825,12 +794,12 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t irq_virtio_phandle)
{
int i;
char *name;
MachineState *ms = MACHINE(s);
for (i = 0; i < VIRTIO_COUNT; i++) {
name = g_strdup_printf("/soc/virtio_mmio@%lx",
g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx",
(long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size));
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio");
qemu_fdt_setprop_cells(ms->fdt, name, "reg",
@ -845,7 +814,6 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
qemu_fdt_setprop_cells(ms->fdt, name, "interrupts",
VIRTIO_IRQ + i, 0x4);
}
g_free(name);
}
}
@ -853,7 +821,7 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t irq_pcie_phandle,
uint32_t msi_pcie_phandle)
{
char *name;
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
name = g_strdup_printf("/soc/pci@%lx",
@ -887,7 +855,6 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);
create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle);
g_free(name);
}
static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
@ -934,7 +901,7 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap,
static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t irq_mmio_phandle)
{
char *name;
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base);
@ -952,13 +919,12 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
}
qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name);
g_free(name);
}
static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
uint32_t irq_mmio_phandle)
{
char *name;
g_autofree char *name = NULL;
MachineState *ms = MACHINE(s);
name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base);
@ -974,41 +940,36 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
} else {
qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", RTC_IRQ, 0x4);
}
g_free(name);
}
static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap)
{
char *name;
MachineState *ms = MACHINE(s);
hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2;
hwaddr flashbase = virt_memmap[VIRT_FLASH].base;
g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase);
name = g_strdup_printf("/flash@%" PRIx64, flashbase);
qemu_fdt_add_subnode(ms->fdt, name);
qemu_fdt_setprop_string(ms->fdt, name, "compatible", "cfi-flash");
qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg",
2, flashbase, 2, flashsize,
2, flashbase + flashsize, 2, flashsize);
qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4);
g_free(name);
}
static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap)
{
char *nodename;
MachineState *ms = MACHINE(s);
hwaddr base = memmap[VIRT_FW_CFG].base;
hwaddr size = memmap[VIRT_FW_CFG].size;
g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
qemu_fdt_add_subnode(ms->fdt, nodename);
qemu_fdt_setprop_string(ms->fdt, nodename,
"compatible", "qemu,fw-cfg-mmio");
qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
2, base, 2, size);
qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
g_free(nodename);
}
static void finalize_fdt(RISCVVirtState *s)
@ -1155,7 +1116,7 @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
int base_hartid, int hart_count)
{
DeviceState *ret;
char *plic_hart_config;
g_autofree char *plic_hart_config = NULL;
/* Per-socket PLIC hart topology configuration string */
plic_hart_config = riscv_plic_hart_config_string(hart_count);
@ -1174,8 +1135,6 @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
VIRT_PLIC_CONTEXT_STRIDE,
memmap[VIRT_PLIC].size);
g_free(plic_hart_config);
return ret;
}
@ -1263,6 +1222,45 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
sysbus_mmio_get_region(sysbus, 0));
}
static void virt_build_smbios(RISCVVirtState *s)
{
MachineClass *mc = MACHINE_GET_CLASS(s);
MachineState *ms = MACHINE(s);
uint8_t *smbios_tables, *smbios_anchor;
size_t smbios_tables_len, smbios_anchor_len;
struct smbios_phys_mem_area mem_array;
const char *product = "QEMU Virtual Machine";
if (kvm_enabled()) {
product = "KVM Virtual Machine";
}
smbios_set_defaults("QEMU", product, mc->name, false,
true, SMBIOS_ENTRY_POINT_TYPE_64);
if (riscv_is_32bit(&s->soc[0])) {
smbios_set_default_processor_family(0x200);
} else {
smbios_set_default_processor_family(0x201);
}
/* build the array of physical mem area from base_memmap */
mem_array.address = s->memmap[VIRT_DRAM].base;
mem_array.length = ms->ram_size;
smbios_get_tables(ms, &mem_array, 1,
&smbios_tables, &smbios_tables_len,
&smbios_anchor, &smbios_anchor_len,
&error_fatal);
if (smbios_anchor) {
fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-tables",
smbios_tables, smbios_tables_len);
fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-anchor",
smbios_anchor, smbios_anchor_len);
}
}
static void virt_machine_done(Notifier *notifier, void *data)
{
RISCVVirtState *s = container_of(notifier, RISCVVirtState,
@ -1351,6 +1349,8 @@ static void virt_machine_done(Notifier *notifier, void *data)
riscv_setup_direct_kernel(kernel_entry, fdt_load_addr);
}
virt_build_smbios(s);
if (virt_is_acpi_enabled(s)) {
virt_acpi_setup(s);
}
@ -1362,7 +1362,6 @@ static void virt_machine_init(MachineState *machine)
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
char *soc_name;
DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip;
int i, base_hartid, hart_count;
int socket_count = riscv_socket_count(machine);
@ -1382,6 +1381,8 @@ static void virt_machine_init(MachineState *machine)
/* Initialize sockets */
mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL;
for (i = 0; i < socket_count; i++) {
g_autofree char *soc_name = g_strdup_printf("soc%d", i);
if (!riscv_socket_check_hartids(machine, i)) {
error_report("discontinuous hartids in socket%d", i);
exit(1);
@ -1399,10 +1400,8 @@ static void virt_machine_init(MachineState *machine)
exit(1);
}
soc_name = g_strdup_printf("soc%d", i);
object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
TYPE_RISCV_HART_ARRAY);
g_free(soc_name);
object_property_set_str(OBJECT(&s->soc[i]), "cpu-type",
machine->cpu_type, &error_abort);
object_property_set_int(OBJECT(&s->soc[i]), "hartid-base",

View File

@ -102,6 +102,7 @@ static struct {
#define DEFAULT_CPU_SPEED 2000
static struct {
uint16_t processor_family;
const char *sock_pfx, *manufacturer, *version, *serial, *asset, *part;
uint64_t max_speed;
uint64_t current_speed;
@ -110,6 +111,7 @@ static struct {
.max_speed = DEFAULT_CPU_SPEED,
.current_speed = DEFAULT_CPU_SPEED,
.processor_id = 0,
.processor_family = 0x01, /* Other */
};
struct type8_instance {
@ -337,6 +339,10 @@ static const QemuOptDesc qemu_smbios_type4_opts[] = {
.name = "part",
.type = QEMU_OPT_STRING,
.help = "part number",
}, {
.name = "processor-family",
.type = QEMU_OPT_NUMBER,
.help = "processor family",
}, {
.name = "processor-id",
.type = QEMU_OPT_NUMBER,
@ -726,7 +732,7 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
snprintf(sock_str, sizeof(sock_str), "%s%2x", type4.sock_pfx, instance);
SMBIOS_TABLE_SET_STR(4, socket_designation_str, sock_str);
t->processor_type = 0x03; /* CPU */
t->processor_family = 0x01; /* Other */
t->processor_family = 0xfe; /* use Processor Family 2 field */
SMBIOS_TABLE_SET_STR(4, processor_manufacturer_str, type4.manufacturer);
if (type4.processor_id == 0) {
t->processor_id[0] = cpu_to_le32(smbios_cpuid_version);
@ -758,7 +764,7 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
t->thread_count = (threads_per_socket > 255) ? 0xFF : threads_per_socket;
t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */
t->processor_family2 = cpu_to_le16(0x01); /* Other */
t->processor_family2 = cpu_to_le16(type4.processor_family);
if (tbl_len == SMBIOS_TYPE_4_LEN_V30) {
t->core_count2 = t->core_enabled2 = cpu_to_le16(cores_per_socket);
@ -983,6 +989,13 @@ void smbios_set_cpuid(uint32_t version, uint32_t features)
field = value; \
}
void smbios_set_default_processor_family(uint16_t processor_family)
{
if (type4.processor_family <= 0x01) {
type4.processor_family = processor_family;
}
}
void smbios_set_defaults(const char *manufacturer, const char *product,
const char *version, bool legacy_mode,
bool uuid_encoded, SmbiosEntryPointType ep_type)
@ -1402,6 +1415,9 @@ void smbios_entry_add(QemuOpts *opts, Error **errp)
return;
}
save_opt(&type4.sock_pfx, opts, "sock_pfx");
type4.processor_family = qemu_opt_get_number(opts,
"processor-family",
0x01 /* Other */);
save_opt(&type4.manufacturer, opts, "manufacturer");
save_opt(&type4.version, opts, "version");
save_opt(&type4.serial, opts, "serial");
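
Note the precedence this creates: smbios_entry_add() stores whatever the user passed via -smbios type=4,processor-family=N (defaulting to 0x01, "Other"), and the later smbios_set_default_processor_family() call from the virt board only replaces values that are still at or below 0x01. An explicit user setting therefore wins over the board default of 0x200/0x201 (the codes the virt machine uses for RV32/RV64). A standalone model of that rule, mirroring the names in the diff:

    #include <assert.h>
    #include <stdint.h>

    /* Illustration only, not QEMU code. */
    static struct { uint16_t processor_family; } type4 = { .processor_family = 0x01 };

    static void smbios_set_default_processor_family(uint16_t family)
    {
        if (type4.processor_family <= 0x01) {   /* still "Other"/unset */
            type4.processor_family = family;
        }
    }

    int main(void)
    {
        /* No -smbios processor-family given: the board default applies. */
        smbios_set_default_processor_family(0x201);
        assert(type4.processor_family == 0x201);

        /* A user-supplied value parsed earlier is preserved. */
        type4.processor_family = 0x150;
        smbios_set_default_processor_family(0x201);
        assert(type4.processor_family == 0x150);
        return 0;
    }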

View File

@ -295,6 +295,7 @@ void smbios_set_cpuid(uint32_t version, uint32_t features);
void smbios_set_defaults(const char *manufacturer, const char *product,
const char *version, bool legacy_mode,
bool uuid_encoded, SmbiosEntryPointType ep_type);
void smbios_set_default_processor_family(uint16_t processor_family);
uint8_t *smbios_get_table_legacy(MachineState *ms, size_t *length);
void smbios_get_tables(MachineState *ms,
const struct smbios_phys_mem_area *mem_array,

View File

@ -2686,7 +2686,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios,
" specify SMBIOS type 3 fields\n"
"-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str]\n"
" [,asset=str][,part=str][,max-speed=%d][,current-speed=%d]\n"
" [,processor-id=%d]\n"
" [,processor-family=%d,processor-id=%d]\n"
" specify SMBIOS type 4 fields\n"
"-smbios type=8[,external_reference=str][,internal_reference=str][,connector_type=%d][,port_type=%d]\n"
" specify SMBIOS type 8 fields\n"
@ -2697,7 +2697,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios,
" specify SMBIOS type 17 fields\n"
"-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n"
" specify SMBIOS type 41 fields\n",
QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH)
QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH | QEMU_ARCH_RISCV)
SRST
``-smbios file=binary``
Load SMBIOS entry from binary file.
@ -2714,7 +2714,7 @@ SRST
``-smbios type=3[,manufacturer=str][,version=str][,serial=str][,asset=str][,sku=str]``
Specify SMBIOS type 3 fields
``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-id=%d]``
``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-family=%d][,processor-id=%d]``
Specify SMBIOS type 4 fields
``-smbios type=11[,value=str][,path=filename]``

View File

@ -34,7 +34,10 @@
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_RV32I RISCV_CPU_TYPE_NAME("rv32i")
#define TYPE_RISCV_CPU_RV32E RISCV_CPU_TYPE_NAME("rv32e")
#define TYPE_RISCV_CPU_RV64I RISCV_CPU_TYPE_NAME("rv64i")
#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e")
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
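
Three of the four bare-CPU type names above are new (the hunk adds three lines), matching the "add rv32i, rv32e and rv64e CPUs" item in the cover letter; rv64i already existed. Their init code lives in target/riscv/cpu.c, whose diff the note just below marks as suppressed. A hedged guess at its shape, using the riscv_cpu_set_misa_ext() helper declared later on this page (illustration only, with a hypothetical function name):

    /* Hedged sketch, not the actual cpu.c code. The commit list above adds
     * riscv_bare_cpu_init() as the shared part; a per-model init might then
     * enable only the base ISA, with no vendor extensions or profile defaults. */
    static void rv32i_bare_cpu_init(Object *obj)
    {
        RISCVCPU *cpu = RISCV_CPU(obj);

        riscv_cpu_set_misa_ext(&cpu->env, RVI);   /* base 'I' ISA only */
    }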

(File diff suppressed because it is too large.)

View File

@ -69,6 +69,7 @@ typedef struct CPUArchState CPURISCVState;
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')
extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
@ -93,6 +94,9 @@ typedef struct riscv_cpu_profile {
extern RISCVCPUProfile *riscv_profiles[];
/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
enum {
PRIV_VERSION_1_10_0 = 0,
PRIV_VERSION_1_11_0,
@ -102,6 +106,7 @@ enum {
};
#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"
enum {
TRANSLATE_SUCCESS,
@ -180,12 +185,10 @@ struct CPUArchState {
target_ulong guest_phys_fault_addr;
target_ulong priv_ver;
target_ulong bext_ver;
target_ulong vext_ver;
/* RISCVMXL, but uint32_t for vmstate migration */
uint32_t misa_mxl; /* current mxl */
uint32_t misa_mxl_max; /* max mxl for this cpu */
uint32_t misa_ext; /* current extensions */
uint32_t misa_ext_mask; /* max ext for this cpu */
uint32_t xl; /* current xlen */
@ -361,6 +364,7 @@ struct CPUArchState {
target_ulong tdata1[RV_MAX_TRIGGERS];
target_ulong tdata2[RV_MAX_TRIGGERS];
target_ulong tdata3[RV_MAX_TRIGGERS];
target_ulong mcontext;
struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
@ -466,6 +470,7 @@ struct RISCVCPUClass {
DeviceRealize parent_realize;
ResettablePhases parent_phases;
uint32_t misa_mxl_max; /* max mxl for this cpu */
};
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
@ -506,8 +511,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);
#ifndef CONFIG_USER_ONLY
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
@ -682,11 +690,17 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
* = 256 >> 7
* = 2
*/
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
int8_t lmul)
{
uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
return cpu->cfg.vlen >> (sew + 3 - lmul);
uint32_t vlen = vlenb << 3;
/*
* We need to use 'vlen' instead of 'vlenb' to
* preserve the '+ 3' in the formula. Otherwise
* we risk a negative shift if vsew < lmul.
*/
return vlen >> (vsew + 3 - lmul);
}
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
@ -769,7 +783,8 @@ enum riscv_pmu_event_idx {
/* used by tcg/tcg-cpu.c*/
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);
typedef struct RISCVCPUMultiExtConfig {
const char *name;
@ -782,7 +797,6 @@ extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
extern Property riscv_cpu_options[];
typedef struct isa_ext_data {
const char *name;

View File

@ -32,14 +32,6 @@
#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
/* Vector Fixed-Point round model */
#define FSR_VXRM_SHIFT 9
#define FSR_VXRM (0x3 << FSR_VXRM_SHIFT)
/* Vector Fixed-Point saturation flag */
#define FSR_VXSAT_SHIFT 8
#define FSR_VXSAT (0x1 << FSR_VXSAT_SHIFT)
/* Control and Status Registers */
/* User Trap Setup */
@ -361,6 +353,7 @@
#define CSR_TDATA2 0x7a2
#define CSR_TDATA3 0x7a3
#define CSR_TINFO 0x7a4
#define CSR_MCONTEXT 0x7a8
/* Debug Mode Registers */
#define CSR_DCSR 0x7b0
@ -905,4 +898,10 @@ typedef enum RISCVException {
/* JVT CSR bits */
#define JVT_MODE 0x3F
#define JVT_BASE (~0x3F)
/* Debug Sdtrig CSR masks */
#define MCONTEXT32 0x0000003F
#define MCONTEXT64 0x0000000000001FFFULL
#define MCONTEXT32_HCONTEXT 0x0000007F
#define MCONTEXT64_HCONTEXT 0x0000000000003FFFULL
#endif
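
The four masks bound what the new write_mcontext() handler in target/riscv/csr.c (shown further down) keeps on a write: 6 bits on RV32 and 13 bits on RV64 without the H extension, 7 and 14 bits with it. A standalone illustration for an RV64 hart without H:

    #include <assert.h>
    #include <stdint.h>

    #define MCONTEXT64 0x0000000000001FFFULL   /* 13 writable bits */

    int main(void)
    {
        uint64_t mcontext = 0;

        /* An oversized guest write is silently truncated to the mask. */
        mcontext = UINT64_C(0xDEADBEEF) & MCONTEXT64;
        assert(mcontext == 0x1EEF);
        return 0;
    }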

View File

@ -78,7 +78,9 @@ struct RISCVCPUConfig {
bool ext_svnapot;
bool ext_svpbmt;
bool ext_zdinx;
bool ext_zaamo;
bool ext_zacas;
bool ext_zalrsc;
bool ext_zawrs;
bool ext_zfa;
bool ext_zfbfmin;
@ -139,11 +141,7 @@ struct RISCVCPUConfig {
bool ext_XVentanaCondOps;
uint32_t pmu_mask;
char *priv_spec;
char *user_spec;
char *bext_spec;
char *vext_spec;
uint16_t vlen;
uint16_t vlenb;
uint16_t elen;
uint16_t cbom_blocksize;
uint16_t cbop_blocksize;

View File

@ -81,13 +81,16 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
* which is not supported by GVEC. So we set vl_eq_vlmax flag to true
* only when maxsz >= 8 bytes.
*/
uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
uint32_t maxsz = vlmax << sew;
/* lmul encoded as in DisasContext::lmul */
int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
uint32_t maxsz = vlmax << vsew;
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
(maxsz >= 8);
flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
FIELD_EX64(env->vtype, VTYPE, VLMUL));
flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
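
A worked example of the reworked vext_get_vlmax() used above (the helper itself is in the cpu.h hunk earlier on this page), assuming VLEN = 256 bits, i.e. vlenb = 32:

    #include <assert.h>
    #include <stdint.h>

    /* Same formula as the helper: vlmax = (vlenb << 3) >> (vsew + 3 - lmul). */
    static uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew, int8_t lmul)
    {
        uint32_t vlen = vlenb << 3;
        return vlen >> (vsew + 3 - lmul);
    }

    int main(void)
    {
        /* SEW = 8 (vsew = 0), LMUL = 2 (lmul = 1):
         * VLMAX = LMUL * VLEN / SEW = 2 * 256 / 8 = 64. Shifting vlenb
         * directly by (vsew - lmul) would be a negative shift here,
         * which is why the helper widens to vlen first. */
        assert(vext_get_vlmax(32, 0, 1) == 64);

        /* SEW = 8, LMUL = 1/2 (lmul = -1): VLMAX = 16. */
        assert(vext_get_vlmax(32, 0, -1) == 16);
        return 0;
    }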

View File

@ -242,7 +242,7 @@ static RISCVException any32(CPURISCVState *env, int csrno)
}
static int aia_any(CPURISCVState *env, int csrno)
static RISCVException aia_any(CPURISCVState *env, int csrno)
{
if (!riscv_cpu_cfg(env)->ext_smaia) {
return RISCV_EXCP_ILLEGAL_INST;
@ -251,7 +251,7 @@ static int aia_any(CPURISCVState *env, int csrno)
return any(env, csrno);
}
static int aia_any32(CPURISCVState *env, int csrno)
static RISCVException aia_any32(CPURISCVState *env, int csrno)
{
if (!riscv_cpu_cfg(env)->ext_smaia) {
return RISCV_EXCP_ILLEGAL_INST;
@ -269,7 +269,7 @@ static RISCVException smode(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
static int smode32(CPURISCVState *env, int csrno)
static RISCVException smode32(CPURISCVState *env, int csrno)
{
if (riscv_cpu_mxl(env) != MXL_RV32) {
return RISCV_EXCP_ILLEGAL_INST;
@ -278,7 +278,7 @@ static int smode32(CPURISCVState *env, int csrno)
return smode(env, csrno);
}
static int aia_smode(CPURISCVState *env, int csrno)
static RISCVException aia_smode(CPURISCVState *env, int csrno)
{
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
@ -287,7 +287,7 @@ static int aia_smode(CPURISCVState *env, int csrno)
return smode(env, csrno);
}
static int aia_smode32(CPURISCVState *env, int csrno)
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
@ -496,7 +496,7 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
static int aia_hmode(CPURISCVState *env, int csrno)
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
{
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
@ -505,7 +505,7 @@ static int aia_hmode(CPURISCVState *env, int csrno)
return hmode(env, csrno);
}
static int aia_hmode32(CPURISCVState *env, int csrno)
static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
{
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
@ -681,9 +681,10 @@ static RISCVException read_vl(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_vlenb(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = riscv_cpu_cfg(env)->vlen >> 3;
*val = riscv_cpu_cfg(env)->vlenb;
return RISCV_EXCP_NONE;
}
@ -738,17 +739,19 @@ static RISCVException write_vstart(CPURISCVState *env, int csrno,
* The vstart CSR is defined to have only enough writable bits
* to hold the largest element index, i.e. lg2(VLEN) bits.
*/
env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen));
env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
return RISCV_EXCP_NONE;
}
static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_vcsr(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
return RISCV_EXCP_NONE;
}
static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@ -798,13 +801,15 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = get_ticks(false);
return RISCV_EXCP_NONE;
}
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = get_ticks(true);
return RISCV_EXCP_NONE;
@ -812,7 +817,8 @@ static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
#else /* CONFIG_USER_ONLY */
static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
target_ulong *val)
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
@ -821,7 +827,8 @@ static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
return RISCV_EXCP_NONE;
}
static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
target_ulong val)
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
uint64_t mhpmevt_val = val;
@ -837,7 +844,8 @@ static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
return RISCV_EXCP_NONE;
}
static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
target_ulong *val)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
@ -846,7 +854,8 @@ static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
return RISCV_EXCP_NONE;
}
static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
target_ulong val)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
uint64_t mhpmevth_val = val;
@ -860,7 +869,8 @@ static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
return RISCV_EXCP_NONE;
}
static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
target_ulong val)
{
int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
@ -885,7 +895,8 @@ static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
return RISCV_EXCP_NONE;
}
static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
target_ulong val)
{
int ctr_idx = csrno - CSR_MCYCLEH;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
@ -945,7 +956,8 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
return RISCV_EXCP_NONE;
}
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
target_ulong *val)
{
uint16_t ctr_index;
@ -960,7 +972,8 @@ static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
return riscv_pmu_read_ctr(env, val, false, ctr_index);
}
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
target_ulong *val)
{
uint16_t ctr_index;
@ -975,7 +988,8 @@ static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *val)
{
int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
int i;
@ -1638,7 +1652,8 @@ static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
return ret;
}
static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_mtopi(CPURISCVState *env, int csrno,
target_ulong *val)
{
int irq;
uint8_t iprio;
@ -1678,8 +1693,9 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
};
}
static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
target_ulong *iselect;
@ -1758,8 +1774,9 @@ static int rmw_iprio(target_ulong xlen,
return 0;
}
static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
bool virt, isel_reserved;
uint8_t *iprio;
@ -1833,8 +1850,9 @@ done:
return RISCV_EXCP_NONE;
}
static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
bool virt;
int ret = -EINVAL;
@ -3031,7 +3049,8 @@ static RISCVException write_satp(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
target_ulong *val)
{
int irq, ret;
target_ulong topei;
@ -3120,7 +3139,8 @@ static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
return RISCV_EXCP_NONE;
}
static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_stopi(CPURISCVState *env, int csrno,
target_ulong *val)
{
int irq;
uint8_t iprio;
@ -3576,19 +3596,21 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hvictl(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->hvictl;
return RISCV_EXCP_NONE;
}
static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
target_ulong val)
{
env->hvictl = val & HVICTL_VALID_MASK;
return RISCV_EXCP_NONE;
}
static int read_hvipriox(CPURISCVState *env, int first_index,
static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
uint8_t *iprio, target_ulong *val)
{
int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
@ -3614,7 +3636,7 @@ static int read_hvipriox(CPURISCVState *env, int first_index,
return RISCV_EXCP_NONE;
}
static int write_hvipriox(CPURISCVState *env, int first_index,
static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
uint8_t *iprio, target_ulong val)
{
int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
@ -3640,42 +3662,50 @@ static int write_hvipriox(CPURISCVState *env, int first_index,
return RISCV_EXCP_NONE;
}
static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
target_ulong *val)
{
return read_hvipriox(env, 0, env->hviprio, val);
}
static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
target_ulong val)
{
return write_hvipriox(env, 0, env->hviprio, val);
}
static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
target_ulong *val)
{
return read_hvipriox(env, 4, env->hviprio, val);
}
static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
target_ulong val)
{
return write_hvipriox(env, 4, env->hviprio, val);
}
static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
target_ulong *val)
{
return read_hvipriox(env, 8, env->hviprio, val);
}
static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
target_ulong val)
{
return write_hvipriox(env, 8, env->hviprio, val);
}
static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
target_ulong *val)
{
return read_hvipriox(env, 12, env->hviprio, val);
}
static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
target_ulong val)
{
return write_hvipriox(env, 12, env->hviprio, val);
}
@ -3699,7 +3729,8 @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
static RISCVException read_vstvec(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->vstvec;
return RISCV_EXCP_NONE;
@ -3906,6 +3937,31 @@ static RISCVException read_tinfo(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
static RISCVException read_mcontext(CPURISCVState *env, int csrno,
target_ulong *val)
{
*val = env->mcontext;
return RISCV_EXCP_NONE;
}
static RISCVException write_mcontext(CPURISCVState *env, int csrno,
target_ulong val)
{
bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
int32_t mask;
if (riscv_has_ext(env, RVH)) {
/* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
} else {
/* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
mask = rv32 ? MCONTEXT32 : MCONTEXT64;
}
env->mcontext = val & mask;
return RISCV_EXCP_NONE;
}
/*
* Functions to access Pointer Masking feature registers
* We have to check if current priv lvl could modify
@ -4800,11 +4856,12 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
/* Debug CSRs */
[CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
[CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
[CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
[CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
[CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
[CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
[CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
[CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
/* User Pointer Masking */
[CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },

View File

@ -940,4 +940,6 @@ void riscv_trigger_reset_hold(CPURISCVState *env)
env->cpu_watchpoint[i] = NULL;
timer_del(env->itrigger_timer[i]);
}
env->mcontext = 0;
}

View File

@ -49,6 +49,7 @@ static const struct TypeSize vec_lanes[] = {
int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
target_ulong tmp;
@ -61,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return 0;
}
switch (env->misa_mxl_max) {
switch (mcc->misa_mxl_max) {
case MXL_RV32:
return gdb_get_reg32(mem_buf, tmp);
case MXL_RV64:
@ -75,12 +76,13 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int length = 0;
target_ulong tmp;
switch (env->misa_mxl_max) {
switch (mcc->misa_mxl_max) {
case MXL_RV32:
tmp = (int32_t)ldl_p(mem_buf);
length = 4;
@ -130,7 +132,7 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n)
static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
{
uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
uint16_t vlenb = riscv_cpu_cfg(env)->vlenb;
if (n < 32) {
int i;
int cnt = 0;
@ -146,7 +148,7 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n)
{
uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
uint16_t vlenb = riscv_cpu_cfg(env)->vlenb;
if (n < 32) {
int i;
for (i = 0; i < vlenb; i += 8) {
@ -214,11 +216,12 @@ static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n)
static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
GString *s = g_string_new(NULL);
riscv_csr_predicate_fn predicate;
int bitsize = 16 << env->misa_mxl_max;
int bitsize = riscv_cpu_max_xlen(mcc);
int i;
#if !defined(CONFIG_USER_ONLY)
@ -266,7 +269,7 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg)
RISCVCPU *cpu = RISCV_CPU(cs);
GString *s = g_string_new(NULL);
g_autoptr(GString) ts = g_string_new("");
int reg_width = cpu->cfg.vlen;
int reg_width = cpu->cfg.vlenb << 3;
int num_regs = 0;
int i;
@ -310,6 +313,7 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg)
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
if (env->misa_ext & RVD) {
@ -326,7 +330,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
ricsv_gen_dynamic_vector_xml(cs, base_reg),
"riscv-vector.xml", 0);
}
switch (env->misa_mxl_max) {
switch (mcc->misa_mxl_max) {
case MXL_RV32:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,

View File

@ -18,6 +18,18 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define REQUIRE_A_OR_ZAAMO(ctx) do { \
if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \
return false; \
} \
} while (0)
#define REQUIRE_A_OR_ZALRSC(ctx) do { \
if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) { \
return false; \
} \
} while (0)
static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv src1;
@ -96,132 +108,143 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZALRSC(ctx);
return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
}
static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZALRSC(ctx);
return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
}
static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
{
REQUIRE_EXT(ctx, RVA);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
}
static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZALRSC(ctx);
return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ);
}
static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZALRSC(ctx);
return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
}
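
To summarize the gating the two REQUIRE_A_OR_* macros introduce: a CPU with the full 'A' extension still decodes everything, Zaamo alone enables only the AMO instructions, and Zalrsc alone only LR/SC. A standalone model of those predicates (illustration only; in QEMU a failed check makes the trans_* function return false, which in turn raises an illegal-instruction exception):

    #include <assert.h>
    #include <stdbool.h>

    struct cfg { bool has_a, ext_zaamo, ext_zalrsc; };

    static bool lr_sc_ok(const struct cfg *c) { return c->has_a || c->ext_zalrsc; }
    static bool amo_ok(const struct cfg *c)   { return c->has_a || c->ext_zaamo; }

    int main(void)
    {
        struct cfg a_only      = { .has_a = true };
        struct cfg zaamo_only  = { .ext_zaamo = true };
        struct cfg zalrsc_only = { .ext_zalrsc = true };

        assert(lr_sc_ok(&a_only) && amo_ok(&a_only));
        assert(!lr_sc_ok(&zaamo_only) && amo_ok(&zaamo_only));
        assert(lr_sc_ok(&zalrsc_only) && !amo_ok(&zalrsc_only));
        return 0;
    }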

View File

@ -83,8 +83,8 @@ static bool trans_vfncvtbf16_f_f_w(DisasContext *ctx, arg_vfncvtbf16_f_f_w *a)
data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
vreg_ofs(ctx, a->rs2), tcg_env,
ctx->cfg_ptr->vlen / 8,
ctx->cfg_ptr->vlen / 8, data,
ctx->cfg_ptr->vlenb,
ctx->cfg_ptr->vlenb, data,
gen_helper_vfncvtbf16_f_f_w);
mark_vs_dirty(ctx);
gen_set_label(over);
@ -112,8 +112,8 @@ static bool trans_vfwcvtbf16_f_f_v(DisasContext *ctx, arg_vfwcvtbf16_f_f_v *a)
data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
vreg_ofs(ctx, a->rs2), tcg_env,
ctx->cfg_ptr->vlen / 8,
ctx->cfg_ptr->vlen / 8, data,
ctx->cfg_ptr->vlenb,
ctx->cfg_ptr->vlenb, data,
gen_helper_vfwcvtbf16_f_f_v);
mark_vs_dirty(ctx);
gen_set_label(over);
@ -143,8 +143,8 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
tcg_gen_gvec_4_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
vreg_ofs(ctx, a->rs1),
vreg_ofs(ctx, a->rs2), tcg_env,
ctx->cfg_ptr->vlen / 8,
ctx->cfg_ptr->vlen / 8, data,
ctx->cfg_ptr->vlenb,
ctx->cfg_ptr->vlenb, data,
gen_helper_vfwmaccbf16_vv);
mark_vs_dirty(ctx);
gen_set_label(over);


@ -217,7 +217,7 @@ static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8;
return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlenb;
}
/* check functions */
@ -627,11 +627,11 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
* simd_desc supports at most 2048 bytes, while in this implementation the
* maximum vector group length is 4096 bytes, so the information is split
* into two parts.
*
* The first part is vlen in bytes, encoded in maxsz of simd_desc.
* The first part is vlen in bytes (vlenb), encoded in maxsz of simd_desc.
* The second part is lmul, encoded in data of simd_desc.
*/
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
@ -791,8 +791,8 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
mask = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
stride = get_gpr(s, rs2, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
@ -897,8 +897,8 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
mask = tcg_temp_new_ptr();
index = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
@ -1036,8 +1036,8 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
@ -1086,7 +1086,7 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
uint32_t width, gen_helper_ldst_whole *fn,
DisasContext *s, bool is_store)
{
uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / width;
uint32_t evl = s->cfg_ptr->vlenb * nf / width;
TCGLabel *over = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over);
@ -1096,8 +1096,8 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
@ -1160,12 +1160,12 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true)
/*
* MAXSZ returns the maximum vector size, in bytes, that can be operated on,
* which is used in GVEC IR when the vl_eq_vlmax flag is set to true
* to accerlate vector operation.
* to accelerate vector operation.
*/
static inline uint32_t MAXSZ(DisasContext *s)
{
int scale = s->lmul - 3;
return s->cfg_ptr->vlen >> -scale;
int max_sz = s->cfg_ptr->vlenb * 8;
return max_sz >> (3 - s->lmul);
}
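Worked example for the rewritten MAXSZ (numbers chosen for illustration): with vlenb = 16 bytes (VLEN = 128 bits) and lmul = 0 (LMUL = 1), max_sz = 128 and MAXSZ returns 128 >> 3 = 16 bytes, one full vector register; with lmul = 3 (LMUL = 8) it returns 128 >> 0 = 128 bytes, the whole register group. Both values match the previous vlen >> -scale computation.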
static bool opivv_check(DisasContext *s, arg_rmrr *a)
@ -1199,8 +1199,8 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data, fn);
}
mark_vs_dirty(s);
gen_set_label(over);
@ -1248,8 +1248,8 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
@ -1410,8 +1410,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
@ -1492,8 +1492,8 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2),
tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8,
tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb,
data, fn);
mark_vs_dirty(s);
gen_set_label(over);
@ -1568,8 +1568,8 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2),
tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -1639,8 +1639,8 @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -1831,8 +1831,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2036,8 +2036,8 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data,
fns[s->sew]);
gen_set_label(over);
}
@ -2082,8 +2082,8 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
};
tcg_gen_ext_tl_i64(s1_i64, s1);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
fns[s->sew](dest, s1_i64, tcg_env, desc);
}
@ -2121,8 +2121,8 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
s1 = tcg_constant_i64(simm);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
fns[s->sew](dest, s1, tcg_env, desc);
@ -2275,8 +2275,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2303,8 +2303,8 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
@ -2391,8 +2391,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2465,8 +2465,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2581,8 +2581,8 @@ static bool do_opfv(DisasContext *s, arg_rmr *a,
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), tcg_env,
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -2691,8 +2691,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
do_nanbox(s, t1, cpu_fpr[a->rs1]);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
fns[s->sew - 1](dest, t1, tcg_env, desc);
@ -2770,8 +2770,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2821,8 +2821,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2888,8 +2888,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2937,8 +2937,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -3027,8 +3027,8 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, fn); \
s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, data, fn); \
mark_vs_dirty(s); \
gen_set_label(over); \
return true; \
@ -3061,8 +3061,8 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
dst = dest_gpr(s, a->rd);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
@ -3090,8 +3090,8 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
dst = dest_gpr(s, a->rd);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data));
tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
@ -3128,8 +3128,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
tcg_env, s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, \
tcg_env, s->cfg_ptr->vlenb, \
s->cfg_ptr->vlenb, \
data, fn); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -3171,8 +3171,8 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
};
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), tcg_env,
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fns[s->sew]);
s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data, fns[s->sew]);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -3200,8 +3200,8 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
gen_helper_vid_v_w, gen_helper_vid_v_d,
};
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8,
tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb,
data, fns[s->sew]);
mark_vs_dirty(s);
gen_set_label(over);
@ -3535,8 +3535,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
}
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
int scale = s->lmul - (s->sew + 3);
int vlmax = s->cfg_ptr->vlen >> -scale;
int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
TCGv_i64 dest = tcg_temp_new_i64();
if (a->rs1 == 0) {
@ -3566,8 +3565,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
}
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
int scale = s->lmul - (s->sew + 3);
int vlmax = s->cfg_ptr->vlen >> -scale;
int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
if (a->rs1 >= vlmax) {
tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
MAXSZ(s), MAXSZ(s), 0);
@ -3620,8 +3618,8 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
data = FIELD_DP32(data, VDATA, VTA, s->vta);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data,
fns[s->sew]);
mark_vs_dirty(s);
gen_set_label(over);
@ -3641,7 +3639,7 @@ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
vext_check_isa_ill(s) && \
QEMU_IS_ALIGNED(a->rd, LEN) && \
QEMU_IS_ALIGNED(a->rs2, LEN)) { \
uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \
uint32_t maxsz = s->cfg_ptr->vlenb * LEN; \
if (s->vstart_eq_zero) { \
tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \
vreg_ofs(s, a->rs2), maxsz, maxsz); \
@ -3723,8 +3721,8 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), tcg_env,
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data, fn);
mark_vs_dirty(s);
gen_set_label(over);


@ -174,7 +174,7 @@ GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvkb_vx_check)
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, \
data, fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -267,7 +267,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
rd_v = tcg_temp_new_ptr(); \
rs2_v = tcg_temp_new_ptr(); \
desc = tcg_constant_i32( \
simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
simd_desc(s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, data)); \
tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \
tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \
gen_helper_##NAME(rd_v, rs2_v, tcg_env, desc); \
@ -345,7 +345,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
rs2_v = tcg_temp_new_ptr(); \
uimm_v = tcg_constant_i32(a->rs1); \
desc = tcg_constant_i32( \
simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
simd_desc(s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, data)); \
tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \
tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \
gen_helper_##NAME(rd_v, rs2_v, uimm_v, tcg_env, desc); \
@ -413,7 +413,7 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
\
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, \
data, gen_helper_##NAME); \
\
mark_vs_dirty(s); \
@ -466,8 +466,8 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data,
s->sew == MO_32 ?
gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv);
@ -500,8 +500,8 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlenb,
s->cfg_ptr->vlenb, data,
s->sew == MO_32 ?
gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv);


@ -992,7 +992,6 @@ static bool trans_th_sfence_vmas(DisasContext *ctx, arg_th_sfence_vmas *a)
#endif
}
#ifndef CONFIG_USER_ONLY
static void gen_th_sync_local(DisasContext *ctx)
{
/*
@ -1003,14 +1002,12 @@ static void gen_th_sync_local(DisasContext *ctx)
tcg_gen_exit_tb(NULL, 0);
ctx->base.is_jmp = DISAS_NORETURN;
}
#endif
static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a)
{
(void) a;
REQUIRE_XTHEADSYNC(ctx);
#ifndef CONFIG_USER_ONLY
REQUIRE_PRIV_MSU(ctx);
/*
@ -1019,9 +1016,6 @@ static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a)
gen_th_sync_local(ctx);
return true;
#else
return false;
#endif
}
static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a)
@ -1029,7 +1023,6 @@ static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a)
(void) a;
REQUIRE_XTHEADSYNC(ctx);
#ifndef CONFIG_USER_ONLY
REQUIRE_PRIV_MSU(ctx);
/*
@ -1038,9 +1031,6 @@ static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a)
gen_th_sync_local(ctx);
return true;
#else
return false;
#endif
}
static bool trans_th_sync_is(DisasContext *ctx, arg_th_sync_is *a)


@ -86,6 +86,27 @@ static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
}
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
uint64_t size_ctz = __builtin_ctz(size_b);
return id | (size_ctz << KVM_REG_SIZE_SHIFT);
}
static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
uint64_t idx)
{
uint64_t id;
size_t size_b;
g_assert(idx < 32);
id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx);
size_b = cpu->cfg.vlenb;
return kvm_encode_reg_size_id(id, size_b);
}
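For illustration (the numbers are an assumption, not taken from the patch): with vlenb = 16 (VLEN = 128), size_b = 16 and __builtin_ctz(16) = 4, so the size field of the returned id encodes a 2^4 = 16-byte register, which is exactly how many bytes kvm_get_one_reg()/kvm_set_one_reg() will copy for each vector register.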
#define RISCV_CORE_REG(env, name) \
kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
KVM_REG_RISCV_CORE_REG(name))
@ -145,7 +166,7 @@ typedef struct KVMCPUConfig {
const char *name;
const char *description;
target_ulong offset;
int kvm_reg_id;
uint64_t kvm_reg_id;
bool user_set;
bool supported;
} KVMCPUConfig;
@ -352,29 +373,12 @@ static KVMCPUConfig kvm_cboz_blocksize = {
.kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
};
static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v,
const char *name,
void *opaque, Error **errp)
{
KVMCPUConfig *cbomz_cfg = opaque;
RISCVCPU *cpu = RISCV_CPU(obj);
uint16_t value, *host_val;
if (!visit_type_uint16(v, name, &value, errp)) {
return;
}
host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
if (value != *host_val) {
error_report("Unable to set %s to a different value than "
"the host (%u)",
cbomz_cfg->name, *host_val);
exit(EXIT_FAILURE);
}
cbomz_cfg->user_set = true;
}
static KVMCPUConfig kvm_v_vlenb = {
.name = "vlenb",
.offset = CPU_CFG_OFFSET(vlenb),
.kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
};
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
@ -493,14 +497,6 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
NULL, multi_cfg);
}
object_property_add(cpu_obj, "cbom_blocksize", "uint16",
NULL, kvm_cpu_set_cbomz_blksize,
NULL, &kvm_cbom_blocksize);
object_property_add(cpu_obj, "cboz_blocksize", "uint16",
NULL, kvm_cpu_set_cbomz_blksize,
NULL, &kvm_cboz_blocksize);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);
@ -716,9 +712,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
static int kvm_riscv_get_regs_vector(CPUState *cs)
{
CPURISCVState *env = &RISCV_CPU(cs)->env;
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
target_ulong reg;
int ret = 0;
uint64_t vreg_id;
int vreg_idx, ret = 0;
if (!riscv_has_ext(env, RVV)) {
return 0;
@ -742,14 +740,39 @@ static int kvm_riscv_get_regs_vector(CPUState *cs)
}
env->vtype = reg;
if (kvm_v_vlenb.supported) {
ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
if (ret) {
return ret;
}
cpu->cfg.vlenb = reg;
for (int i = 0; i < 32; i++) {
/*
* vreg[] is statically allocated using RV_VLEN_MAX.
* Use it instead of vlenb to calculate vreg_idx for
* simplicity.
*/
vreg_idx = i * RV_VLEN_MAX / 64;
vreg_id = kvm_riscv_vector_reg_id(cpu, i);
ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
if (ret) {
return ret;
}
}
}
return 0;
}
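To make the vreg_idx computation concrete (assuming RV_VLEN_MAX is 1024 bits, the static size used for env->vreg[]): each architectural vector register occupies RV_VLEN_MAX / 64 = 16 uint64_t slots, so register i starts at env->vreg[i * 16]; only the first vlenb bytes of that slot group are transferred, because the register id built by kvm_riscv_vector_reg_id() encodes the size as vlenb.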
static int kvm_riscv_put_regs_vector(CPUState *cs)
{
CPURISCVState *env = &RISCV_CPU(cs)->env;
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
target_ulong reg;
int ret = 0;
uint64_t vreg_id;
int vreg_idx, ret = 0;
if (!riscv_has_ext(env, RVV)) {
return 0;
@ -769,6 +792,29 @@ static int kvm_riscv_put_regs_vector(CPUState *cs)
reg = env->vtype;
ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
if (ret) {
return ret;
}
if (kvm_v_vlenb.supported) {
reg = cpu->cfg.vlenb;
ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
for (int i = 0; i < 32; i++) {
/*
* vreg[] is statically allocated using RV_VLEN_MAX.
* Use it instead of vlenb to calculate vreg_idx for
* simplicity.
*/
vreg_idx = i * RV_VLEN_MAX / 64;
vreg_id = kvm_riscv_vector_reg_id(cpu, i);
ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
if (ret) {
return ret;
}
}
}
return ret;
}
@ -953,6 +999,33 @@ static int uint64_cmp(const void *a, const void *b)
return 0;
}
static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
struct kvm_reg_list *reglist)
{
struct kvm_one_reg reg;
struct kvm_reg_list *reg_search;
uint64_t val;
int ret;
reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
if (reg_search) {
reg.id = kvm_v_vlenb.kvm_reg_id;
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to read vlenb register, error code: %s",
strerrorname_np(errno));
exit(EXIT_FAILURE);
}
kvm_v_vlenb.supported = true;
cpu->cfg.vlenb = val;
}
}
static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
KVMCPUConfig *multi_ext_cfg;
@ -1027,6 +1100,10 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
if (cpu->cfg.ext_zicboz) {
kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
}
if (riscv_has_ext(&cpu->env, RVV)) {
kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
}
}
static void riscv_init_kvm_registers(Object *cpu_obj)
@ -1559,19 +1636,10 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
static void kvm_cpu_instance_init(CPUState *cs)
{
Object *obj = OBJECT(RISCV_CPU(cs));
DeviceState *dev = DEVICE(obj);
riscv_init_kvm_registers(obj);
kvm_riscv_add_cpu_user_properties(obj);
for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
/* Check if we have a specific KVM handler for the option */
if (object_property_find(obj, prop->name)) {
continue;
}
qdev_property_add_static(dev, prop);
}
}
/*
@ -1598,6 +1666,88 @@ static bool kvm_cpu_realize(CPUState *cs, Error **errp)
return true;
}
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
CPURISCVState *env = &cpu->env;
KVMScratchCPU kvmcpu;
struct kvm_one_reg reg;
uint64_t val;
int ret;
/* short-circuit without spinning the scratch CPU */
if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz &&
!riscv_has_ext(env, RVV)) {
return;
}
if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
error_setg(errp, "Unable to create scratch KVM cpu");
return;
}
if (cpu->cfg.ext_zicbom &&
riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
kvm_cbom_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_setg(errp, "Unable to read cbom_blocksize, error %d", errno);
return;
}
if (cpu->cfg.cbom_blocksize != val) {
error_setg(errp, "Unable to set cbom_blocksize to a different "
"value than the host (%lu)", val);
return;
}
}
if (cpu->cfg.ext_zicboz &&
riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
kvm_cboz_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_setg(errp, "Unable to read cboz_blocksize, error %d", errno);
return;
}
if (cpu->cfg.cboz_blocksize != val) {
error_setg(errp, "Unable to set cboz_blocksize to a different "
"value than the host (%lu)", val);
return;
}
}
/* Users are setting vlen, not vlenb */
if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) {
if (!kvm_v_vlenb.supported) {
error_setg(errp, "Unable to set 'vlenb': register not supported");
return;
}
reg.id = kvm_v_vlenb.kvm_reg_id;
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_setg(errp, "Unable to read vlenb register, error %d", errno);
return;
}
if (cpu->cfg.vlenb != val) {
error_setg(errp, "Unable to set 'vlen' to a different "
"value than the host (%lu)", val * 8);
return;
}
}
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
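Note that the last check compares cpu->cfg.vlenb with the host's vlenb but reports the mismatch as a 'vlen' value (val * 8): the user-visible property is VLEN in bits, while both QEMU's cfg and the KVM register track the width in bytes.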
static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
@ -1619,14 +1769,14 @@ static void kvm_cpu_accel_register_types(void)
}
type_init(kvm_cpu_accel_register_types);
static void riscv_host_cpu_init(Object *obj)
static void riscv_host_cpu_class_init(ObjectClass *c, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
#if defined(TARGET_RISCV32)
env->misa_mxl_max = env->misa_mxl = MXL_RV32;
mcc->misa_mxl_max = MXL_RV32;
#elif defined(TARGET_RISCV64)
env->misa_mxl_max = env->misa_mxl = MXL_RV64;
mcc->misa_mxl_max = MXL_RV64;
#endif
}
@ -1634,7 +1784,7 @@ static const TypeInfo riscv_kvm_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU_HOST,
.parent = TYPE_RISCV_CPU,
.instance_init = riscv_host_cpu_init,
.class_init = riscv_host_cpu_class_init,
}
};


@ -27,5 +27,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
uint64_t guest_num);
void riscv_kvm_aplic_request(void *opaque, int irq, int level);
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state);
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
#endif


@ -178,10 +178,9 @@ static const VMStateDescription vmstate_pointermasking = {
static bool rv128_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
CPURISCVState *env = &cpu->env;
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque);
return env->misa_mxl_max == MXL_RV128;
return mcc->misa_mxl_max == MXL_RV128;
}
static const VMStateDescription vmstate_rv128 = {
@ -372,7 +371,7 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
VMSTATE_UINT32(env.misa_ext, RISCVCPU),
VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
VMSTATE_UNUSED(4),
VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
VMSTATE_UINTTL(env.priv, RISCVCPU),
VMSTATE_BOOL(env.virt_enabled, RISCVCPU),


@ -268,97 +268,24 @@ static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
}
}
static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
CPUClass *cc = CPU_CLASS(mcc);
CPURISCVState *env = &cpu->env;
/* Validate that MISA_MXL is set properly. */
switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
case MXL_RV64:
case MXL_RV128:
cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
break;
#endif
case MXL_RV32:
cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
break;
default:
g_assert_not_reached();
}
if (env->misa_mxl_max != env->misa_mxl) {
error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
return;
}
}
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
CPURISCVState *env = &cpu->env;
int priv_version = -1;
if (cpu->cfg.priv_spec) {
if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
priv_version = PRIV_VERSION_1_12_0;
} else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
priv_version = PRIV_VERSION_1_11_0;
} else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
priv_version = PRIV_VERSION_1_10_0;
} else {
error_setg(errp,
"Unsupported privilege spec version '%s'",
cpu->cfg.priv_spec);
return;
}
env->priv_ver = priv_version;
}
}
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
Error **errp)
{
if (!is_power_of_2(cfg->vlen)) {
error_setg(errp, "Vector extension VLEN must be power of 2");
return;
}
uint32_t vlen = cfg->vlenb << 3;
if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
if (vlen > RV_VLEN_MAX || vlen < 128) {
error_setg(errp,
"Vector extension implementation only supports VLEN "
"in the range [128, %d]", RV_VLEN_MAX);
return;
}
if (!is_power_of_2(cfg->elen)) {
error_setg(errp, "Vector extension ELEN must be power of 2");
return;
}
if (cfg->elen > 64 || cfg->elen < 8) {
error_setg(errp,
"Vector extension implementation only supports ELEN "
"in the range [8, 64]");
return;
}
if (cfg->vext_spec) {
if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
env->vext_ver = VEXT_VERSION_1_00_0;
} else {
error_setg(errp, "Unsupported vector spec version '%s'",
cfg->vext_spec);
return;
}
} else if (env->vext_ver == 0) {
qemu_log("vector version is not specified, "
"use the default value v1.0\n");
env->vext_ver = VEXT_VERSION_1_00_0;
}
}
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
@ -442,12 +369,42 @@ static void riscv_cpu_validate_g(RISCVCPU *cpu)
}
}
static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
const char *warn_msg = "RVB mandates disabled extension %s";
if (!cpu->cfg.ext_zba) {
if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
cpu->cfg.ext_zba = true;
} else {
warn_report(warn_msg, "zba");
}
}
if (!cpu->cfg.ext_zbb) {
if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
cpu->cfg.ext_zbb = true;
} else {
warn_report(warn_msg, "zbb");
}
}
if (!cpu->cfg.ext_zbs) {
if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
cpu->cfg.ext_zbs = true;
} else {
warn_report(warn_msg, "zbs");
}
}
}
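With RVB now exposed as a MISA bit (see the MISA_CFG(RVB, false) entry added below), enabling it implies Zba/Zbb/Zbs unless the user explicitly disabled one of them, in which case only a warning is printed. A hedged usage example, assuming the usual lowercase single-letter MISA property names:

    qemu-system-riscv64 -cpu rv64,b=true    # also turns on zba, zbb and zbs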
/*
* Check consistency between chosen extensions while setting
* cpu->cfg accordingly.
*/
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
@ -455,6 +412,10 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
riscv_cpu_validate_g(cpu);
}
if (riscv_has_ext(env, RVB)) {
riscv_cpu_validate_b(cpu);
}
if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
error_setg(errp,
"I and E extensions are incompatible");
@ -610,7 +571,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
}
@ -618,7 +579,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
/* zca, zcd and zcf has a PRIV 1.12.0 restriction */
if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
if (riscv_has_ext(env, RVD)) {
@ -626,7 +587,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
}
}
if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
error_setg(errp, "Zcf extension is only relevant to RV32");
return;
}
@ -876,12 +837,6 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
riscv_cpu_validate_priv_spec(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
riscv_cpu_validate_misa_priv(env, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
@ -917,11 +872,6 @@ static bool riscv_cpu_is_generic(Object *cpu_obj)
return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
static bool riscv_cpu_is_vendor(Object *cpu_obj)
{
return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}
/*
* We'll get here via the following path:
*
@ -932,7 +882,6 @@ static bool riscv_cpu_is_vendor(Object *cpu_obj)
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
RISCVCPU *cpu = RISCV_CPU(cs);
Error *local_err = NULL;
if (!riscv_cpu_tcg_compatible(cpu)) {
g_autofree char *name = riscv_cpu_get_name(cpu);
@ -941,14 +890,9 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
return false;
}
riscv_cpu_validate_misa_mxl(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return false;
}
#ifndef CONFIG_USER_ONLY
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
CPU(cs)->tcg_cflags |= CF_PCREL;
@ -1056,6 +1000,7 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
MISA_CFG(RVJ, false),
MISA_CFG(RVV, false),
MISA_CFG(RVG, false),
MISA_CFG(RVB, false),
};
/*
@ -1326,10 +1271,6 @@ static void riscv_cpu_add_user_properties(Object *obj)
riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);
riscv_cpu_add_profiles(obj);
for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
qdev_property_add_static(DEVICE(obj), prop);
}
}
/*
@ -1343,7 +1284,7 @@ static void riscv_init_max_cpu_extensions(Object *obj)
const RISCVCPUMultiExtConfig *prop;
/* Enable RVG, RVJ and RVV that are disabled by default */
riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV);
riscv_cpu_set_misa_ext(env, env->misa_ext | RVG | RVJ | RVV);
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
isa_ext_update_enabled(cpu, prop->offset, true);


@ -1168,6 +1168,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPURISCVState *env = cpu_env(cs);
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
uint32_t tb_flags = ctx->base.tb->flags;
@ -1189,7 +1190,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO);
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
ctx->misa_mxl_max = env->misa_mxl_max;
ctx->misa_mxl_max = mcc->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;


@ -35,19 +35,28 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
{
int vlmax, vl;
RISCVCPU *cpu = env_archcpu(env);
uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
uint64_t vlmul = FIELD_EX64(s2, VTYPE, VLMUL);
uint8_t vsew = FIELD_EX64(s2, VTYPE, VSEW);
uint16_t sew = 8 << vsew;
uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
int xlen = riscv_cpu_xlen(env);
bool vill = (s2 >> (xlen - 1)) & 0x1;
target_ulong reserved = s2 &
MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
xlen - 1 - R_VTYPE_RESERVED_SHIFT);
int8_t lmul;
if (lmul & 4) {
/* Fractional LMUL - check LMUL * VLEN >= SEW */
if (lmul == 4 ||
cpu->cfg.vlen >> (8 - lmul) < sew) {
if (vlmul & 4) {
/*
* Fractional LMUL, check:
*
* VLEN * LMUL >= SEW
* VLEN >> (8 - lmul) >= sew
* (vlenb << 3) >> (8 - lmul) >= sew
* vlenb >> (8 - 3 - lmul) >= sew
*/
if (vlmul == 4 ||
cpu->cfg.vlenb >> (8 - 3 - vlmul) < sew) {
vill = true;
}
}
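A quick check that the rewritten condition is equivalent (values chosen for illustration): with vlenb = 16 (VLEN = 128), SEW = 64 (vsew = 3) and LMUL = 1/8 (vlmul = 5), the new test gives vlenb >> (8 - 3 - vlmul) = 16 >> 0 = 16 < 64, so vill is set, matching the old vlen >> (8 - lmul) = 128 >> 3 = 16 < 64.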
@ -61,7 +70,9 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
return 0;
}
vlmax = vext_get_vlmax(cpu, s2);
/* lmul encoded as in DisasContext::lmul */
lmul = sextract32(FIELD_EX64(s2, VTYPE, VLMUL), 0, 3);
vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
if (s1 <= vlmax) {
vl = s1;
} else {
@ -559,7 +570,7 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
{
uint32_t i, k, off, pos;
uint32_t nf = vext_nf(desc);
uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
uint32_t vlenb = riscv_cpu_cfg(env)->vlenb;
uint32_t max_elems = vlenb >> log2_esz;
k = env->vstart / max_elems;
@ -930,7 +941,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
{ \
uint32_t vl = env->vl; \
uint32_t vm = vext_vm(desc); \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
@ -968,7 +979,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
{ \
uint32_t vl = env->vl; \
uint32_t vm = vext_vm(desc); \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
@ -1172,7 +1183,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
@ -1237,7 +1248,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
@ -3972,7 +3983,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
@ -4012,7 +4023,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
@ -4529,7 +4540,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t desc) \
{ \
uint32_t vl = env->vl; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3;\
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
int a, b; \
@ -4616,7 +4627,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
{
uint32_t vm = vext_vm(desc);
uint32_t vl = env->vl;
uint32_t total_elems = riscv_cpu_cfg(env)->vlen;
uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3;
uint32_t vta_all_1s = vext_vta_all_1s(desc);
uint32_t vma = vext_vma(desc);
int i;