diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index a50717be87..5d644eb7b1 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -41,6 +41,7 @@ config RISCV_VIRT select RISCV_IMSIC select SIFIVE_PLIC select SIFIVE_TEST + select SMBIOS select VIRTIO_MMIO select FW_CFG_DMA select PLATFORM_BUS diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 0ffca05189..12f9792245 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -36,7 +36,8 @@ bool riscv_is_32bit(RISCVHartArrayState *harts) { - return harts->harts[0].env.misa_mxl_max == MXL_RV32; + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(&harts->harts[0]); + return mcc->misa_mxl_max == MXL_RV32; } /* diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c index d319aefb45..cf686f4ff1 100644 --- a/hw/riscv/numa.c +++ b/hw/riscv/numa.c @@ -167,7 +167,8 @@ void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name, void riscv_socket_fdt_write_distance_matrix(const MachineState *ms) { int i, j, idx; - uint32_t *dist_matrix, dist_matrix_size; + g_autofree uint32_t *dist_matrix = NULL; + uint32_t dist_matrix_size; if (numa_enabled(ms) && ms->numa_state->have_numa_distance) { dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms); @@ -189,7 +190,6 @@ void riscv_socket_fdt_write_distance_matrix(const MachineState *ms) "numa-distance-map-v1"); qemu_fdt_setprop(ms->fdt, "/distance-map", "distance-matrix", dist_matrix, dist_matrix_size); - g_free(dist_matrix); } } diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index 5207ec1fa5..af5f923f54 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -171,7 +171,6 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, int cpu_phandle = phandle++; nodename = g_strdup_printf("/cpus/cpu@%d", cpu); char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); - char *isa; qemu_fdt_add_subnode(fdt, nodename); /* cpu 0 is the management hart that does not have mmu */ if (cpu != 0) { @@ -180,11 +179,10 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, } else { qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48"); } - isa = riscv_isa_string(&s->soc.u_cpus.harts[cpu - 1]); + riscv_isa_write_fdt(&s->soc.u_cpus.harts[cpu - 1], fdt, nodename); } else { - isa = riscv_isa_string(&s->soc.e_cpus.harts[0]); + riscv_isa_write_fdt(&s->soc.e_cpus.harts[0], fdt, nodename); } - qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa); qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv"); qemu_fdt_setprop_string(fdt, nodename, "status", "okay"); qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu); @@ -194,7 +192,6 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc"); qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0); qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1); - g_free(isa); g_free(intc); g_free(nodename); } diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index 81f7e53aed..64074395bc 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -59,7 +59,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, MachineState *ms = MACHINE(s); uint32_t *clint_cells; uint32_t cpu_phandle, intc_phandle, phandle = 1; - char *name, *mem_name, *clint_name, *clust_name; + char *mem_name, *clint_name, *clust_name; char *core_name, *cpu_name, *intc_name; static const char * const clint_compat[2] = { "sifive,clint0", "riscv,clint0" @@ -113,9 +113,7 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap, } else { 
qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48"); } - name = riscv_isa_string(&s->soc[socket].harts[cpu]); - qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name); - g_free(name); + riscv_isa_write_fdt(&s->soc[socket].harts[cpu], fdt, cpu_name); qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv"); qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay"); qemu_fdt_setprop_cell(fdt, cpu_name, "reg", diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c index 26c7e4482d..fb8baf64f6 100644 --- a/hw/riscv/virt-acpi-build.c +++ b/hw/riscv/virt-acpi-build.c @@ -196,7 +196,7 @@ static void build_rhct(GArray *table_data, RISCVCPU *cpu = &s->soc[0].harts[0]; uint32_t mmu_offset = 0; uint8_t satp_mode_max; - char *isa; + g_autofree char *isa = NULL; AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id, .oem_table_id = s->oem_table_id }; diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index f9fd1341fc..fd35c74781 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -36,6 +36,7 @@ #include "hw/riscv/boot.h" #include "hw/riscv/numa.h" #include "kvm/kvm_riscv.h" +#include "hw/firmware/smbios.h" #include "hw/intc/riscv_aclint.h" #include "hw/intc/riscv_aplic.h" #include "hw/intc/sifive_plic.h" @@ -215,12 +216,15 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, int cpu; uint32_t cpu_phandle; MachineState *ms = MACHINE(s); - char *name, *cpu_name, *core_name, *intc_name, *sv_name; bool is_32_bit = riscv_is_32bit(&s->soc[0]); uint8_t satp_mode_max; for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu]; + g_autofree char *cpu_name = NULL; + g_autofree char *core_name = NULL; + g_autofree char *intc_name = NULL; + g_autofree char *sv_name = NULL; cpu_phandle = (*phandle)++; @@ -233,12 +237,9 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, sv_name = g_strdup_printf("riscv,%s", satp_mode_str(satp_mode_max, is_32_bit)); qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name); - g_free(sv_name); } - name = riscv_isa_string(cpu_ptr); - qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name); - g_free(name); + riscv_isa_write_fdt(cpu_ptr, ms->fdt, cpu_name); if (cpu_ptr->cfg.ext_zicbom) { qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size", @@ -277,17 +278,13 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, core_name = g_strdup_printf("%s/core%d", clust_name, cpu); qemu_fdt_add_subnode(ms->fdt, core_name); qemu_fdt_setprop_cell(ms->fdt, core_name, "cpu", cpu_phandle); - - g_free(core_name); - g_free(intc_name); - g_free(cpu_name); } } static void create_fdt_socket_memory(RISCVVirtState *s, const MemMapEntry *memmap, int socket) { - char *mem_name; + g_autofree char *mem_name = NULL; uint64_t addr, size; MachineState *ms = MACHINE(s); @@ -299,7 +296,6 @@ static void create_fdt_socket_memory(RISCVVirtState *s, addr >> 32, addr, size >> 32, size); qemu_fdt_setprop_string(ms->fdt, mem_name, "device_type", "memory"); riscv_socket_fdt_write_id(ms, mem_name, socket); - g_free(mem_name); } static void create_fdt_socket_clint(RISCVVirtState *s, @@ -307,8 +303,8 @@ static void create_fdt_socket_clint(RISCVVirtState *s, uint32_t *intc_phandles) { int cpu; - char *clint_name; - uint32_t *clint_cells; + g_autofree char *clint_name = NULL; + g_autofree uint32_t *clint_cells = NULL; unsigned long clint_addr; MachineState *ms = MACHINE(s); static const char * const clint_compat[2] = { @@ -335,9 +331,6 @@ static void 
create_fdt_socket_clint(RISCVVirtState *s, qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended", clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); riscv_socket_fdt_write_id(ms, clint_name, socket); - g_free(clint_name); - - g_free(clint_cells); } static void create_fdt_socket_aclint(RISCVVirtState *s, @@ -348,9 +341,9 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, char *name; unsigned long addr, size; uint32_t aclint_cells_size; - uint32_t *aclint_mswi_cells; - uint32_t *aclint_sswi_cells; - uint32_t *aclint_mtimer_cells; + g_autofree uint32_t *aclint_mswi_cells = NULL; + g_autofree uint32_t *aclint_sswi_cells = NULL; + g_autofree uint32_t *aclint_mtimer_cells = NULL; MachineState *ms = MACHINE(s); aclint_mswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); @@ -422,10 +415,6 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, riscv_socket_fdt_write_id(ms, name, socket); g_free(name); } - - g_free(aclint_mswi_cells); - g_free(aclint_mtimer_cells); - g_free(aclint_sswi_cells); } static void create_fdt_socket_plic(RISCVVirtState *s, @@ -434,8 +423,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s, uint32_t *plic_phandles) { int cpu; - char *plic_name; - uint32_t *plic_cells; + g_autofree char *plic_name = NULL; + g_autofree uint32_t *plic_cells; unsigned long plic_addr; MachineState *ms = MACHINE(s); static const char * const plic_compat[2] = { @@ -495,10 +484,6 @@ static void create_fdt_socket_plic(RISCVVirtState *s, memmap[VIRT_PLATFORM_BUS].size, VIRT_PLATFORM_BUS_IRQ); } - - g_free(plic_name); - - g_free(plic_cells); } uint32_t imsic_num_bits(uint32_t count) @@ -517,11 +502,12 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr, bool m_mode, uint32_t imsic_guest_bits) { int cpu, socket; - char *imsic_name; + g_autofree char *imsic_name = NULL; MachineState *ms = MACHINE(s); int socket_count = riscv_socket_count(ms); - uint32_t imsic_max_hart_per_socket; - uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size; + uint32_t imsic_max_hart_per_socket, imsic_addr, imsic_size; + g_autofree uint32_t *imsic_cells = NULL; + g_autofree uint32_t *imsic_regs = NULL; imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2); imsic_regs = g_new0(uint32_t, socket_count * 4); @@ -573,10 +559,6 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr, IMSIC_MMIO_GROUP_MIN_SHIFT); } qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle); - - g_free(imsic_name); - g_free(imsic_regs); - g_free(imsic_cells); } static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap, @@ -608,12 +590,10 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket, bool m_mode, int num_harts) { int cpu; - char *aplic_name; - uint32_t *aplic_cells; + g_autofree char *aplic_name = NULL; + g_autofree uint32_t *aplic_cells = g_new0(uint32_t, num_harts * 2); MachineState *ms = MACHINE(s); - aplic_cells = g_new0(uint32_t, num_harts * 2); - for (cpu = 0; cpu < num_harts; cpu++) { aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? 
IRQ_M_EXT : IRQ_S_EXT); @@ -648,9 +628,6 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket, riscv_socket_fdt_write_id(ms, aplic_name, socket); qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle); - - g_free(aplic_name); - g_free(aplic_cells); } static void create_fdt_socket_aplic(RISCVVirtState *s, @@ -662,7 +639,7 @@ static void create_fdt_socket_aplic(RISCVVirtState *s, uint32_t *aplic_phandles, int num_harts) { - char *aplic_name; + g_autofree char *aplic_name = NULL; unsigned long aplic_addr; MachineState *ms = MACHINE(s); uint32_t aplic_m_phandle, aplic_s_phandle; @@ -697,23 +674,18 @@ static void create_fdt_socket_aplic(RISCVVirtState *s, VIRT_PLATFORM_BUS_IRQ); } - g_free(aplic_name); - aplic_phandles[socket] = aplic_s_phandle; } static void create_fdt_pmu(RISCVVirtState *s) { - char *pmu_name; + g_autofree char *pmu_name = g_strdup_printf("/pmu"); MachineState *ms = MACHINE(s); RISCVCPU hart = s->soc[0].harts[0]; - pmu_name = g_strdup_printf("/pmu"); qemu_fdt_add_subnode(ms->fdt, pmu_name); qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu"); riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name); - - g_free(pmu_name); } static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, @@ -723,11 +695,11 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t *irq_virtio_phandle, uint32_t *msi_pcie_phandle) { - char *clust_name; int socket, phandle_pos; MachineState *ms = MACHINE(s); uint32_t msi_m_phandle = 0, msi_s_phandle = 0; - uint32_t *intc_phandles, xplic_phandles[MAX_NODES]; + uint32_t xplic_phandles[MAX_NODES]; + g_autofree uint32_t *intc_phandles = NULL; int socket_count = riscv_socket_count(ms); qemu_fdt_add_subnode(ms->fdt, "/cpus"); @@ -741,6 +713,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, phandle_pos = ms->smp.cpus; for (socket = (socket_count - 1); socket >= 0; socket--) { + g_autofree char *clust_name = NULL; phandle_pos -= s->soc[socket].num_harts; clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket); @@ -751,8 +724,6 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, create_fdt_socket_memory(s, memmap, socket); - g_free(clust_name); - if (tcg_enabled()) { if (s->have_aclint) { create_fdt_socket_aclint(s, memmap, socket, @@ -795,8 +766,6 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, } } - g_free(intc_phandles); - if (kvm_enabled() && virt_use_kvm_aia(s)) { *irq_mmio_phandle = xplic_phandles[0]; *irq_virtio_phandle = xplic_phandles[0]; @@ -825,12 +794,12 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_virtio_phandle) { int i; - char *name; MachineState *ms = MACHINE(s); for (i = 0; i < VIRTIO_COUNT; i++) { - name = g_strdup_printf("/soc/virtio_mmio@%lx", + g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx", (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size)); + qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio"); qemu_fdt_setprop_cells(ms->fdt, name, "reg", @@ -845,7 +814,6 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", VIRTIO_IRQ + i, 0x4); } - g_free(name); } } @@ -853,7 +821,7 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_pcie_phandle, uint32_t msi_pcie_phandle) { - char *name; + g_autofree char *name 
= NULL; MachineState *ms = MACHINE(s); name = g_strdup_printf("/soc/pci@%lx", @@ -887,7 +855,6 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size); create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle); - g_free(name); } static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, @@ -934,7 +901,7 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_mmio_phandle) { - char *name; + g_autofree char *name = NULL; MachineState *ms = MACHINE(s); name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base); @@ -952,13 +919,12 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, } qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name); - g_free(name); } static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, uint32_t irq_mmio_phandle) { - char *name; + g_autofree char *name = NULL; MachineState *ms = MACHINE(s); name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base); @@ -974,41 +940,36 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, } else { qemu_fdt_setprop_cells(ms->fdt, name, "interrupts", RTC_IRQ, 0x4); } - g_free(name); } static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap) { - char *name; MachineState *ms = MACHINE(s); hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase); - name = g_strdup_printf("/flash@%" PRIx64, flashbase); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "cfi-flash"); qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", 2, flashbase, 2, flashsize, 2, flashbase + flashsize, 2, flashsize); qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4); - g_free(name); } static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap) { - char *nodename; MachineState *ms = MACHINE(s); hwaddr base = memmap[VIRT_FW_CFG].base; hwaddr size = memmap[VIRT_FW_CFG].size; + g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); - nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); qemu_fdt_add_subnode(ms->fdt, nodename); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "qemu,fw-cfg-mmio"); qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); - g_free(nodename); } static void finalize_fdt(RISCVVirtState *s) @@ -1155,7 +1116,7 @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket, int base_hartid, int hart_count) { DeviceState *ret; - char *plic_hart_config; + g_autofree char *plic_hart_config = NULL; /* Per-socket PLIC hart topology configuration string */ plic_hart_config = riscv_plic_hart_config_string(hart_count); @@ -1174,8 +1135,6 @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket, VIRT_PLIC_CONTEXT_STRIDE, memmap[VIRT_PLIC].size); - g_free(plic_hart_config); - return ret; } @@ -1263,6 +1222,45 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip) sysbus_mmio_get_region(sysbus, 0)); } +static void virt_build_smbios(RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + 
struct smbios_phys_mem_area mem_array; + const char *product = "QEMU Virtual Machine"; + + if (kvm_enabled()) { + product = "KVM Virtual Machine"; + } + + smbios_set_defaults("QEMU", product, mc->name, false, + true, SMBIOS_ENTRY_POINT_TYPE_64); + + if (riscv_is_32bit(&s->soc[0])) { + smbios_set_default_processor_family(0x200); + } else { + smbios_set_default_processor_family(0x201); + } + + /* build the array of physical mem area from base_memmap */ + mem_array.address = s->memmap[VIRT_DRAM].base; + mem_array.length = ms->ram_size; + + smbios_get_tables(ms, &mem_array, 1, + &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, + &error_fatal); + + if (smbios_anchor) { + fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(s->fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +} + static void virt_machine_done(Notifier *notifier, void *data) { RISCVVirtState *s = container_of(notifier, RISCVVirtState, @@ -1351,6 +1349,8 @@ static void virt_machine_done(Notifier *notifier, void *data) riscv_setup_direct_kernel(kernel_entry, fdt_load_addr); } + virt_build_smbios(s); + if (virt_is_acpi_enabled(s)) { virt_acpi_setup(s); } @@ -1362,7 +1362,6 @@ static void virt_machine_init(MachineState *machine) RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); - char *soc_name; DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip; int i, base_hartid, hart_count; int socket_count = riscv_socket_count(machine); @@ -1382,6 +1381,8 @@ static void virt_machine_init(MachineState *machine) /* Initialize sockets */ mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL; for (i = 0; i < socket_count; i++) { + g_autofree char *soc_name = g_strdup_printf("soc%d", i); + if (!riscv_socket_check_hartids(machine, i)) { error_report("discontinuous hartids in socket%d", i); exit(1); @@ -1399,10 +1400,8 @@ static void virt_machine_init(MachineState *machine) exit(1); } - soc_name = g_strdup_printf("soc%d", i); object_initialize_child(OBJECT(machine), soc_name, &s->soc[i], TYPE_RISCV_HART_ARRAY); - g_free(soc_name); object_property_set_str(OBJECT(&s->soc[i]), "cpu-type", machine->cpu_type, &error_abort); object_property_set_int(OBJECT(&s->soc[i]), "hartid-base", diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 2a90601ac5..c0c5a81e66 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -102,6 +102,7 @@ static struct { #define DEFAULT_CPU_SPEED 2000 static struct { + uint16_t processor_family; const char *sock_pfx, *manufacturer, *version, *serial, *asset, *part; uint64_t max_speed; uint64_t current_speed; @@ -110,6 +111,7 @@ static struct { .max_speed = DEFAULT_CPU_SPEED, .current_speed = DEFAULT_CPU_SPEED, .processor_id = 0, + .processor_family = 0x01, /* Other */ }; struct type8_instance { @@ -337,6 +339,10 @@ static const QemuOptDesc qemu_smbios_type4_opts[] = { .name = "part", .type = QEMU_OPT_STRING, .help = "part number", + }, { + .name = "processor-family", + .type = QEMU_OPT_NUMBER, + .help = "processor family", }, { .name = "processor-id", .type = QEMU_OPT_NUMBER, @@ -726,7 +732,7 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) snprintf(sock_str, sizeof(sock_str), "%s%2x", type4.sock_pfx, instance); SMBIOS_TABLE_SET_STR(4, socket_designation_str, sock_str); t->processor_type = 0x03; /* CPU */ - t->processor_family = 0x01; /* Other */ + t->processor_family = 0xfe; /* use 
Processor Family 2 field */ SMBIOS_TABLE_SET_STR(4, processor_manufacturer_str, type4.manufacturer); if (type4.processor_id == 0) { t->processor_id[0] = cpu_to_le32(smbios_cpuid_version); @@ -758,7 +764,7 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) t->thread_count = (threads_per_socket > 255) ? 0xFF : threads_per_socket; t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ - t->processor_family2 = cpu_to_le16(0x01); /* Other */ + t->processor_family2 = cpu_to_le16(type4.processor_family); if (tbl_len == SMBIOS_TYPE_4_LEN_V30) { t->core_count2 = t->core_enabled2 = cpu_to_le16(cores_per_socket); @@ -983,6 +989,13 @@ void smbios_set_cpuid(uint32_t version, uint32_t features) field = value; \ } +void smbios_set_default_processor_family(uint16_t processor_family) +{ + if (type4.processor_family <= 0x01) { + type4.processor_family = processor_family; + } +} + void smbios_set_defaults(const char *manufacturer, const char *product, const char *version, bool legacy_mode, bool uuid_encoded, SmbiosEntryPointType ep_type) @@ -1402,6 +1415,9 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) return; } save_opt(&type4.sock_pfx, opts, "sock_pfx"); + type4.processor_family = qemu_opt_get_number(opts, + "processor-family", + 0x01 /* Other */); save_opt(&type4.manufacturer, opts, "manufacturer"); save_opt(&type4.version, opts, "version"); save_opt(&type4.serial, opts, "serial"); diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h index 7f3259a630..6e514982d4 100644 --- a/include/hw/firmware/smbios.h +++ b/include/hw/firmware/smbios.h @@ -295,6 +295,7 @@ void smbios_set_cpuid(uint32_t version, uint32_t features); void smbios_set_defaults(const char *manufacturer, const char *product, const char *version, bool legacy_mode, bool uuid_encoded, SmbiosEntryPointType ep_type); +void smbios_set_default_processor_family(uint16_t processor_family); uint8_t *smbios_get_table_legacy(MachineState *ms, size_t *length); void smbios_get_tables(MachineState *ms, const struct smbios_phys_mem_area *mem_array, diff --git a/qemu-options.hx b/qemu-options.hx index 5adbed1101..f7c83362d1 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -2686,7 +2686,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 3 fields\n" "-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str]\n" " [,asset=str][,part=str][,max-speed=%d][,current-speed=%d]\n" - " [,processor-id=%d]\n" + " [,processor-family=%d,processor-id=%d]\n" " specify SMBIOS type 4 fields\n" "-smbios type=8[,external_reference=str][,internal_reference=str][,connector_type=%d][,port_type=%d]\n" " specify SMBIOS type 8 fields\n" @@ -2697,7 +2697,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 17 fields\n" "-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n" " specify SMBIOS type 41 fields\n", - QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH) + QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH | QEMU_ARCH_RISCV) SRST ``-smbios file=binary`` Load SMBIOS entry from binary file. 
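A hedged usage sketch for the new field (the family codes are the RISC-V defaults added in hw/riscv/virt.c; hex input is assumed to be accepted by the numeric option parser, otherwise the decimal 513 can be used):

    qemu-system-riscv64 -M virt -smbios type=4,processor-family=0x201

This selects the SMBIOS "RISC-V RV64" processor family code; 0x200 would be the RV32 counterpart. The value is reported through the Processor Family 2 field, since the base processor_family field is now fixed to 0xFE.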
@@ -2714,7 +2714,7 @@ SRST ``-smbios type=3[,manufacturer=str][,version=str][,serial=str][,asset=str][,sku=str]`` Specify SMBIOS type 3 fields -``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-id=%d]`` +``-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str][,asset=str][,part=str][,processor-family=%d][,processor-id=%d]`` Specify SMBIOS type 4 fields ``-smbios type=11[,value=str][,path=filename]`` diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index 9219c2fcc3..3670cfe6d9 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -34,7 +34,10 @@ #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") +#define TYPE_RISCV_CPU_RV32I RISCV_CPU_TYPE_NAME("rv32i") +#define TYPE_RISCV_CPU_RV32E RISCV_CPU_TYPE_NAME("rv32e") #define TYPE_RISCV_CPU_RV64I RISCV_CPU_TYPE_NAME("rv64i") +#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e") #define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64") #define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index be21fa09c6..1b8d001d23 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -29,8 +29,10 @@ #include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" +#include "hw/core/qdev-prop-internal.h" #include "migration/vmstate.h" #include "fpu/softfloat-helpers.h" +#include "sysemu/device_tree.h" #include "sysemu/kvm.h" #include "sysemu/tcg.h" #include "kvm/kvm_riscv.h" @@ -38,9 +40,9 @@ #include "tcg/tcg.h" /* RISC-V CPU definitions */ -static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; +static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, - RVC, RVS, RVU, RVH, RVJ, RVG, 0}; + RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0}; /* * From vector_helper.c @@ -58,6 +60,20 @@ bool riscv_cpu_is_32bit(RISCVCPU *cpu) return riscv_cpu_mxl(&cpu->env) == MXL_RV32; } +/* Hash that stores general user set numeric options */ +static GHashTable *general_user_opts; + +static void cpu_option_add_user_setting(const char *optname, uint32_t value) +{ + g_hash_table_insert(general_user_opts, (gpointer)optname, + GUINT_TO_POINTER(value)); +} + +bool riscv_cpu_option_set(const char *optname) +{ + return g_hash_table_contains(general_user_opts, optname); +} + #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} @@ -93,7 +109,9 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), + ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), + ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), @@ -190,6 +208,11 @@ void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) *ext_enabled = en; } +bool riscv_cpu_is_vendor(Object *cpu_obj) +{ + return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; +} + const char * const 
riscv_int_regnames[] = { "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", @@ -281,12 +304,16 @@ const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) } } -void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) +void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) { - env->misa_mxl_max = env->misa_mxl = mxl; env->misa_ext_mask = env->misa_ext = ext; } +int riscv_cpu_max_xlen(RISCVCPUClass *mcc) +{ + return 16 << mcc->misa_mxl_max; +} + #ifndef CONFIG_USER_ONLY static uint8_t satp_mode_from_str(const char *satp_mode_str) { @@ -396,11 +423,7 @@ static void riscv_any_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; -#if defined(TARGET_RISCV32) - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); -#elif defined(TARGET_RISCV64) - riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); -#endif + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), @@ -421,25 +444,29 @@ static void riscv_max_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - RISCVMXL mlx = MXL_RV64; -#ifdef TARGET_RISCV32 - mlx = MXL_RV32; -#endif - riscv_cpu_set_misa(env, mlx, 0); + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ? - VM_1_10_SV32 : VM_1_10_SV57); +#ifdef TARGET_RISCV32 + set_satp_mode_max_supported(cpu, VM_1_10_SV32); +#else + set_satp_mode_max_supported(cpu, VM_1_10_SV57); +#endif #endif } #if defined(TARGET_RISCV64) static void rv64_base_cpu_init(Object *obj) { - CPURISCVState *env = &RISCV_CPU(obj)->env; - /* We set this in the realise function */ - riscv_cpu_set_misa(env, MXL_RV64, 0); + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + /* Set latest version of privileged specification */ env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY @@ -451,8 +478,7 @@ static void rv64_sifive_u_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa(env, MXL_RV64, - RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); @@ -470,7 +496,7 @@ static void rv64_sifive_e_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -487,7 +513,7 @@ static void rv64_thead_c906_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); + riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); env->priv_ver = PRIV_VERSION_1_11_0; cpu->cfg.ext_zfa = true; @@ -518,7 +544,7 @@ static void rv64_veyron_v1_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); + riscv_cpu_set_misa_ext(env, RVG | RVC | RVS 
| RVU | RVH); env->priv_ver = PRIV_VERSION_1_12_0; /* Enable ISA extensions */ @@ -555,15 +581,19 @@ static void rv64_veyron_v1_cpu_init(Object *obj) static void rv128_base_cpu_init(Object *obj) { + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + if (qemu_tcg_mttcg_enabled()) { /* Missing 128-bit aligned atomics */ error_report("128-bit RISC-V currently does not work with Multi " "Threaded TCG. Please use: -accel tcg,thread=single"); exit(EXIT_FAILURE); } - CPURISCVState *env = &RISCV_CPU(obj)->env; - /* We set this in the realise function */ - riscv_cpu_set_misa(env, MXL_RV128, 0); + + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + /* Set latest version of privileged specification */ env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY @@ -574,30 +604,23 @@ static void rv128_base_cpu_init(Object *obj) static void rv64i_bare_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa(env, MXL_RV64, RVI); + riscv_cpu_set_misa_ext(env, RVI); +} - /* Remove the defaults from the parent class */ - RISCV_CPU(obj)->cfg.ext_zicntr = false; - RISCV_CPU(obj)->cfg.ext_zihpm = false; - - /* Set to QEMU's first supported priv version */ - env->priv_ver = PRIV_VERSION_1_10_0; - - /* - * Support all available satp_mode settings. The default - * value will be set to MBARE if the user doesn't set - * satp_mode manually (see set_satp_mode_default()). - */ -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64); -#endif +static void rv64e_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVE); } #else static void rv32_base_cpu_init(Object *obj) { - CPURISCVState *env = &RISCV_CPU(obj)->env; - /* We set this in the realise function */ - riscv_cpu_set_misa(env, MXL_RV32, 0); + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + cpu->cfg.mmu = true; + cpu->cfg.pmp = true; + /* Set latest version of privileged specification */ env->priv_ver = PRIV_VERSION_LATEST; #ifndef CONFIG_USER_ONLY @@ -609,8 +632,7 @@ static void rv32_sifive_u_cpu_init(Object *obj) { RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa(env, MXL_RV32, - RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); @@ -628,7 +650,7 @@ static void rv32_sifive_e_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -645,7 +667,7 @@ static void rv32_ibex_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); env->priv_ver = PRIV_VERSION_1_12_0; #ifndef CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -662,7 +684,7 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; RISCVCPU *cpu = RISCV_CPU(obj); - riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); + riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); env->priv_ver = PRIV_VERSION_1_10_0; #ifndef 
CONFIG_USER_ONLY set_satp_mode_max_supported(cpu, VM_1_10_MBARE); @@ -673,6 +695,18 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj) cpu->cfg.ext_zicsr = true; cpu->cfg.pmp = true; } + +static void rv32i_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVI); +} + +static void rv32e_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa_ext(env, RVE); +} #endif static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) @@ -813,7 +847,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) csr_ops[csrno].name, val); } } - uint16_t vlenb = cpu->cfg.vlen >> 3; + uint16_t vlenb = cpu->cfg.vlenb; for (i = 0; i < 32; i++) { qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); @@ -887,7 +921,7 @@ static void riscv_cpu_reset_hold(Object *obj) mcc->parent_phases.hold(obj); } #ifndef CONFIG_USER_ONLY - env->misa_mxl = env->misa_mxl_max; + env->misa_mxl = mcc->misa_mxl_max; env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); if (env->misa_mxl > MXL_RV32) { @@ -1080,17 +1114,18 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) } #endif - /* - * KVM accel does not have a specialized finalize() - * callback because its extensions are validated - * in the get()/set() callbacks of each property. - */ if (tcg_enabled()) { riscv_tcg_cpu_finalize_features(cpu, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return; } + } else if (kvm_enabled()) { + riscv_kvm_cpu_finalize_features(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } } } @@ -1263,11 +1298,19 @@ static void riscv_cpu_post_init(Object *obj) static void riscv_cpu_init(Object *obj) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + env->misa_mxl = mcc->misa_mxl_max; + #ifndef CONFIG_USER_ONLY qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); #endif /* CONFIG_USER_ONLY */ + general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); + /* * The timer and performance counters extensions were supported * in QEMU before they were added as discrete extensions in the @@ -1277,6 +1320,43 @@ static void riscv_cpu_init(Object *obj) */ RISCV_CPU(obj)->cfg.ext_zicntr = true; RISCV_CPU(obj)->cfg.ext_zihpm = true; + + /* Default values for non-bool cpu properties */ + cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); + cpu->cfg.vlenb = 128 >> 3; + cpu->cfg.elen = 64; + cpu->cfg.cbom_blocksize = 64; + cpu->cfg.cbop_blocksize = 64; + cpu->cfg.cboz_blocksize = 64; + cpu->env.vext_ver = VEXT_VERSION_1_00_0; +} + +static void riscv_bare_cpu_init(Object *obj) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + + /* + * Bare CPUs do not inherit the timer and performance + * counters from the parent class (see riscv_cpu_init() + * for info on why the parent enables them). + * + * Users have to explicitly enable these counters for + * bare CPUs. + */ + cpu->cfg.ext_zicntr = false; + cpu->cfg.ext_zihpm = false; + + /* Set to QEMU's first supported priv version */ + cpu->env.priv_ver = PRIV_VERSION_1_10_0; + + /* + * Support all available satp_mode settings. The default + * value will be set to MBARE if the user doesn't set + * satp_mode manually (see set_satp_mode_default()). 
+ */ +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_SV64); +#endif } typedef struct misa_ext_info { @@ -1304,8 +1384,29 @@ static const MISAExtInfo misa_ext_info_arr[] = { MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), MISA_EXT_INFO(RVV, "v", "Vector operations"), MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), + MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)") }; +static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) +{ + CPUClass *cc = CPU_CLASS(mcc); + + /* Validate that MISA_MXL is set properly. */ + switch (mcc->misa_mxl_max) { +#ifdef TARGET_RISCV64 + case MXL_RV64: + case MXL_RV128: + cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; + break; +#endif + case MXL_RV32: + cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; + break; + default: + g_assert_not_reached(); + } +} + static int riscv_validate_misa_info_idx(uint32_t bit) { int idx; @@ -1451,6 +1552,9 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), + MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false), + MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false), + MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), @@ -1485,26 +1589,46 @@ const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { DEFINE_PROP_END_OF_LIST(), }; +static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, + Error **errp) +{ + g_autofree char *cpuname = riscv_cpu_get_name(cpu); + error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", + cpuname, propname); +} + static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { RISCVCPU *cpu = RISCV_CPU(obj); - uint8_t pmu_num; + uint8_t pmu_num, curr_pmu_num; + uint32_t pmu_mask; visit_type_uint8(v, name, &pmu_num, errp); + curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); + + if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, curr_pmu_num); + return; + } + if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { error_setg(errp, "Number of counters exceeds maximum available"); return; } if (pmu_num == 0) { - cpu->cfg.pmu_mask = 0; + pmu_mask = 0; } else { - cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num); + pmu_mask = MAKE_64BIT_MASK(3, pmu_num); } warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); + cpu->cfg.pmu_mask = pmu_mask; + cpu_option_add_user_setting("pmu-mask", pmu_mask); } static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, @@ -1516,30 +1640,525 @@ static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, visit_type_uint8(v, name, &pmu_num, errp); } -const PropertyInfo prop_pmu_num = { +static const PropertyInfo prop_pmu_num = { .name = "pmu-num", .get = prop_pmu_num_get, .set = prop_pmu_num_set, }; -Property riscv_cpu_options[] = { - DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)), - {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ +static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint32_t value; + uint8_t pmu_num; - DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), - DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), + visit_type_uint32(v, name, &value, errp); - DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), - DEFINE_PROP_STRING("vext_spec", 
RISCVCPU, cfg.vext_spec), + if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %x\n", + name, cpu->cfg.pmu_mask); + return; + } - DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), - DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), + pmu_num = ctpop32(value); - DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), - DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64), - DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), + if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { + error_setg(errp, "Number of counters exceeds maximum available"); + return; + } - DEFINE_PROP_END_OF_LIST(), + cpu_option_add_user_setting(name, value); + cpu->cfg.pmu_mask = value; +} + +static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; + + visit_type_uint8(v, name, &pmu_mask, errp); +} + +static const PropertyInfo prop_pmu_mask = { + .name = "pmu-mask", + .get = prop_pmu_mask_get, + .set = prop_pmu_mask_set, +}; + +static void prop_mmu_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + bool value; + + visit_type_bool(v, name, &value, errp); + + if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, "mmu", errp); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.mmu = value; +} + +static void prop_mmu_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool value = RISCV_CPU(obj)->cfg.mmu; + + visit_type_bool(v, name, &value, errp); +} + +static const PropertyInfo prop_mmu = { + .name = "mmu", + .get = prop_mmu_get, + .set = prop_mmu_set, +}; + +static void prop_pmp_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + bool value; + + visit_type_bool(v, name, &value, errp); + + if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.pmp = value; +} + +static void prop_pmp_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool value = RISCV_CPU(obj)->cfg.pmp; + + visit_type_bool(v, name, &value, errp); +} + +static const PropertyInfo prop_pmp = { + .name = "pmp", + .get = prop_pmp_get, + .set = prop_pmp_set, +}; + +static int priv_spec_from_str(const char *priv_spec_str) +{ + int priv_version = -1; + + if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { + priv_version = PRIV_VERSION_1_12_0; + } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { + priv_version = PRIV_VERSION_1_11_0; + } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { + priv_version = PRIV_VERSION_1_10_0; + } + + return priv_version; +} + +static const char *priv_spec_to_str(int priv_version) +{ + switch (priv_version) { + case PRIV_VERSION_1_10_0: + return PRIV_VER_1_10_0_STR; + case PRIV_VERSION_1_11_0: + return PRIV_VER_1_11_0_STR; + case PRIV_VERSION_1_12_0: + return PRIV_VER_1_12_0_STR; + default: + return NULL; + } +} + +static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + g_autofree char *value = NULL; + int priv_version = -1; + + visit_type_str(v, name, &value, errp); + + priv_version = priv_spec_from_str(value); + if (priv_version < 0) { + 
error_setg(errp, "Unsupported privilege spec version '%s'", value); + return; + } + + if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %s\n", name, + object_property_get_str(obj, name, NULL)); + return; + } + + cpu_option_add_user_setting(name, priv_version); + cpu->env.priv_ver = priv_version; +} + +static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + const char *value = priv_spec_to_str(cpu->env.priv_ver); + + visit_type_str(v, name, (char **)&value, errp); +} + +static const PropertyInfo prop_priv_spec = { + .name = "priv_spec", + .get = prop_priv_spec_get, + .set = prop_priv_spec_set, +}; + +static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + g_autofree char *value = NULL; + + visit_type_str(v, name, &value, errp); + + if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { + error_setg(errp, "Unsupported vector spec version '%s'", value); + return; + } + + cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); + cpu->env.vext_ver = VEXT_VERSION_1_00_0; +} + +static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + const char *value = VEXT_VER_1_00_0_STR; + + visit_type_str(v, name, (char **)&value, errp); +} + +static const PropertyInfo prop_vext_spec = { + .name = "vext_spec", + .get = prop_vext_spec_get, + .set = prop_vext_spec_set, +}; + +static void prop_vlen_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (!is_power_of_2(value)) { + error_setg(errp, "Vector extension VLEN must be power of 2"); + return; + } + + if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.vlenb << 3); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.vlenb = value >> 3; +} + +static void prop_vlen_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_vlen = { + .name = "vlen", + .get = prop_vlen_get, + .set = prop_vlen_set, +}; + +static void prop_elen_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (!is_power_of_2(value)) { + error_setg(errp, "Vector extension ELEN must be power of 2"); + return; + } + + if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.elen); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.elen = value; +} + +static void prop_elen_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.elen; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_elen = { + .name = "elen", + .get = prop_elen_get, + .set = prop_elen_set, +}; + +static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + 
RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.cbom_blocksize); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.cbom_blocksize = value; +} + +static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_cbom_blksize = { + .name = "cbom_blocksize", + .get = prop_cbom_blksize_get, + .set = prop_cbom_blksize_set, +}; + +static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.cbop_blocksize); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.cbop_blocksize = value; +} + +static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_cbop_blksize = { + .name = "cbop_blocksize", + .get = prop_cbop_blksize_get, + .set = prop_cbop_blksize_set, +}; + +static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + error_append_hint(errp, "Current '%s' val: %u\n", + name, cpu->cfg.cboz_blocksize); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.cboz_blocksize = value; +} + +static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize; + + visit_type_uint16(v, name, &value, errp); +} + +static const PropertyInfo prop_cboz_blksize = { + .name = "cboz_blocksize", + .get = prop_cboz_blksize_get, + .set = prop_cboz_blksize_set, +}; + +static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool dynamic_cpu = riscv_cpu_is_dynamic(obj); + RISCVCPU *cpu = RISCV_CPU(obj); + uint32_t prev_val = cpu->cfg.mvendorid; + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (!dynamic_cpu && prev_val != value) { + error_setg(errp, "Unable to change %s mvendorid (0x%x)", + object_get_typename(obj), prev_val); + return; + } + + cpu->cfg.mvendorid = value; +} + +static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; + + visit_type_uint32(v, name, &value, errp); +} + +static const PropertyInfo prop_mvendorid = { + .name = "mvendorid", + .get = prop_mvendorid_get, + .set = prop_mvendorid_set, +}; + +static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool dynamic_cpu = 
riscv_cpu_is_dynamic(obj); + RISCVCPU *cpu = RISCV_CPU(obj); + uint64_t prev_val = cpu->cfg.mimpid; + uint64_t value; + + if (!visit_type_uint64(v, name, &value, errp)) { + return; + } + + if (!dynamic_cpu && prev_val != value) { + error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", + object_get_typename(obj), prev_val); + return; + } + + cpu->cfg.mimpid = value; +} + +static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint64_t value = RISCV_CPU(obj)->cfg.mimpid; + + visit_type_uint64(v, name, &value, errp); +} + +static const PropertyInfo prop_mimpid = { + .name = "mimpid", + .get = prop_mimpid_get, + .set = prop_mimpid_set, +}; + +static void prop_marchid_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool dynamic_cpu = riscv_cpu_is_dynamic(obj); + RISCVCPU *cpu = RISCV_CPU(obj); + uint64_t prev_val = cpu->cfg.marchid; + uint64_t value, invalid_val; + uint32_t mxlen = 0; + + if (!visit_type_uint64(v, name, &value, errp)) { + return; + } + + if (!dynamic_cpu && prev_val != value) { + error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", + object_get_typename(obj), prev_val); + return; + } + + switch (riscv_cpu_mxl(&cpu->env)) { + case MXL_RV32: + mxlen = 32; + break; + case MXL_RV64: + case MXL_RV128: + mxlen = 64; + break; + default: + g_assert_not_reached(); + } + + invalid_val = 1LL << (mxlen - 1); + + if (value == invalid_val) { + error_setg(errp, "Unable to set marchid with MSB (%u) bit set " + "and the remaining bits zero", mxlen); + return; + } + + cpu->cfg.marchid = value; +} + +static void prop_marchid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint64_t value = RISCV_CPU(obj)->cfg.marchid; + + visit_type_uint64(v, name, &value, errp); +} + +static const PropertyInfo prop_marchid = { + .name = "marchid", + .get = prop_marchid_get, + .set = prop_marchid_set, }; /* @@ -1612,6 +2231,26 @@ RISCVCPUProfile *riscv_profiles[] = { static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), + {.name = "pmu-mask", .info = &prop_pmu_mask}, + {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ + + {.name = "mmu", .info = &prop_mmu}, + {.name = "pmp", .info = &prop_pmp}, + + {.name = "priv_spec", .info = &prop_priv_spec}, + {.name = "vext_spec", .info = &prop_vext_spec}, + + {.name = "vlen", .info = &prop_vlen}, + {.name = "elen", .info = &prop_elen}, + + {.name = "cbom_blocksize", .info = &prop_cbom_blksize}, + {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, + {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, + + {.name = "mvendorid", .info = &prop_mvendorid}, + {.name = "mimpid", .info = &prop_mimpid}, + {.name = "marchid", .info = &prop_marchid}, + #ifndef CONFIG_USER_ONLY DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), #endif @@ -1692,115 +2331,7 @@ static const struct SysemuCPUOps riscv_sysemu_ops = { }; #endif -static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - bool dynamic_cpu = riscv_cpu_is_dynamic(obj); - RISCVCPU *cpu = RISCV_CPU(obj); - uint32_t prev_val = cpu->cfg.mvendorid; - uint32_t value; - - if (!visit_type_uint32(v, name, &value, errp)) { - return; - } - - if (!dynamic_cpu && prev_val != value) { - error_setg(errp, "Unable to change %s mvendorid (0x%x)", - object_get_typename(obj), prev_val); - return; - } - - cpu->cfg.mvendorid = value; -} - -static void cpu_get_mvendorid(Object *obj, 
Visitor *v, const char *name, - void *opaque, Error **errp) -{ - uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; - - visit_type_uint32(v, name, &value, errp); -} - -static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - bool dynamic_cpu = riscv_cpu_is_dynamic(obj); - RISCVCPU *cpu = RISCV_CPU(obj); - uint64_t prev_val = cpu->cfg.mimpid; - uint64_t value; - - if (!visit_type_uint64(v, name, &value, errp)) { - return; - } - - if (!dynamic_cpu && prev_val != value) { - error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", - object_get_typename(obj), prev_val); - return; - } - - cpu->cfg.mimpid = value; -} - -static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - uint64_t value = RISCV_CPU(obj)->cfg.mimpid; - - visit_type_uint64(v, name, &value, errp); -} - -static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - bool dynamic_cpu = riscv_cpu_is_dynamic(obj); - RISCVCPU *cpu = RISCV_CPU(obj); - uint64_t prev_val = cpu->cfg.marchid; - uint64_t value, invalid_val; - uint32_t mxlen = 0; - - if (!visit_type_uint64(v, name, &value, errp)) { - return; - } - - if (!dynamic_cpu && prev_val != value) { - error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", - object_get_typename(obj), prev_val); - return; - } - - switch (riscv_cpu_mxl(&cpu->env)) { - case MXL_RV32: - mxlen = 32; - break; - case MXL_RV64: - case MXL_RV128: - mxlen = 64; - break; - default: - g_assert_not_reached(); - } - - invalid_val = 1LL << (mxlen - 1); - - if (value == invalid_val) { - error_setg(errp, "Unable to set marchid with MSB (%u) bit set " - "and the remaining bits zero", mxlen); - return; - } - - cpu->cfg.marchid = value; -} - -static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - uint64_t value = RISCV_CPU(obj)->cfg.marchid; - - visit_type_uint64(v, name, &value, errp); -} - -static void riscv_cpu_class_init(ObjectClass *c, void *data) +static void riscv_cpu_common_class_init(ObjectClass *c, void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -1831,18 +2362,17 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) cc->gdb_arch_name = riscv_gdb_arch_name; cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; - object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, - cpu_set_mvendorid, NULL, NULL); - - object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, - cpu_set_mimpid, NULL, NULL); - - object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, - cpu_set_marchid, NULL, NULL); - device_class_set_props(dc, riscv_cpu_properties); } +static void riscv_cpu_class_init(ObjectClass *c, void *data) +{ + RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); + + mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; + riscv_cpu_validate_misa_mxl(mcc); +} + static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len) { @@ -1863,10 +2393,13 @@ static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, char *riscv_isa_string(RISCVCPU *cpu) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); int i; const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); char *isa_str = g_new(char, maxlen); - char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); + int xlen = riscv_cpu_max_xlen(mcc); + char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen); + for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) 
{ if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { *p++ = qemu_tolower(riscv_single_letter_exts[i]); @@ -1879,39 +2412,102 @@ char *riscv_isa_string(RISCVCPU *cpu) return isa_str; } -#define DEFINE_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_CPU, \ - .instance_init = initfn \ +#ifndef CONFIG_USER_ONLY +static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count) +{ + int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr); + char **extensions = g_new(char *, maxlen); + + for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { + if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { + extensions[*count] = g_new(char, 2); + snprintf(extensions[*count], 2, "%c", + qemu_tolower(riscv_single_letter_exts[i])); + (*count)++; + } } -#define DEFINE_DYNAMIC_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_DYNAMIC_CPU, \ - .instance_init = initfn \ + for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) { + if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { + extensions[*count] = g_strdup(edata->name); + (*count)++; + } } -#define DEFINE_VENDOR_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_VENDOR_CPU, \ - .instance_init = initfn \ + return extensions; +} + +void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) +{ + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); + const size_t maxlen = sizeof("rv128i"); + g_autofree char *isa_base = g_new(char, maxlen); + g_autofree char *riscv_isa; + char **isa_extensions; + int count = 0; + int xlen = riscv_cpu_max_xlen(mcc); + + riscv_isa = riscv_isa_string(cpu); + qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa); + + snprintf(isa_base, maxlen, "rv%di", xlen); + qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base); + + isa_extensions = riscv_isa_extensions_list(cpu, &count); + qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions", + isa_extensions, count); + + for (int i = 0; i < count; i++) { + g_free(isa_extensions[i]); } -#define DEFINE_BARE_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_BARE_CPU, \ - .instance_init = initfn \ + g_free(isa_extensions); +} +#endif + +#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ } -#define DEFINE_PROFILE_CPU(type_name, initfn) \ - { \ - .name = type_name, \ - .parent = TYPE_RISCV_BARE_CPU, \ - .instance_init = initfn \ +#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_DYNAMIC_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ + } + +#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_VENDOR_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ + } + +#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_BARE_CPU, \ + .instance_init = (initfn), \ + .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ + } + +#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ + { \ + .name = (type_name), \ + .parent = TYPE_RISCV_BARE_CPU, \ + .instance_init = (initfn), \ 
+ .class_init = riscv_cpu_class_init, \ + .class_data = (void *)(misa_mxl_max) \ } static const TypeInfo riscv_cpu_type_infos[] = { @@ -1924,7 +2520,7 @@ static const TypeInfo riscv_cpu_type_infos[] = { .instance_post_init = riscv_cpu_post_init, .abstract = true, .class_size = sizeof(RISCVCPUClass), - .class_init = riscv_cpu_class_init, + .class_init = riscv_cpu_common_class_init, }, { .name = TYPE_RISCV_DYNAMIC_CPU, @@ -1939,27 +2535,33 @@ static const TypeInfo riscv_cpu_type_infos[] = { { .name = TYPE_RISCV_BARE_CPU, .parent = TYPE_RISCV_CPU, + .instance_init = riscv_bare_cpu_init, .abstract = true, }, - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init), #if defined(TARGET_RISCV32) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), #elif defined(TARGET_RISCV64) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), + DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), #endif 
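 /*
  * Illustrative note: every entry above now passes its MXL through the
  * DEFINE_*_CPU() macros as QOM class_data, and riscv_cpu_class_init()
  * recovers it with
  *     mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
  * so a hypothetical new model would be registered as, e.g.,
  *     DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_FOO, MXL_RV64, rv64_foo_cpu_init),
  * where TYPE_RISCV_CPU_FOO and rv64_foo_cpu_init are made-up names used
  * only for illustration.
  */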
}; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index f63ee9cc58..f52dce78ba 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -69,6 +69,7 @@ typedef struct CPUArchState CPURISCVState; #define RVH RV('H') #define RVJ RV('J') #define RVG RV('G') +#define RVB RV('B') extern const uint32_t misa_bits[]; const char *riscv_get_misa_ext_name(uint32_t bit); @@ -93,6 +94,9 @@ typedef struct riscv_cpu_profile { extern RISCVCPUProfile *riscv_profiles[]; /* Privileged specification version */ +#define PRIV_VER_1_10_0_STR "v1.10.0" +#define PRIV_VER_1_11_0_STR "v1.11.0" +#define PRIV_VER_1_12_0_STR "v1.12.0" enum { PRIV_VERSION_1_10_0 = 0, PRIV_VERSION_1_11_0, @@ -102,6 +106,7 @@ enum { }; #define VEXT_VERSION_1_00_0 0x00010000 +#define VEXT_VER_1_00_0_STR "v1.0" enum { TRANSLATE_SUCCESS, @@ -180,12 +185,10 @@ struct CPUArchState { target_ulong guest_phys_fault_addr; target_ulong priv_ver; - target_ulong bext_ver; target_ulong vext_ver; /* RISCVMXL, but uint32_t for vmstate migration */ uint32_t misa_mxl; /* current mxl */ - uint32_t misa_mxl_max; /* max mxl for this cpu */ uint32_t misa_ext; /* current extensions */ uint32_t misa_ext_mask; /* max ext for this cpu */ uint32_t xl; /* current xlen */ @@ -361,6 +364,7 @@ struct CPUArchState { target_ulong tdata1[RV_MAX_TRIGGERS]; target_ulong tdata2[RV_MAX_TRIGGERS]; target_ulong tdata3[RV_MAX_TRIGGERS]; + target_ulong mcontext; struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS]; struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS]; QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS]; @@ -466,6 +470,7 @@ struct RISCVCPUClass { DeviceRealize parent_realize; ResettablePhases parent_phases; + uint32_t misa_mxl_max; /* max mxl for this cpu */ }; static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) @@ -506,8 +511,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); char *riscv_isa_string(RISCVCPU *cpu); +int riscv_cpu_max_xlen(RISCVCPUClass *mcc); +bool riscv_cpu_option_set(const char *optname); #ifndef CONFIG_USER_ONLY +void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename); void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, @@ -682,11 +690,17 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env) * = 256 >> 7 * = 2 */ -static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype) +static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew, + int8_t lmul) { - uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW); - int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3); - return cpu->cfg.vlen >> (sew + 3 - lmul); + uint32_t vlen = vlenb << 3; + + /* + * We need to use 'vlen' instead of 'vlenb' to + * preserve the '+ 3' in the formula. Otherwise + * we risk a negative shift if vsew < lmul. 
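+ *
+ * For example, with vlenb = 16 (VLEN = 128), vsew = 0 (SEW = 8) and
+ * lmul = 2 (LMUL = 4): vlmax = 128 >> (0 + 3 - 2) = 64, matching
+ * VLEN * LMUL / SEW = 128 * 4 / 8, while the vlenb form would need a
+ * shift of (vsew - lmul) = -2.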
+ */ + return vlen >> (vsew + 3 - lmul); } void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, @@ -769,7 +783,8 @@ enum riscv_pmu_event_idx { /* used by tcg/tcg-cpu.c*/ void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en); bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset); -void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext); +void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext); +bool riscv_cpu_is_vendor(Object *cpu_obj); typedef struct RISCVCPUMultiExtConfig { const char *name; @@ -782,7 +797,6 @@ extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[]; extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[]; -extern Property riscv_cpu_options[]; typedef struct isa_ext_data { const char *name; diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index ebd7917d49..fc2068ee4d 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -32,14 +32,6 @@ #define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT) #define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) -/* Vector Fixed-Point round model */ -#define FSR_VXRM_SHIFT 9 -#define FSR_VXRM (0x3 << FSR_VXRM_SHIFT) - -/* Vector Fixed-Point saturation flag */ -#define FSR_VXSAT_SHIFT 8 -#define FSR_VXSAT (0x1 << FSR_VXSAT_SHIFT) - /* Control and Status Registers */ /* User Trap Setup */ @@ -361,6 +353,7 @@ #define CSR_TDATA2 0x7a2 #define CSR_TDATA3 0x7a3 #define CSR_TINFO 0x7a4 +#define CSR_MCONTEXT 0x7a8 /* Debug Mode Registers */ #define CSR_DCSR 0x7b0 @@ -905,4 +898,10 @@ typedef enum RISCVException { /* JVT CSR bits */ #define JVT_MODE 0x3F #define JVT_BASE (~0x3F) + +/* Debug Sdtrig CSR masks */ +#define MCONTEXT32 0x0000003F +#define MCONTEXT64 0x0000000000001FFFULL +#define MCONTEXT32_HCONTEXT 0x0000007F +#define MCONTEXT64_HCONTEXT 0x0000000000003FFFULL #endif diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index 780ae6ef17..833bf58217 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -78,7 +78,9 @@ struct RISCVCPUConfig { bool ext_svnapot; bool ext_svpbmt; bool ext_zdinx; + bool ext_zaamo; bool ext_zacas; + bool ext_zalrsc; bool ext_zawrs; bool ext_zfa; bool ext_zfbfmin; @@ -139,11 +141,7 @@ struct RISCVCPUConfig { bool ext_XVentanaCondOps; uint32_t pmu_mask; - char *priv_spec; - char *user_spec; - char *bext_spec; - char *vext_spec; - uint16_t vlen; + uint16_t vlenb; uint16_t elen; uint16_t cbom_blocksize; uint16_t cbop_blocksize; diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index b6b23b7d03..d462d95ee1 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -81,13 +81,16 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, * which is not supported by GVEC. So we set vl_eq_vlmax flag to true * only when maxsz >= 8 bytes. 
*/ - uint32_t vlmax = vext_get_vlmax(cpu, env->vtype); - uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW); - uint32_t maxsz = vlmax << sew; + + /* lmul encoded as in DisasContext::lmul */ + int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3); + uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW); + uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); + uint32_t maxsz = vlmax << vsew; bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && (maxsz >= 8); flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); - flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew); flags = FIELD_DP32(flags, TB_FLAGS, LMUL, FIELD_EX64(env->vtype, VTYPE, VLMUL)); flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 674ea075a4..d4e8ac13b9 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -242,7 +242,7 @@ static RISCVException any32(CPURISCVState *env, int csrno) } -static int aia_any(CPURISCVState *env, int csrno) +static RISCVException aia_any(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -251,7 +251,7 @@ static int aia_any(CPURISCVState *env, int csrno) return any(env, csrno); } -static int aia_any32(CPURISCVState *env, int csrno) +static RISCVException aia_any32(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -269,7 +269,7 @@ static RISCVException smode(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } -static int smode32(CPURISCVState *env, int csrno) +static RISCVException smode32(CPURISCVState *env, int csrno) { if (riscv_cpu_mxl(env) != MXL_RV32) { return RISCV_EXCP_ILLEGAL_INST; @@ -278,7 +278,7 @@ static int smode32(CPURISCVState *env, int csrno) return smode(env, csrno); } -static int aia_smode(CPURISCVState *env, int csrno) +static RISCVException aia_smode(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -287,7 +287,7 @@ static int aia_smode(CPURISCVState *env, int csrno) return smode(env, csrno); } -static int aia_smode32(CPURISCVState *env, int csrno) +static RISCVException aia_smode32(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -496,7 +496,7 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } -static int aia_hmode(CPURISCVState *env, int csrno) +static RISCVException aia_hmode(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -505,7 +505,7 @@ static int aia_hmode(CPURISCVState *env, int csrno) return hmode(env, csrno); } -static int aia_hmode32(CPURISCVState *env, int csrno) +static RISCVException aia_hmode32(CPURISCVState *env, int csrno) { if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; @@ -681,9 +681,10 @@ static RISCVException read_vl(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vlenb(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = riscv_cpu_cfg(env)->vlen >> 3; + *val = riscv_cpu_cfg(env)->vlenb; return RISCV_EXCP_NONE; } @@ -738,17 +739,19 @@ static RISCVException write_vstart(CPURISCVState *env, int csrno, * The vstart CSR is defined to have only enough writable bits * to hold the largest element index, i.e. lg2(VLEN) bits. 
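*
* For example, with vlenb = 16 (VLEN = 128), ctzl(vlenb << 3) = 7, so the
* mask below keeps bits [6:0] and vstart is limited to 0..127.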
*/ - env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen)); + env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3)); return RISCV_EXCP_NONE; } -static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vcsr(CPURISCVState *env, int csrno, + target_ulong *val) { *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT); return RISCV_EXCP_NONE; } -static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_vcsr(CPURISCVState *env, int csrno, + target_ulong val) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -798,13 +801,15 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounter(CPURISCVState *env, int csrno, + target_ulong *val) { *val = get_ticks(false); return RISCV_EXCP_NONE; } -static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, + target_ulong *val) { *val = get_ticks(true); return RISCV_EXCP_NONE; @@ -812,7 +817,8 @@ static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) #else /* CONFIG_USER_ONLY */ -static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mhpmevent(CPURISCVState *env, int csrno, + target_ulong *val) { int evt_index = csrno - CSR_MCOUNTINHIBIT; @@ -821,7 +827,8 @@ static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val) return RISCV_EXCP_NONE; } -static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmevent(CPURISCVState *env, int csrno, + target_ulong val) { int evt_index = csrno - CSR_MCOUNTINHIBIT; uint64_t mhpmevt_val = val; @@ -837,7 +844,8 @@ static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val) return RISCV_EXCP_NONE; } -static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno, + target_ulong *val) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; @@ -846,7 +854,8 @@ static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val) return RISCV_EXCP_NONE; } -static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno, + target_ulong val) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; uint64_t mhpmevth_val = val; @@ -860,7 +869,8 @@ static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val) return RISCV_EXCP_NONE; } -static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, + target_ulong val) { int ctr_idx = csrno - CSR_MCYCLE; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; @@ -885,7 +895,8 @@ static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) return RISCV_EXCP_NONE; } -static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, + target_ulong val) { int ctr_idx = csrno - CSR_MCYCLEH; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; @@ -945,7 +956,8 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, return RISCV_EXCP_NONE; } -static int read_hpmcounter(CPURISCVState *env, int csrno, 
target_ulong *val) +static RISCVException read_hpmcounter(CPURISCVState *env, int csrno, + target_ulong *val) { uint16_t ctr_index; @@ -960,7 +972,8 @@ static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) return riscv_pmu_read_ctr(env, val, false, ctr_index); } -static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, + target_ulong *val) { uint16_t ctr_index; @@ -975,7 +988,8 @@ static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) return riscv_pmu_read_ctr(env, val, true, ctr_index); } -static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_scountovf(CPURISCVState *env, int csrno, + target_ulong *val) { int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT; int i; @@ -1638,7 +1652,8 @@ static RISCVException rmw_mvienh(CPURISCVState *env, int csrno, return ret; } -static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mtopi(CPURISCVState *env, int csrno, + target_ulong *val) { int irq; uint8_t iprio; @@ -1678,8 +1693,9 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) }; } -static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val, - target_ulong new_val, target_ulong wr_mask) +static RISCVException rmw_xiselect(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { target_ulong *iselect; @@ -1758,8 +1774,9 @@ static int rmw_iprio(target_ulong xlen, return 0; } -static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, - target_ulong new_val, target_ulong wr_mask) +static RISCVException rmw_xireg(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { bool virt, isel_reserved; uint8_t *iprio; @@ -1833,8 +1850,9 @@ done: return RISCV_EXCP_NONE; } -static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, - target_ulong new_val, target_ulong wr_mask) +static RISCVException rmw_xtopei(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { bool virt; int ret = -EINVAL; @@ -3031,7 +3049,8 @@ static RISCVException write_satp(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vstopi(CPURISCVState *env, int csrno, + target_ulong *val) { int irq, ret; target_ulong topei; @@ -3120,7 +3139,8 @@ static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) return RISCV_EXCP_NONE; } -static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_stopi(CPURISCVState *env, int csrno, + target_ulong *val) { int irq; uint8_t iprio; @@ -3576,19 +3596,21 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hvictl(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->hvictl; return RISCV_EXCP_NONE; } -static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hvictl(CPURISCVState *env, int csrno, + target_ulong val) { env->hvictl = val & HVICTL_VALID_MASK; return RISCV_EXCP_NONE; } -static int read_hvipriox(CPURISCVState *env, int first_index, +static RISCVException read_hvipriox(CPURISCVState *env, int first_index, uint8_t *iprio, 
target_ulong *val) { int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); @@ -3614,7 +3636,7 @@ static int read_hvipriox(CPURISCVState *env, int first_index, return RISCV_EXCP_NONE; } -static int write_hvipriox(CPURISCVState *env, int first_index, +static RISCVException write_hvipriox(CPURISCVState *env, int first_index, uint8_t *iprio, target_ulong val) { int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); @@ -3640,42 +3662,50 @@ static int write_hvipriox(CPURISCVState *env, int first_index, return RISCV_EXCP_NONE; } -static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio1(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 0, env->hviprio, val); } -static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio1(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 0, env->hviprio, val); } -static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio1h(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 4, env->hviprio, val); } -static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio1h(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 4, env->hviprio, val); } -static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio2(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 8, env->hviprio, val); } -static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio2(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 8, env->hviprio, val); } -static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_hviprio2h(CPURISCVState *env, int csrno, + target_ulong *val) { return read_hvipriox(env, 12, env->hviprio, val); } -static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_hviprio2h(CPURISCVState *env, int csrno, + target_ulong val) { return write_hvipriox(env, 12, env->hviprio, val); } @@ -3699,7 +3729,8 @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_vstvec(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->vstvec; return RISCV_EXCP_NONE; @@ -3906,6 +3937,31 @@ static RISCVException read_tinfo(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException read_mcontext(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mcontext; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mcontext(CPURISCVState *env, int csrno, + target_ulong val) +{ + bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false; + int32_t mask; + + if (riscv_has_ext(env, RVH)) { + /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */ + mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT; + } else { + /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */ + mask = rv32 ? 
MCONTEXT32 : MCONTEXT64; + } + + env->mcontext = val & mask; + return RISCV_EXCP_NONE; +} + /* * Functions to access Pointer Masking feature registers * We have to check if current priv lvl could modify @@ -4800,11 +4856,12 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, /* Debug CSRs */ - [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, - [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, - [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, - [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, - [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, + [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, + [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata }, + [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata }, + [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata }, + [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, + [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext }, /* User Pointer Masking */ [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte }, diff --git a/target/riscv/debug.c b/target/riscv/debug.c index 4945d1a1f2..e30d99cc2f 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -940,4 +940,6 @@ void riscv_trigger_reset_hold(CPURISCVState *env) env->cpu_watchpoint[i] = NULL; timer_del(env->itrigger_timer[i]); } + + env->mcontext = 0; } diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 58b3ace0fe..ca9b71f7bb 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -49,6 +49,7 @@ static const struct TypeSize vec_lanes[] = { int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; target_ulong tmp; @@ -61,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return 0; } - switch (env->misa_mxl_max) { + switch (mcc->misa_mxl_max) { case MXL_RV32: return gdb_get_reg32(mem_buf, tmp); case MXL_RV64: @@ -75,12 +76,13 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; int length = 0; target_ulong tmp; - switch (env->misa_mxl_max) { + switch (mcc->misa_mxl_max) { case MXL_RV32: tmp = (int32_t)ldl_p(mem_buf); length = 4; @@ -130,7 +132,7 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) { - uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + uint16_t vlenb = riscv_cpu_cfg(env)->vlenb; if (n < 32) { int i; int cnt = 0; @@ -146,7 +148,7 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) { - uint16_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + uint16_t vlenb = riscv_cpu_cfg(env)->vlenb; if (n < 32) { int i; for (i = 0; i < vlenb; i += 8) { @@ -214,11 +216,12 @@ static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n) static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; GString *s = g_string_new(NULL); riscv_csr_predicate_fn predicate; - int bitsize = 16 
<< env->misa_mxl_max; + int bitsize = riscv_cpu_max_xlen(mcc); int i; #if !defined(CONFIG_USER_ONLY) @@ -266,7 +269,7 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) RISCVCPU *cpu = RISCV_CPU(cs); GString *s = g_string_new(NULL); g_autoptr(GString) ts = g_string_new(""); - int reg_width = cpu->cfg.vlen; + int reg_width = cpu->cfg.vlenb << 3; int num_regs = 0; int i; @@ -310,6 +313,7 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; if (env->misa_ext & RVD) { @@ -326,7 +330,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) ricsv_gen_dynamic_vector_xml(cs, base_reg), "riscv-vector.xml", 0); } - switch (env->misa_mxl_max) { + switch (mcc->misa_mxl_max) { case MXL_RV32: gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 5f194a447b..267930e5bc 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -18,6 +18,18 @@ * this program. If not, see . */ +#define REQUIRE_A_OR_ZAAMO(ctx) do { \ + if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_A_OR_ZALRSC(ctx) do { \ + if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) { \ + return false; \ + } \ +} while (0) + static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGv src1; @@ -96,132 +108,143 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZALRSC(ctx); return gen_lr(ctx, a, (MO_ALIGN | MO_TESL)); } static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZALRSC(ctx); return gen_sc(ctx, a, (MO_ALIGN | MO_TESL)); } static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); } static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | 
MO_TESL)); } static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) { - REQUIRE_EXT(ctx, RVA); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); } static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZALRSC(ctx); return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ); } static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZALRSC(ctx); return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); } diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc index 4e39c00884..8ee99df3f3 100644 --- a/target/riscv/insn_trans/trans_rvbf16.c.inc +++ b/target/riscv/insn_trans/trans_rvbf16.c.inc @@ -83,8 +83,8 @@ static bool trans_vfncvtbf16_f_f_w(DisasContext *ctx, arg_vfncvtbf16_f_f_w *a) data = FIELD_DP32(data, VDATA, VMA, ctx->vma); tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0), vreg_ofs(ctx, a->rs2), tcg_env, - ctx->cfg_ptr->vlen / 8, - ctx->cfg_ptr->vlen / 8, data, + ctx->cfg_ptr->vlenb, + ctx->cfg_ptr->vlenb, data, gen_helper_vfncvtbf16_f_f_w); mark_vs_dirty(ctx); gen_set_label(over); @@ -112,8 +112,8 @@ static bool trans_vfwcvtbf16_f_f_v(DisasContext *ctx, arg_vfwcvtbf16_f_f_v *a) data = FIELD_DP32(data, VDATA, VMA, ctx->vma); tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0), vreg_ofs(ctx, a->rs2), tcg_env, - ctx->cfg_ptr->vlen / 8, - ctx->cfg_ptr->vlen / 8, data, + ctx->cfg_ptr->vlenb, + ctx->cfg_ptr->vlenb, data, gen_helper_vfwcvtbf16_f_f_v); mark_vs_dirty(ctx); gen_set_label(over); @@ -143,8 +143,8 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a) tcg_gen_gvec_4_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0), vreg_ofs(ctx, a->rs1), vreg_ofs(ctx, a->rs2), tcg_env, - ctx->cfg_ptr->vlen / 8, - ctx->cfg_ptr->vlen / 8, 
data, + ctx->cfg_ptr->vlenb, + ctx->cfg_ptr->vlenb, data, gen_helper_vfwmaccbf16_vv); mark_vs_dirty(ctx); gen_set_label(over); diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 3871f0ea73..9e101ab434 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -217,7 +217,7 @@ static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a) /* vector register offset from env */ static uint32_t vreg_ofs(DisasContext *s, int reg) { - return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8; + return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlenb; } /* check functions */ @@ -627,11 +627,11 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, * As simd_desc supports at most 2048 bytes, and in this implementation, * the max vector group length is 4096 bytes. So split it into two parts. * - * The first part is vlen in bytes, encoded in maxsz of simd_desc. + * The first part is vlen in bytes (vlenb), encoded in maxsz of simd_desc. * The second part is lmul, encoded in data of simd_desc. */ - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -791,8 +791,8 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); stride = get_gpr(s, rs2, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -897,8 +897,8 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, mask = tcg_temp_new_ptr(); index = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2)); @@ -1036,8 +1036,8 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -1086,7 +1086,7 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, uint32_t width, gen_helper_ldst_whole *fn, DisasContext *s, bool is_store) { - uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / width; + uint32_t evl = s->cfg_ptr->vlenb * nf / width; TCGLabel *over = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over); @@ -1096,8 +1096,8 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, uint32_t data = FIELD_DP32(0, VDATA, NF, nf); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); base = get_gpr(s, rs1, EXT_NONE); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); @@ -1160,12 
+1160,12 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, 8, 1, true) /* * MAXSZ returns the maximum vector size can be operated in bytes, * which is used in GVEC IR when vl_eq_vlmax flag is set to true - * to accerlate vector operation. + * to accelerate vector operation. */ static inline uint32_t MAXSZ(DisasContext *s) { - int scale = s->lmul - 3; - return s->cfg_ptr->vlen >> -scale; + int max_sz = s->cfg_ptr->vlenb * 8; + return max_sz >> (3 - s->lmul); } static bool opivv_check(DisasContext *s, arg_rmrr *a) @@ -1199,8 +1199,8 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn, data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); } mark_vs_dirty(s); gen_set_label(over); @@ -1248,8 +1248,8 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2)); @@ -1410,8 +1410,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2)); @@ -1492,8 +1492,8 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); mark_vs_dirty(s); gen_set_label(over); @@ -1568,8 +1568,8 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1639,8 +1639,8 @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1), - vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1831,8 +1831,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2036,8 +2036,8 @@ static bool 
trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); gen_set_label(over); } @@ -2082,8 +2082,8 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) }; tcg_gen_ext_tl_i64(s1_i64, s1); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1_i64, tcg_env, desc); } @@ -2121,8 +2121,8 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) s1 = tcg_constant_i64(simm); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1, tcg_env, desc); @@ -2275,8 +2275,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2303,8 +2303,8 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2)); @@ -2391,8 +2391,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2465,8 +2465,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2581,8 +2581,8 @@ static bool do_opfv(DisasContext *s, arg_rmr *a, data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), tcg_env, - s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); mark_vs_dirty(s); gen_set_label(over); return true; @@ -2691,8 +2691,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) do_nanbox(s, t1, cpu_fpr[a->rs1]); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); fns[s->sew - 1](dest, t1, tcg_env, desc); @@ -2770,8 +2770,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ 
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2821,8 +2821,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2888,8 +2888,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2937,8 +2937,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -3027,8 +3027,8 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, data, fn); \ + s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data, fn); \ mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ @@ -3061,8 +3061,8 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); dst = dest_gpr(s, a->rd); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -3090,8 +3090,8 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); dst = dest_gpr(s, a->rd); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); @@ -3128,8 +3128,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \ vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \ - tcg_env, s->cfg_ptr->vlen / 8, \ - s->cfg_ptr->vlen / 8, \ + tcg_env, s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, \ data, fn); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -3171,8 +3171,8 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) }; tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), tcg_env, - s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fns[s->sew]); + s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); mark_vs_dirty(s); gen_set_label(over); return true; @@ -3200,8 +3200,8 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a) gen_helper_vid_v_w, gen_helper_vid_v_d, }; tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), - 
tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); mark_vs_dirty(s); gen_set_label(over); @@ -3535,8 +3535,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) } if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { - int scale = s->lmul - (s->sew + 3); - int vlmax = s->cfg_ptr->vlen >> -scale; + int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul); TCGv_i64 dest = tcg_temp_new_i64(); if (a->rs1 == 0) { @@ -3566,8 +3565,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) } if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) { - int scale = s->lmul - (s->sew + 3); - int vlmax = s->cfg_ptr->vlen >> -scale; + int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul); if (a->rs1 >= vlmax) { tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), 0); @@ -3620,8 +3618,8 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a) data = FIELD_DP32(data, VDATA, VTA, s->vta); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fns[s->sew]); mark_vs_dirty(s); gen_set_label(over); @@ -3641,7 +3639,7 @@ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ vext_check_isa_ill(s) && \ QEMU_IS_ALIGNED(a->rd, LEN) && \ QEMU_IS_ALIGNED(a->rs2, LEN)) { \ - uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ + uint32_t maxsz = s->cfg_ptr->vlenb * LEN; \ if (s->vstart_eq_zero) { \ tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \ vreg_ofs(s, a->rs2), maxsz, maxsz); \ @@ -3723,8 +3721,8 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq) tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), tcg_env, - s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, fn); + s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, fn); mark_vs_dirty(s); gen_set_label(over); diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc index 3801c16829..a5cdd1b67f 100644 --- a/target/riscv/insn_trans/trans_rvvk.c.inc +++ b/target/riscv/insn_trans/trans_rvvk.c.inc @@ -174,7 +174,7 @@ GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvkb_vx_check) data = FIELD_DP32(data, VDATA, VMA, s->vma); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, \ data, fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -267,7 +267,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check) rd_v = tcg_temp_new_ptr(); \ rs2_v = tcg_temp_new_ptr(); \ desc = tcg_constant_i32( \ - simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \ + simd_desc(s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, data)); \ tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \ tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \ gen_helper_##NAME(rd_v, rs2_v, tcg_env, desc); \ @@ -345,7 +345,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS) rs2_v = tcg_temp_new_ptr(); \ uimm_v = tcg_constant_i32(a->rs1); \ desc = tcg_constant_i32( \ - simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \ + simd_desc(s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, data)); \ tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \ tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \ gen_helper_##NAME(rd_v, rs2_v, uimm_v, tcg_env, desc); \ @@ -413,7 +413,7 @@ 
GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS) \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), tcg_env, \ - s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlenb, s->cfg_ptr->vlenb, \ data, gen_helper_##NAME); \ \ mark_vs_dirty(s); \ @@ -466,8 +466,8 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a) data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, s->sew == MO_32 ? gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv); @@ -500,8 +500,8 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a) data = FIELD_DP32(data, VDATA, VMA, s->vma); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8, - s->cfg_ptr->vlen / 8, data, + vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data, s->sew == MO_32 ? gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv); diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc index dbb6411239..22488412d4 100644 --- a/target/riscv/insn_trans/trans_xthead.c.inc +++ b/target/riscv/insn_trans/trans_xthead.c.inc @@ -992,7 +992,6 @@ static bool trans_th_sfence_vmas(DisasContext *ctx, arg_th_sfence_vmas *a) #endif } -#ifndef CONFIG_USER_ONLY static void gen_th_sync_local(DisasContext *ctx) { /* @@ -1003,14 +1002,12 @@ static void gen_th_sync_local(DisasContext *ctx) tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; } -#endif static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a) { (void) a; REQUIRE_XTHEADSYNC(ctx); -#ifndef CONFIG_USER_ONLY REQUIRE_PRIV_MSU(ctx); /* @@ -1019,9 +1016,6 @@ static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a) gen_th_sync_local(ctx); return true; -#else - return false; -#endif } static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a) @@ -1029,7 +1023,6 @@ static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a) (void) a; REQUIRE_XTHEADSYNC(ctx); -#ifndef CONFIG_USER_ONLY REQUIRE_PRIV_MSU(ctx); /* @@ -1038,9 +1031,6 @@ static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a) gen_th_sync_local(ctx); return true; -#else - return false; -#endif } static bool trans_th_sync_is(DisasContext *ctx, arg_th_sync_is *a) diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 680a729cd8..422e4f121c 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -86,6 +86,27 @@ static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx) return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx; } +static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b) +{ + uint64_t size_ctz = __builtin_ctz(size_b); + + return id | (size_ctz << KVM_REG_SIZE_SHIFT); +} + +static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu, + uint64_t idx) +{ + uint64_t id; + size_t size_b; + + g_assert(idx < 32); + + id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx); + size_b = cpu->cfg.vlenb; + + return kvm_encode_reg_size_id(id, size_b); +} + #define RISCV_CORE_REG(env, name) \ kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \ KVM_REG_RISCV_CORE_REG(name)) @@ -145,7 +166,7 @@ typedef struct KVMCPUConfig { const char *name; const char *description; target_ulong offset; - int kvm_reg_id; + uint64_t kvm_reg_id; bool user_set; bool supported; } KVMCPUConfig; @@ 
-352,29 +373,12 @@ static KVMCPUConfig kvm_cboz_blocksize = { .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) }; -static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v, - const char *name, - void *opaque, Error **errp) -{ - KVMCPUConfig *cbomz_cfg = opaque; - RISCVCPU *cpu = RISCV_CPU(obj); - uint16_t value, *host_val; - - if (!visit_type_uint16(v, name, &value, errp)) { - return; - } - - host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg); - - if (value != *host_val) { - error_report("Unable to set %s to a different value than " - "the host (%u)", - cbomz_cfg->name, *host_val); - exit(EXIT_FAILURE); - } - - cbomz_cfg->user_set = true; -} +static KVMCPUConfig kvm_v_vlenb = { + .name = "vlenb", + .offset = CPU_CFG_OFFSET(vlenb), + .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR | + KVM_REG_RISCV_VECTOR_CSR_REG(vlenb) +}; static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) { @@ -493,14 +497,6 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj) NULL, multi_cfg); } - object_property_add(cpu_obj, "cbom_blocksize", "uint16", - NULL, kvm_cpu_set_cbomz_blksize, - NULL, &kvm_cbom_blocksize); - - object_property_add(cpu_obj, "cboz_blocksize", "uint16", - NULL, kvm_cpu_set_cbomz_blksize, - NULL, &kvm_cboz_blocksize); - riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions); riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts); riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts); @@ -716,9 +712,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) static int kvm_riscv_get_regs_vector(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; target_ulong reg; - int ret = 0; + uint64_t vreg_id; + int vreg_idx, ret = 0; if (!riscv_has_ext(env, RVV)) { return 0; @@ -742,14 +740,39 @@ static int kvm_riscv_get_regs_vector(CPUState *cs) } env->vtype = reg; + if (kvm_v_vlenb.supported) { + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg); + if (ret) { + return ret; + } + cpu->cfg.vlenb = reg; + + for (int i = 0; i < 32; i++) { + /* + * vreg[] is statically allocated using RV_VLEN_MAX. + * Use it instead of vlenb to calculate vreg_idx for + * simplicity. + */ + vreg_idx = i * RV_VLEN_MAX / 64; + vreg_id = kvm_riscv_vector_reg_id(cpu, i); + + ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]); + if (ret) { + return ret; + } + } + } + return 0; } static int kvm_riscv_put_regs_vector(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; target_ulong reg; - int ret = 0; + uint64_t vreg_id; + int vreg_idx, ret = 0; if (!riscv_has_ext(env, RVV)) { return 0; @@ -769,6 +792,29 @@ static int kvm_riscv_put_regs_vector(CPUState *cs) reg = env->vtype; ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg); + if (ret) { + return ret; + } + + if (kvm_v_vlenb.supported) { + reg = cpu->cfg.vlenb; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg); + + for (int i = 0; i < 32; i++) { + /* + * vreg[] is statically allocated using RV_VLEN_MAX. + * Use it instead of vlenb to calculate vreg_idx for + * simplicity.
+ */ + vreg_idx = i * RV_VLEN_MAX / 64; + vreg_id = kvm_riscv_vector_reg_id(cpu, i); + + ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]); + if (ret) { + return ret; + } + } + } return ret; } @@ -953,6 +999,33 @@ static int uint64_cmp(const void *a, const void *b) return 0; } +static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, + struct kvm_reg_list *reglist) +{ + struct kvm_one_reg reg; + struct kvm_reg_list *reg_search; + uint64_t val; + int ret; + + reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n, + sizeof(uint64_t), uint64_cmp); + + if (reg_search) { + reg.id = kvm_v_vlenb.kvm_reg_id; + reg.addr = (uint64_t)&val; + + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); + if (ret != 0) { + error_report("Unable to read vlenb register, error code: %s", + strerrorname_np(errno)); + exit(EXIT_FAILURE); + } + + kvm_v_vlenb.supported = true; + cpu->cfg.vlenb = val; + } +} + static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { KVMCPUConfig *multi_ext_cfg; @@ -1027,6 +1100,10 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) if (cpu->cfg.ext_zicboz) { kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize); } + + if (riscv_has_ext(&cpu->env, RVV)) { + kvm_riscv_read_vlenb(cpu, kvmcpu, reglist); + } } static void riscv_init_kvm_registers(Object *cpu_obj) @@ -1559,19 +1636,10 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, static void kvm_cpu_instance_init(CPUState *cs) { Object *obj = OBJECT(RISCV_CPU(cs)); - DeviceState *dev = DEVICE(obj); riscv_init_kvm_registers(obj); kvm_riscv_add_cpu_user_properties(obj); - - for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { - /* Check if we have a specific KVM handler for the option */ - if (object_property_find(obj, prop->name)) { - continue; - } - qdev_property_add_static(dev, prop); - } } /* @@ -1598,6 +1666,88 @@ static bool kvm_cpu_realize(CPUState *cs, Error **errp) return true; } +void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) +{ + CPURISCVState *env = &cpu->env; + KVMScratchCPU kvmcpu; + struct kvm_one_reg reg; + uint64_t val; + int ret; + + /* short-circuit without spinning the scratch CPU */ + if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz && + !riscv_has_ext(env, RVV)) { + return; + } + + if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) { + error_setg(errp, "Unable to create scratch KVM cpu"); + return; + } + + if (cpu->cfg.ext_zicbom && + riscv_cpu_option_set(kvm_cbom_blocksize.name)) { + + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + kvm_cbom_blocksize.kvm_reg_id); + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg); + if (ret != 0) { + error_setg(errp, "Unable to read cbom_blocksize, error %d", errno); + return; + } + + if (cpu->cfg.cbom_blocksize != val) { + error_setg(errp, "Unable to set cbom_blocksize to a different " + "value than the host (%lu)", val); + return; + } + } + + if (cpu->cfg.ext_zicboz && + riscv_cpu_option_set(kvm_cboz_blocksize.name)) { + + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + kvm_cboz_blocksize.kvm_reg_id); + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg); + if (ret != 0) { + error_setg(errp, "Unable to read cboz_blocksize, error %d", errno); + return; + } + + if (cpu->cfg.cboz_blocksize != val) { + error_setg(errp, "Unable to set cboz_blocksize to a different " + "value than the host (%lu)", val); + return; + } + } + + /* Users are setting vlen, not vlenb */
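Aside (not part of the patch): two arithmetic details in the KVM hunks above are easy to sanity-check in isolation — the size exponent that kvm_encode_reg_size_id() shifts into a register ID, and the fixed vreg_idx stride used by both sync loops. The RV_VLEN_MAX value and the vlenb of 16 below are assumptions made purely for the demo.

#include <stdio.h>

#define RV_VLEN_MAX 1024   /* assumed value, for illustration only */

int main(void)
{
    /*
     * A KVM register ID encodes its size as log2(bytes); for a
     * power-of-two vlenb that exponent is just ctz(vlenb).
     */
    unsigned vlenb = 16;   /* hypothetical host with VLEN = 128 */

    printf("vlenb=%u -> size exponent %d (2^%d bytes)\n",
           vlenb, __builtin_ctz(vlenb), __builtin_ctz(vlenb));

    /*
     * env->vreg[] keeps a fixed stride of RV_VLEN_MAX / 64 uint64_t
     * slots per architectural register, independent of runtime vlenb.
     */
    for (int i = 0; i < 4; i++) {
        printf("v%d starts at env->vreg[%d]\n", i, i * RV_VLEN_MAX / 64);
    }
    return 0;
}

The fixed stride works because env->vreg[] is statically sized by RV_VLEN_MAX, as the in-loop comment in the patch notes, so the slot of a given register never depends on the runtime VLEN.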
+ if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) { + if (!kvm_v_vlenb.supported) { + error_setg(errp, "Unable to set 'vlenb': register not supported"); + return; + } + + reg.id = kvm_v_vlenb.kvm_reg_id; + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg); + if (ret != 0) { + error_setg(errp, "Unable to read vlenb register, error %d", errno); + return; + } + + if (cpu->cfg.vlenb != val) { + error_setg(errp, "Unable to set 'vlen' to a different " + "value than the host (%lu)", val * 8); + return; + } + } + + kvm_riscv_destroy_scratch_vcpu(&kvmcpu); +} + static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); @@ -1619,14 +1769,14 @@ static void kvm_cpu_accel_register_types(void) } type_init(kvm_cpu_accel_register_types); -static void riscv_host_cpu_init(Object *obj) +static void riscv_host_cpu_class_init(ObjectClass *c, void *data) { - CPURISCVState *env = &RISCV_CPU(obj)->env; + RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); #if defined(TARGET_RISCV32) - env->misa_mxl_max = env->misa_mxl = MXL_RV32; + mcc->misa_mxl_max = MXL_RV32; #elif defined(TARGET_RISCV64) - env->misa_mxl_max = env->misa_mxl = MXL_RV64; + mcc->misa_mxl_max = MXL_RV64; #endif } @@ -1634,7 +1784,7 @@ static const TypeInfo riscv_kvm_cpu_type_infos[] = { { .name = TYPE_RISCV_CPU_HOST, .parent = TYPE_RISCV_CPU, - .instance_init = riscv_host_cpu_init, + .class_init = riscv_host_cpu_class_init, } }; diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h index 8329cfab82..4bd98fddc7 100644 --- a/target/riscv/kvm/kvm_riscv.h +++ b/target/riscv/kvm/kvm_riscv.h @@ -27,5 +27,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, uint64_t guest_num); void riscv_kvm_aplic_request(void *opaque, int irq, int level); int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state); +void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp); #endif diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 72fe2374dc..81cf22894e 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -178,10 +178,9 @@ static const VMStateDescription vmstate_pointermasking = { static bool rv128_needed(void *opaque) { - RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque); - return env->misa_mxl_max == MXL_RV128; + return mcc->misa_mxl_max == MXL_RV128; } static const VMStateDescription vmstate_rv128 = { @@ -372,7 +371,7 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.vext_ver, RISCVCPU), VMSTATE_UINT32(env.misa_mxl, RISCVCPU), VMSTATE_UINT32(env.misa_ext, RISCVCPU), - VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU), + VMSTATE_UNUSED(4), VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU), VMSTATE_UINTTL(env.priv, RISCVCPU), VMSTATE_BOOL(env.virt_enabled, RISCVCPU), diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index b7da92783b..dd5228c288 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -268,97 +268,24 @@ static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) } } -static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) -{ - RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); - CPUClass *cc = CPU_CLASS(mcc); - CPURISCVState *env = &cpu->env; - - /* Validate that MISA_MXL is set properly.
*/ - switch (env->misa_mxl_max) { -#ifdef TARGET_RISCV64 - case MXL_RV64: - case MXL_RV128: - cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; - break; -#endif - case MXL_RV32: - cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; - break; - default: - g_assert_not_reached(); - } - - if (env->misa_mxl_max != env->misa_mxl) { - error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); - return; - } -} - -static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) -{ - CPURISCVState *env = &cpu->env; - int priv_version = -1; - - if (cpu->cfg.priv_spec) { - if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { - priv_version = PRIV_VERSION_1_12_0; - } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { - priv_version = PRIV_VERSION_1_11_0; - } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { - priv_version = PRIV_VERSION_1_10_0; - } else { - error_setg(errp, - "Unsupported privilege spec version '%s'", - cpu->cfg.priv_spec); - return; - } - - env->priv_ver = priv_version; - } -} - static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, Error **errp) { - if (!is_power_of_2(cfg->vlen)) { - error_setg(errp, "Vector extension VLEN must be power of 2"); - return; - } + uint32_t vlen = cfg->vlenb << 3; - if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { + if (vlen > RV_VLEN_MAX || vlen < 128) { error_setg(errp, "Vector extension implementation only supports VLEN " "in the range [128, %d]", RV_VLEN_MAX); return; } - if (!is_power_of_2(cfg->elen)) { - error_setg(errp, "Vector extension ELEN must be power of 2"); - return; - } - if (cfg->elen > 64 || cfg->elen < 8) { error_setg(errp, "Vector extension implementation only supports ELEN " "in the range [8, 64]"); return; } - - if (cfg->vext_spec) { - if (!g_strcmp0(cfg->vext_spec, "v1.0")) { - env->vext_ver = VEXT_VERSION_1_00_0; - } else { - error_setg(errp, "Unsupported vector spec version '%s'", - cfg->vext_spec); - return; - } - } else if (env->vext_ver == 0) { - qemu_log("vector version is not specified, " - "use the default value v1.0\n"); - - env->vext_ver = VEXT_VERSION_1_00_0; - } } static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) @@ -442,12 +369,42 @@ static void riscv_cpu_validate_g(RISCVCPU *cpu) } } +static void riscv_cpu_validate_b(RISCVCPU *cpu) +{ + const char *warn_msg = "RVB mandates disabled extension %s"; + + if (!cpu->cfg.ext_zba) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) { + cpu->cfg.ext_zba = true; + } else { + warn_report(warn_msg, "zba"); + } + } + + if (!cpu->cfg.ext_zbb) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) { + cpu->cfg.ext_zbb = true; + } else { + warn_report(warn_msg, "zbb"); + } + } + + if (!cpu->cfg.ext_zbs) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) { + cpu->cfg.ext_zbs = true; + } else { + warn_report(warn_msg, "zbs"); + } + } +} + /* * Check consistency between chosen extensions while setting * cpu->cfg accordingly. 
*/ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) { + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); CPURISCVState *env = &cpu->env; Error *local_err = NULL; @@ -455,6 +412,10 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) riscv_cpu_validate_g(cpu); } + if (riscv_has_ext(env, RVB)) { + riscv_cpu_validate_b(cpu); + } + if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { error_setg(errp, "I and E extensions are incompatible"); @@ -610,7 +571,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true); cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); - if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } } @@ -618,7 +579,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); - if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } if (riscv_has_ext(env, RVD)) { @@ -626,7 +587,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) } } - if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { + if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { error_setg(errp, "Zcf extension is only relevant to RV32"); return; } @@ -876,12 +837,6 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) CPURISCVState *env = &cpu->env; Error *local_err = NULL; - riscv_cpu_validate_priv_spec(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - riscv_cpu_validate_misa_priv(env, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); @@ -917,11 +872,6 @@ static bool riscv_cpu_is_generic(Object *cpu_obj) return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; } -static bool riscv_cpu_is_vendor(Object *cpu_obj) -{ - return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; -} - /* * We'll get here via the following path: * @@ -932,7 +882,6 @@ static bool riscv_cpu_is_vendor(Object *cpu_obj) static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) { RISCVCPU *cpu = RISCV_CPU(cs); - Error *local_err = NULL; if (!riscv_cpu_tcg_compatible(cpu)) { g_autofree char *name = riscv_cpu_get_name(cpu); @@ -941,14 +890,9 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) return false; } - riscv_cpu_validate_misa_mxl(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return false; - } - #ifndef CONFIG_USER_ONLY CPURISCVState *env = &cpu->env; + Error *local_err = NULL; CPU(cs)->tcg_cflags |= CF_PCREL; @@ -1056,6 +1000,7 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { MISA_CFG(RVJ, false), MISA_CFG(RVV, false), MISA_CFG(RVG, false), + MISA_CFG(RVB, false), }; /* @@ -1326,10 +1271,6 @@ static void riscv_cpu_add_user_properties(Object *obj) riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); riscv_cpu_add_profiles(obj); - - for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { - qdev_property_add_static(DEVICE(obj), prop); - } } /* @@ -1343,7 +1284,7 @@ 
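Aside (not part of the patch): the validation rework keeps vlenb (bytes) as the stored configuration and derives VLEN (bits) only where needed, which is also why the KVM finalize path above reports a host mismatch as val * 8. Below is a hedged sketch of the range check performed by the reworked riscv_cpu_validate_v(), with RV_VLEN_MAX assumed to be 1024 for the example.

#include <stdbool.h>
#include <stdio.h>

#define RV_VLEN_MAX 1024   /* assumed here purely for the example */

/* VLEN is recovered as vlenb << 3 before checking the supported range. */
static bool sketch_vlen_ok(unsigned vlenb)
{
    unsigned vlen = vlenb << 3;

    return vlen >= 128 && vlen <= RV_VLEN_MAX;
}

int main(void)
{
    unsigned samples[] = { 8, 16, 64, 128, 256 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("vlenb=%-3u (VLEN=%4u) -> %s\n", samples[i], samples[i] << 3,
               sketch_vlen_ok(samples[i]) ? "accepted" : "rejected");
    }
    return 0;
}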
static void riscv_init_max_cpu_extensions(Object *obj) const RISCVCPUMultiExtConfig *prop; /* Enable RVG, RVJ and RVV that are disabled by default */ - riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV); + riscv_cpu_set_misa_ext(env, env->misa_ext | RVG | RVJ | RVV); for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { isa_ext_update_enabled(cpu, prop->offset, true); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index ab18899122..177418b2b9 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1168,6 +1168,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu_env(cs); + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs); RISCVCPU *cpu = RISCV_CPU(cs); uint32_t tb_flags = ctx->base.tb->flags; @@ -1189,7 +1190,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO); ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); - ctx->misa_mxl_max = env->misa_mxl_max; + ctx->misa_mxl_max = mcc->misa_mxl_max; ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL); ctx->cs = cs; diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index fe0d5d053c..84cec73eb2 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -35,19 +35,28 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, { int vlmax, vl; RISCVCPU *cpu = env_archcpu(env); - uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL); - uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW); + uint64_t vlmul = FIELD_EX64(s2, VTYPE, VLMUL); + uint8_t vsew = FIELD_EX64(s2, VTYPE, VSEW); + uint16_t sew = 8 << vsew; uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV); int xlen = riscv_cpu_xlen(env); bool vill = (s2 >> (xlen - 1)) & 0x1; target_ulong reserved = s2 & MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT, xlen - 1 - R_VTYPE_RESERVED_SHIFT); + int8_t lmul; - if (lmul & 4) { - /* Fractional LMUL - check LMUL * VLEN >= SEW */ - if (lmul == 4 || - cpu->cfg.vlen >> (8 - lmul) < sew) { + if (vlmul & 4) { + /* + * Fractional LMUL, check: + * + * VLEN * LMUL >= SEW + * VLEN >> (8 - lmul) >= sew + * (vlenb << 3) >> (8 - lmul) >= sew + * vlenb >> (8 - 3 - lmul) >= sew + */ + if (vlmul == 4 || + cpu->cfg.vlenb >> (8 - 3 - vlmul) < sew) { vill = true; } } @@ -61,7 +70,9 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, return 0; } - vlmax = vext_get_vlmax(cpu, s2); + /* lmul encoded as in DisasContext::lmul */ + lmul = sextract32(FIELD_EX64(s2, VTYPE, VLMUL), 0, 3); + vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); if (s1 <= vlmax) { vl = s1; } else { @@ -559,7 +570,7 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, { uint32_t i, k, off, pos; uint32_t nf = vext_nf(desc); - uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + uint32_t vlenb = riscv_cpu_cfg(env)->vlenb; uint32_t max_elems = vlenb >> log2_esz; k = env->vstart / max_elems; @@ -930,7 +941,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ @@ -968,7 +979,7 @@ void HELPER(NAME)(void *vd, void *v0, 
target_ulong s1, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ @@ -1172,7 +1183,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -1237,7 +1248,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -3972,7 +3983,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -4012,7 +4023,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -4529,7 +4540,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t desc) \ { \ uint32_t vl = env->vl; \ - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3;\ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ int a, b; \ @@ -4616,7 +4627,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, { uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; - uint32_t total_elems = riscv_cpu_cfg(env)->vlen; + uint32_t total_elems = riscv_cpu_cfg(env)->vlenb << 3; uint32_t vta_all_1s = vext_vta_all_1s(desc); uint32_t vma = vext_vma(desc); int i;
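Aside (not part of the patch): the mask helpers above simply recover VLEN as vlenb << 3 for total_elems; the vsetvl hunk is the subtler conversion. Below is a worked sketch of the fractional-LMUL legality rule from the new derivation comment, deliberately written against the intermediate (vlenb << 3) >> (8 - vlmul) form so the shift count stays between 1 and 3 for the three fractional encodings; the helper name and sample values are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Fractional LMUL legality: LMUL * VLEN >= SEW, where vlmul encodings
 * 5, 6 and 7 mean LMUL = 1/8, 1/4 and 1/2, and vlmul == 4 is reserved.
 * Only call with vlmul in 4..7.
 */
static bool frac_lmul_ok(uint32_t vlenb, uint32_t vlmul, uint32_t sew)
{
    uint32_t vlen = vlenb << 3;

    if (vlmul == 4) {
        return false;   /* reserved encoding */
    }
    return (vlen >> (8 - vlmul)) >= sew;
}

int main(void)
{
    /* VLEN = 128: LMUL = 1/8 cannot hold SEW = 64, LMUL = 1/2 can */
    printf("LMUL=1/8, SEW=64: %s\n", frac_lmul_ok(16, 5, 64) ? "ok" : "vill");
    printf("LMUL=1/2, SEW=64: %s\n", frac_lmul_ok(16, 7, 64) ? "ok" : "vill");
    return 0;
}

For VLEN = 128 the first case is rejected because LMUL = 1/8 leaves only 16 bits per register, which cannot hold a single 64-bit element.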