Fourth RISC-V PR for QEMU 7.0

  * Remove old Ibex PLIC header file
  * Allow writing 8 bytes with generic loader
  * Fixes for RV128
  * Refactor RISC-V CPU configs
  * Initial support for XVentanaCondOps custom extension
  * Fix for vill field in vtype
  * Fix trap cause for RV32 HS-mode CSR access from RV64 HS-mode
  * Support for svnapot, svinval and svpbmt extensions
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEE9sSsRtSTSGjTuM6PIeENKd+XcFQFAmIMmLQACgkQIeENKd+X
 cFTDuQf7Ba9LS+bPHShVBbbZSJEeaFbrNOBKbDT2pBVpJ02yj/yfEiwNp1sD1ich
 8Ud6QHUzBVZ1+yYXfZfrMZPD1B+Yq++9hAKLxlFQjS5E6e3WTUQTvkDqoABG03Wu
 4QPqVcoPGVBvnhO4kuMoDidItljTUGEOGG1m/kc3eXsQ/e9A1PgDWsZMYkLDKkxE
 DOCzgyCNKeWB4Wq6IRTUqmXNMl6WjMKO8qouQUGdlROfQ9HFAfELwIBoCRgQ7LCT
 KiOM04ts5Fv5qjhFH/e4+9zxCiS9YX56qJooNHgNFqr1EOzGl1XzMu77KkT8LhC4
 vJKX+9v4VO8u9fybDEOyELRXHgkEdQ==
 =R9lk
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/alistair/tags/pull-riscv-to-apply-20220216' into staging

Fourth RISC-V PR for QEMU 7.0

 * Remove old Ibex PLIC header file
 * Allow writing 8 bytes with generic loader
 * Fixes for RV128
 * Refactor RISC-V CPU configs
 * Initial support for XVentanaCondOps custom extension
 * Fix for vill field in vtype
 * Fix trap cause for RV32 HS-mode CSR access from RV64 HS-mode
 * Support for svnapot, svinval and svpbmt extensions

# gpg: Signature made Wed 16 Feb 2022 06:24:52 GMT
# gpg:                using RSA key F6C4AC46D4934868D3B8CE8F21E10D29DF977054
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [full]
# Primary key fingerprint: F6C4 AC46 D493 4868 D3B8  CE8F 21E1 0D29 DF97 7054

* remotes/alistair/tags/pull-riscv-to-apply-20220216: (35 commits)
  docs/system: riscv: Update description of CPU
  target/riscv: add support for svpbmt extension
  target/riscv: add support for svinval extension
  target/riscv: add support for svnapot extension
  target/riscv: add PTE_A/PTE_D/PTE_U bits check for inner PTE
  target/riscv: Ignore reserved bits in PTE for RV64
  hw/intc: Add RISC-V AIA APLIC device emulation
  target/riscv: Allow users to force enable AIA CSRs in HART
  hw/riscv: virt: Use AIA INTC compatible string when available
  target/riscv: Implement AIA IMSIC interface CSRs
  target/riscv: Implement AIA xiselect and xireg CSRs
  target/riscv: Implement AIA mtopi, stopi, and vstopi CSRs
  target/riscv: Implement AIA interrupt filtering CSRs
  target/riscv: Implement AIA hvictl and hviprioX CSRs
  target/riscv: Implement AIA CSRs for 64 local interrupts on RV32
  target/riscv: Implement AIA local interrupt priorities
  target/riscv: Allow AIA device emulation to set ireg rmw callback
  target/riscv: Add defines for AIA CSRs
  target/riscv: Add AIA cpu feature
  target/riscv: Allow setting CPU feature from machine/device emulation
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2022-02-16 09:57:11 +00:00
commit c13b8e9973
27 changed files with 3268 additions and 386 deletions

View File

@ -286,6 +286,13 @@ F: include/hw/riscv/
F: linux-user/host/riscv32/
F: linux-user/host/riscv64/
RISC-V XVentanaCondOps extension
M: Philipp Tomsich <philipp.tomsich@vrull.eu>
L: qemu-riscv@nongnu.org
S: Supported
F: target/riscv/XVentanaCondOps.decode
F: target/riscv/insn_trans/trans_xventanacondops.c.inc
RENESAS RX CPUs
R: Yoshinori Sato <ysato@users.sourceforge.jp>
S: Orphan

View File

@ -23,9 +23,9 @@ The ``virt`` machine supports the following devices:
* 1 generic PCIe host bridge
* The fw_cfg device that allows a guest to obtain data from QEMU
Note that the default CPU is a generic RV32GC/RV64GC. Optional extensions
can be enabled via command line parameters, e.g.: ``-cpu rv64,x-h=true``
enables the hypervisor extension for RV64.
The hypervisor extension has been enabled for the default CPU, so virtual
machines with hypervisor extension can simply be used without explicitly
declaring.
Hardware configuration information
----------------------------------

View File

@ -56,7 +56,7 @@ static void generic_loader_reset(void *opaque)
}
if (s->data_len) {
assert(s->data_len < sizeof(s->data));
assert(s->data_len <= sizeof(s->data));
dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len,
MEMTXATTRS_UNSPECIFIED);
}

View File

@ -70,6 +70,9 @@ config LOONGSON_LIOINTC
config RISCV_ACLINT
bool
config RISCV_APLIC
bool
config SIFIVE_PLIC
bool

View File

@ -50,6 +50,7 @@ specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],

978
hw/intc/riscv_aplic.c Normal file
View File

@ -0,0 +1,978 @@
/*
* RISC-V APLIC (Advanced Platform Level Interrupt Controller)
*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
#include "exec/address-spaces.h"
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/intc/riscv_aplic.h"
#include "hw/irq.h"
#include "target/riscv/cpu.h"
#include "sysemu/sysemu.h"
#include "migration/vmstate.h"
#define APLIC_MAX_IDC (1UL << 14)
#define APLIC_MAX_SOURCE 1024
#define APLIC_MIN_IPRIO_BITS 1
#define APLIC_MAX_IPRIO_BITS 8
#define APLIC_MAX_CHILDREN 1024
#define APLIC_DOMAINCFG 0x0000
#define APLIC_DOMAINCFG_RDONLY 0x80000000
#define APLIC_DOMAINCFG_IE (1 << 8)
#define APLIC_DOMAINCFG_DM (1 << 2)
#define APLIC_DOMAINCFG_BE (1 << 0)
#define APLIC_SOURCECFG_BASE 0x0004
#define APLIC_SOURCECFG_D (1 << 10)
#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
#define APLIC_SOURCECFG_SM_MASK 0x00000007
#define APLIC_SOURCECFG_SM_INACTIVE 0x0
#define APLIC_SOURCECFG_SM_DETACH 0x1
#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
#define APLIC_MMSICFGADDR 0x1bc0
#define APLIC_MMSICFGADDRH 0x1bc4
#define APLIC_SMSICFGADDR 0x1bc8
#define APLIC_SMSICFGADDRH 0x1bcc
#define APLIC_xMSICFGADDRH_L (1UL << 31)
#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f
#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24
#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7
#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20
#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7
#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16
#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf
#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12
#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff
#define APLIC_xMSICFGADDR_PPN_SHIFT 12
#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \
((1UL << (__lhxs)) - 1)
#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \
((1UL << (__lhxw)) - 1)
#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \
((__lhxs))
#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \
(APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \
APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs))
#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \
((1UL << (__hhxw)) - 1)
#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \
((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT)
#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \
(APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \
APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs))
#define APLIC_xMSICFGADDRH_VALID_MASK \
(APLIC_xMSICFGADDRH_L | \
(APLIC_xMSICFGADDRH_HHXS_MASK << APLIC_xMSICFGADDRH_HHXS_SHIFT) | \
(APLIC_xMSICFGADDRH_LHXS_MASK << APLIC_xMSICFGADDRH_LHXS_SHIFT) | \
(APLIC_xMSICFGADDRH_HHXW_MASK << APLIC_xMSICFGADDRH_HHXW_SHIFT) | \
(APLIC_xMSICFGADDRH_LHXW_MASK << APLIC_xMSICFGADDRH_LHXW_SHIFT) | \
APLIC_xMSICFGADDRH_BAPPN_MASK)
#define APLIC_SETIP_BASE 0x1c00
#define APLIC_SETIPNUM 0x1cdc
#define APLIC_CLRIP_BASE 0x1d00
#define APLIC_CLRIPNUM 0x1ddc
#define APLIC_SETIE_BASE 0x1e00
#define APLIC_SETIENUM 0x1edc
#define APLIC_CLRIE_BASE 0x1f00
#define APLIC_CLRIENUM 0x1fdc
#define APLIC_SETIPNUM_LE 0x2000
#define APLIC_SETIPNUM_BE 0x2004
#define APLIC_ISTATE_PENDING (1U << 0)
#define APLIC_ISTATE_ENABLED (1U << 1)
#define APLIC_ISTATE_ENPEND (APLIC_ISTATE_ENABLED | \
APLIC_ISTATE_PENDING)
#define APLIC_ISTATE_INPUT (1U << 8)
#define APLIC_GENMSI 0x3000
#define APLIC_TARGET_BASE 0x3004
#define APLIC_TARGET_HART_IDX_SHIFT 18
#define APLIC_TARGET_HART_IDX_MASK 0x3fff
#define APLIC_TARGET_GUEST_IDX_SHIFT 12
#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
#define APLIC_TARGET_IPRIO_MASK 0xff
#define APLIC_TARGET_EIID_MASK 0x7ff
#define APLIC_IDC_BASE 0x4000
#define APLIC_IDC_SIZE 32
#define APLIC_IDC_IDELIVERY 0x00
#define APLIC_IDC_IFORCE 0x04
#define APLIC_IDC_ITHRESHOLD 0x08
#define APLIC_IDC_TOPI 0x18
#define APLIC_IDC_TOPI_ID_SHIFT 16
#define APLIC_IDC_TOPI_ID_MASK 0x3ff
#define APLIC_IDC_TOPI_PRIO_MASK 0xff
#define APLIC_IDC_CLAIMI 0x1c
/*
 * Assemble a 32-bit snapshot of the raw input lines for IRQ word @word:
 * bit i reflects the INPUT state of interrupt identity word * 32 + i.
 */
static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
                                            uint32_t word)
{
    uint32_t bit, irq, bits = 0;

    for (bit = 0; bit < 32; bit++) {
        irq = word * 32 + bit;
        /* Identity 0 does not exist; out-of-range identities read as 0. */
        if (irq == 0 || irq >= aplic->num_irqs) {
            continue;
        }
        if (aplic->state[irq] & APLIC_ISTATE_INPUT) {
            bits |= 1U << bit;
        }
    }

    return bits;
}
/*
 * Assemble a 32-bit snapshot of the pending bits for IRQ word @word:
 * bit i reflects the PENDING state of interrupt identity word * 32 + i.
 */
static uint32_t riscv_aplic_read_pending_word(RISCVAPLICState *aplic,
                                              uint32_t word)
{
    uint32_t bit, irq, bits = 0;

    for (bit = 0; bit < 32; bit++) {
        irq = word * 32 + bit;
        /* Identity 0 does not exist; out-of-range identities read as 0. */
        if (irq == 0 || irq >= aplic->num_irqs) {
            continue;
        }
        if (aplic->state[irq] & APLIC_ISTATE_PENDING) {
            bits |= 1U << bit;
        }
    }

    return bits;
}
/* Unconditionally set/clear the pending bit of @irq (no config checks). */
static void riscv_aplic_set_pending_raw(RISCVAPLICState *aplic,
                                        uint32_t irq, bool pending)
{
    uint32_t cleared = aplic->state[irq] & ~APLIC_ISTATE_PENDING;

    aplic->state[irq] = pending ? (cleared | APLIC_ISTATE_PENDING) : cleared;
}
/*
 * Set or clear the pending bit of @irq, honouring the source
 * configuration: delegated and inactive sources never latch, and
 * software writes to level-triggered sources are ignored (the pending
 * bit tracks the input line instead), except that in MSI mode a
 * pending-set on a level-triggered source is allowed through.
 */
static void riscv_aplic_set_pending(RISCVAPLICState *aplic,
                                    uint32_t irq, bool pending)
{
    uint32_t sourcecfg, sm;

    if ((irq <= 0) || (aplic->num_irqs <= irq)) {
        return;
    }

    sourcecfg = aplic->sourcecfg[irq];
    if (sourcecfg & APLIC_SOURCECFG_D) {
        return;
    }

    sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
    /*
     * Simplified from the original redundant form
     * (!msimode || (msimode && !pending)): block level-triggered
     * sources unless this is a pending-set in MSI mode.
     */
    if ((sm == APLIC_SOURCECFG_SM_INACTIVE) ||
        ((!aplic->msimode || !pending) &&
         ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
          (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))) {
        return;
    }

    riscv_aplic_set_pending_raw(aplic, irq, pending);
}
/*
 * Apply a SETIP/CLRIP word write: for each bit set in @value, set or
 * clear (per @pending) the pending state of identity word * 32 + bit.
 */
static void riscv_aplic_set_pending_word(RISCVAPLICState *aplic,
                                         uint32_t word, uint32_t value,
                                         bool pending)
{
    uint32_t bit, irq;

    for (bit = 0; bit < 32; bit++) {
        if (!(value & (1U << bit))) {
            continue;
        }
        irq = word * 32 + bit;
        if (irq != 0 && irq < aplic->num_irqs) {
            riscv_aplic_set_pending(aplic, irq, pending);
        }
    }
}
/*
 * Return the 32-bit enable mask for IRQ word @word: bit i reflects the
 * ENABLED state of interrupt identity word * 32 + i.
 *
 * @word is uint32_t for consistency with the other *_word helpers
 * (read_input_word, read_pending_word) and to avoid mixed
 * signed/unsigned arithmetic in the index computation.
 */
static uint32_t riscv_aplic_read_enabled_word(RISCVAPLICState *aplic,
                                              uint32_t word)
{
    uint32_t i, irq, ret = 0;

    for (i = 0; i < 32; i++) {
        irq = word * 32 + i;
        /* Identity 0 does not exist; out-of-range identities read as 0. */
        if (!irq || aplic->num_irqs <= irq) {
            continue;
        }

        ret |= ((aplic->state[irq] & APLIC_ISTATE_ENABLED) ? 1 : 0) << i;
    }

    return ret;
}
/* Unconditionally set/clear the enable bit of @irq (no config checks). */
static void riscv_aplic_set_enabled_raw(RISCVAPLICState *aplic,
                                        uint32_t irq, bool enabled)
{
    uint32_t cleared = aplic->state[irq] & ~APLIC_ISTATE_ENABLED;

    aplic->state[irq] = enabled ? (cleared | APLIC_ISTATE_ENABLED) : cleared;
}
/*
 * Set or clear the enable bit of @irq, honouring the source
 * configuration: delegated and inactive sources cannot be enabled.
 */
static void riscv_aplic_set_enabled(RISCVAPLICState *aplic,
                                    uint32_t irq, bool enabled)
{
    uint32_t cfg;

    if (irq == 0 || irq >= aplic->num_irqs) {
        return;
    }

    cfg = aplic->sourcecfg[irq];
    if ((cfg & APLIC_SOURCECFG_D) ||
        (cfg & APLIC_SOURCECFG_SM_MASK) == APLIC_SOURCECFG_SM_INACTIVE) {
        return;
    }

    riscv_aplic_set_enabled_raw(aplic, irq, enabled);
}
/*
 * Apply a SETIE/CLRIE word write: for each bit set in @value, set or
 * clear (per @enabled) the enable state of identity word * 32 + bit.
 */
static void riscv_aplic_set_enabled_word(RISCVAPLICState *aplic,
                                         uint32_t word, uint32_t value,
                                         bool enabled)
{
    uint32_t bit, irq;

    for (bit = 0; bit < 32; bit++) {
        if (!(value & (1U << bit))) {
            continue;
        }
        irq = word * 32 + bit;
        if (irq != 0 && irq < aplic->num_irqs) {
            riscv_aplic_set_enabled(aplic, irq, enabled);
        }
    }
}
/*
 * Compose and issue the MSI memory write that signals @eiid to the
 * target (@hart_idx, @guest_idx).  The MSI base-address geometry
 * (LHXS/LHXW/HHXS/HHXW and base PPN) lives in the machine-level
 * domain's m/s MSI config registers, so the M-mode ancestor of @aplic
 * is located first.
 */
static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
                                 uint32_t hart_idx, uint32_t guest_idx,
                                 uint32_t eiid)
{
    uint64_t addr;
    MemTxResult result;
    RISCVAPLICState *aplic_m;
    uint32_t lhxs, lhxw, hhxs, hhxw, group_idx, msicfgaddr, msicfgaddrH;

    /* Walk up the delegation chain to the machine-level domain. */
    aplic_m = aplic;
    while (aplic_m && !aplic_m->mmode) {
        aplic_m = aplic_m->parent;
    }
    if (!aplic_m) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n",
                      __func__);
        return;
    }

    /* M-mode domains send via the M config; S-mode via the S config. */
    if (aplic->mmode) {
        msicfgaddr = aplic_m->mmsicfgaddr;
        msicfgaddrH = aplic_m->mmsicfgaddrH;
    } else {
        msicfgaddr = aplic_m->smsicfgaddr;
        msicfgaddrH = aplic_m->smsicfgaddrH;
    }

    /* Decode the address-composition geometry fields from xmsicfgaddrH. */
    lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) &
            APLIC_xMSICFGADDRH_LHXS_MASK;
    lhxw = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXW_SHIFT) &
            APLIC_xMSICFGADDRH_LHXW_MASK;
    hhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_HHXS_SHIFT) &
            APLIC_xMSICFGADDRH_HHXS_MASK;
    hhxw = (msicfgaddrH >> APLIC_xMSICFGADDRH_HHXW_SHIFT) &
            APLIC_xMSICFGADDRH_HHXW_MASK;

    /* Split the hart index into a group index and an in-group index. */
    group_idx = hart_idx >> lhxw;
    hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw);

    /* Assemble the target PPN (base | group | hart | guest), then page-shift. */
    addr = msicfgaddr;
    addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32;
    addr |= ((uint64_t)(group_idx & APLIC_xMSICFGADDR_PPN_HHX_MASK(hhxw))) <<
             APLIC_xMSICFGADDR_PPN_HHX_SHIFT(hhxs);
    addr |= ((uint64_t)(hart_idx & APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw))) <<
             APLIC_xMSICFGADDR_PPN_LHX_SHIFT(lhxs);
    addr |= (uint64_t)(guest_idx & APLIC_xMSICFGADDR_PPN_HART(lhxs));
    addr <<= APLIC_xMSICFGADDR_PPN_SHIFT;

    /* The MSI itself is a 32-bit little-endian store of the EIID. */
    address_space_stl_le(&address_space_memory, addr,
                         eiid, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: MSI write failed for "
                      "hart_index=%d guest_index=%d eiid=%d\n",
                      __func__, hart_idx, guest_idx, eiid);
    }
}
/*
 * In MSI delivery mode, fire the MSI for @irq if it is both enabled and
 * pending and the domain has interrupts enabled.  Delivery consumes
 * (clears) the pending bit.
 */
static void riscv_aplic_msi_irq_update(RISCVAPLICState *aplic, uint32_t irq)
{
    uint32_t hart_idx, guest_idx, eiid;

    if (!aplic->msimode || (aplic->num_irqs <= irq) ||
        !(aplic->domaincfg & APLIC_DOMAINCFG_IE)) {
        return;
    }

    /* Only enabled-and-pending interrupts are delivered. */
    if ((aplic->state[irq] & APLIC_ISTATE_ENPEND) != APLIC_ISTATE_ENPEND) {
        return;
    }

    riscv_aplic_set_pending_raw(aplic, irq, false);

    /* The target register encodes hart index, guest index and EIID. */
    hart_idx = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT;
    hart_idx &= APLIC_TARGET_HART_IDX_MASK;
    if (aplic->mmode) {
        /* M-level APLIC ignores guest_index */
        guest_idx = 0;
    } else {
        guest_idx = aplic->target[irq] >> APLIC_TARGET_GUEST_IDX_SHIFT;
        guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
    }
    eiid = aplic->target[irq] & APLIC_TARGET_EIID_MASK;
    riscv_aplic_msi_send(aplic, hart_idx, guest_idx, eiid);
}
/*
 * Compute the topmost (best-priority) enabled-and-pending interrupt
 * targeting IDC @idc.  Returns (identity << APLIC_IDC_TOPI_ID_SHIFT) |
 * priority, or 0 when nothing is deliverable.
 */
static uint32_t riscv_aplic_idc_topi(RISCVAPLICState *aplic, uint32_t idc)
{
    uint32_t best_irq, best_iprio;
    uint32_t irq, iprio, ihartidx, ithres;

    if (aplic->num_harts <= idc) {
        return 0;
    }

    ithres = aplic->ithreshold[idc];
    best_irq = best_iprio = UINT32_MAX;
    for (irq = 1; irq < aplic->num_irqs; irq++) {
        if ((aplic->state[irq] & APLIC_ISTATE_ENPEND) !=
            APLIC_ISTATE_ENPEND) {
            continue;
        }

        /* Skip interrupts routed to a different IDC. */
        ihartidx = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT;
        ihartidx &= APLIC_TARGET_HART_IDX_MASK;
        if (ihartidx != idc) {
            continue;
        }

        /* A zero ithreshold disables priority masking. */
        iprio = aplic->target[irq] & aplic->iprio_mask;
        if (ithres && iprio >= ithres) {
            continue;
        }

        /* Lower numeric priority wins; ties keep the lower identity. */
        if (iprio < best_iprio) {
            best_irq = irq;
            best_iprio = iprio;
        }
    }

    if (best_irq < aplic->num_irqs && best_iprio <= aplic->iprio_mask) {
        return (best_irq << APLIC_IDC_TOPI_ID_SHIFT) | best_iprio;
    }

    return 0;
}
/*
 * Re-evaluate and drive the external IRQ line of IDC @idc (direct
 * delivery mode only): raised when the domain has interrupts enabled,
 * delivery is on, and either iforce is set or a top interrupt exists.
 */
static void riscv_aplic_idc_update(RISCVAPLICState *aplic, uint32_t idc)
{
    uint32_t pending_topi;
    bool raise;

    if (aplic->msimode || idc >= aplic->num_harts) {
        return;
    }

    pending_topi = riscv_aplic_idc_topi(aplic, idc);
    raise = (aplic->domaincfg & APLIC_DOMAINCFG_IE) &&
            aplic->idelivery[idc] &&
            (aplic->iforce[idc] || pending_topi);

    if (raise) {
        qemu_irq_raise(aplic->external_irqs[idc]);
    } else {
        qemu_irq_lower(aplic->external_irqs[idc]);
    }
}
/*
 * CLAIMI read side effect: claim the top interrupt of IDC @idc and
 * clear its pending bit.  A level-triggered source whose input line is
 * still asserted is immediately re-latched as pending.  A read with no
 * deliverable interrupt clears the iforce bit and returns 0.
 */
static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
{
    uint32_t irq, state, sm, topi = riscv_aplic_idc_topi(aplic, idc);

    if (!topi) {
        aplic->iforce[idc] = 0;
        return 0;
    }

    irq = (topi >> APLIC_IDC_TOPI_ID_SHIFT) & APLIC_IDC_TOPI_ID_MASK;
    sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK;
    /* Snapshot state before clearing so the INPUT bit can be re-checked. */
    state = aplic->state[irq];
    riscv_aplic_set_pending_raw(aplic, irq, false);

    /* Level-triggered sources stay pending while the line is asserted. */
    if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) &&
        (state & APLIC_ISTATE_INPUT)) {
        riscv_aplic_set_pending_raw(aplic, irq, true);
    } else if ((sm == APLIC_SOURCECFG_SM_LEVEL_LOW) &&
               !(state & APLIC_ISTATE_INPUT)) {
        riscv_aplic_set_pending_raw(aplic, irq, true);
    }

    /* The claim may change the top interrupt; refresh the output line. */
    riscv_aplic_idc_update(aplic, idc);

    return topi;
}
/*
 * GPIO input handler: process a level change on wired interrupt @irq.
 * Delegated sources are forwarded to the configured child domain;
 * otherwise the pending bit is latched according to the source mode
 * (edge rise/fall or level high/low) and the relevant delivery path
 * (MSI or IDC line) is updated.
 */
static void riscv_aplic_request(void *opaque, int irq, int level)
{
    bool update = false;
    RISCVAPLICState *aplic = opaque;
    uint32_t sourcecfg, childidx, state, idc;

    assert((0 < irq) && (irq < aplic->num_irqs));

    /* Delegated sources are handled entirely by the child domain. */
    sourcecfg = aplic->sourcecfg[irq];
    if (sourcecfg & APLIC_SOURCECFG_D) {
        childidx = sourcecfg & APLIC_SOURCECFG_CHILDIDX_MASK;
        if (childidx < aplic->num_children) {
            riscv_aplic_request(aplic->children[childidx], irq, level);
        }
        return;
    }

    /* Snapshot the old state; the INPUT bit gives the previous level. */
    state = aplic->state[irq];
    switch (sourcecfg & APLIC_SOURCECFG_SM_MASK) {
    case APLIC_SOURCECFG_SM_EDGE_RISE:
        /* Latch on a low-to-high transition only. */
        if ((level > 0) && !(state & APLIC_ISTATE_INPUT) &&
            !(state & APLIC_ISTATE_PENDING)) {
            riscv_aplic_set_pending_raw(aplic, irq, true);
            update = true;
        }
        break;
    case APLIC_SOURCECFG_SM_EDGE_FALL:
        /* Latch on a high-to-low transition only. */
        if ((level <= 0) && (state & APLIC_ISTATE_INPUT) &&
            !(state & APLIC_ISTATE_PENDING)) {
            riscv_aplic_set_pending_raw(aplic, irq, true);
            update = true;
        }
        break;
    case APLIC_SOURCECFG_SM_LEVEL_HIGH:
        if ((level > 0) && !(state & APLIC_ISTATE_PENDING)) {
            riscv_aplic_set_pending_raw(aplic, irq, true);
            update = true;
        }
        break;
    case APLIC_SOURCECFG_SM_LEVEL_LOW:
        if ((level <= 0) && !(state & APLIC_ISTATE_PENDING)) {
            riscv_aplic_set_pending_raw(aplic, irq, true);
            update = true;
        }
        break;
    default:
        /* Inactive/detached sources latch nothing. */
        break;
    }

    /* Record the new raw input level. */
    if (level <= 0) {
        aplic->state[irq] &= ~APLIC_ISTATE_INPUT;
    } else {
        aplic->state[irq] |= APLIC_ISTATE_INPUT;
    }

    /* Only re-deliver when something was newly latched. */
    if (update) {
        if (aplic->msimode) {
            riscv_aplic_msi_irq_update(aplic, irq);
        } else {
            idc = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT;
            idc &= APLIC_TARGET_HART_IDX_MASK;
            riscv_aplic_idc_update(aplic, idc);
        }
    }
}
/*
 * MMIO read handler for the APLIC register file.  Decodes @addr into
 * the domain registers, per-source arrays, or per-hart IDC pages.
 * Misaligned or unmapped reads are logged as guest errors and return 0.
 */
static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size)
{
    uint32_t irq, word, idc;
    RISCVAPLICState *aplic = opaque;

    /* Reads must be 4 byte words */
    if ((addr & 0x3) != 0) {
        goto err;
    }

    if (addr == APLIC_DOMAINCFG) {
        /* The read-only-one bits always read back set; DM mirrors msimode. */
        return APLIC_DOMAINCFG_RDONLY | aplic->domaincfg |
               (aplic->msimode ? APLIC_DOMAINCFG_DM : 0);
    } else if ((APLIC_SOURCECFG_BASE <= addr) &&
            (addr < (APLIC_SOURCECFG_BASE + (aplic->num_irqs - 1) * 4))) {
        /* sourcecfg registers start at identity 1 (identity 0 is reserved). */
        irq = ((addr - APLIC_SOURCECFG_BASE) >> 2) + 1;
        return aplic->sourcecfg[irq];
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_MMSICFGADDR)) {
        return aplic->mmsicfgaddr;
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_MMSICFGADDRH)) {
        return aplic->mmsicfgaddrH;
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_SMSICFGADDR)) {
        /*
         * Registers SMSICFGADDR and SMSICFGADDRH are implemented only if:
         * (a) the interrupt domain is at machine level
         * (b) the domain's harts implement supervisor mode
         * (c) the domain has one or more child supervisor-level domains
         *     that support MSI delivery mode (domaincfg.DM is not read-
         *     only zero in at least one of the supervisor-level child
         *     domains).
         */
        return (aplic->num_children) ? aplic->smsicfgaddr : 0;
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_SMSICFGADDRH)) {
        return (aplic->num_children) ? aplic->smsicfgaddrH : 0;
    } else if ((APLIC_SETIP_BASE <= addr) &&
            (addr < (APLIC_SETIP_BASE + aplic->bitfield_words * 4))) {
        /* SETIP reads back the pending bits. */
        word = (addr - APLIC_SETIP_BASE) >> 2;
        return riscv_aplic_read_pending_word(aplic, word);
    } else if (addr == APLIC_SETIPNUM) {
        return 0;
    } else if ((APLIC_CLRIP_BASE <= addr) &&
            (addr < (APLIC_CLRIP_BASE + aplic->bitfield_words * 4))) {
        /* CLRIP reads back the raw input (rectified) values. */
        word = (addr - APLIC_CLRIP_BASE) >> 2;
        return riscv_aplic_read_input_word(aplic, word);
    } else if (addr == APLIC_CLRIPNUM) {
        return 0;
    } else if ((APLIC_SETIE_BASE <= addr) &&
            (addr < (APLIC_SETIE_BASE + aplic->bitfield_words * 4))) {
        /* SETIE reads back the enable bits. */
        word = (addr - APLIC_SETIE_BASE) >> 2;
        return riscv_aplic_read_enabled_word(aplic, word);
    } else if (addr == APLIC_SETIENUM) {
        return 0;
    } else if ((APLIC_CLRIE_BASE <= addr) &&
            (addr < (APLIC_CLRIE_BASE + aplic->bitfield_words * 4))) {
        /* CLRIE is write-only; reads return 0. */
        return 0;
    } else if (addr == APLIC_CLRIENUM) {
        return 0;
    } else if (addr == APLIC_SETIPNUM_LE) {
        return 0;
    } else if (addr == APLIC_SETIPNUM_BE) {
        return 0;
    } else if (addr == APLIC_GENMSI) {
        return (aplic->msimode) ? aplic->genmsi : 0;
    } else if ((APLIC_TARGET_BASE <= addr) &&
            (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) {
        /* target registers also start at identity 1. */
        irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1;
        return aplic->target[irq];
    } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) &&
            (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) {
        /* Per-hart IDC page (direct delivery mode only). */
        idc = (addr - APLIC_IDC_BASE) / APLIC_IDC_SIZE;
        switch (addr - (APLIC_IDC_BASE + idc * APLIC_IDC_SIZE)) {
        case APLIC_IDC_IDELIVERY:
            return aplic->idelivery[idc];
        case APLIC_IDC_IFORCE:
            return aplic->iforce[idc];
        case APLIC_IDC_ITHRESHOLD:
            return aplic->ithreshold[idc];
        case APLIC_IDC_TOPI:
            return riscv_aplic_idc_topi(aplic, idc);
        case APLIC_IDC_CLAIMI:
            /* Note: CLAIMI has read side effects (claims the interrupt). */
            return riscv_aplic_idc_claimi(aplic, idc);
        default:
            goto err;
        };
    }

err:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
    return 0;
}
/*
 * MMIO write handler for the APLIC register file.  After applying the
 * register write, delivery state is re-evaluated: all MSIs in MSI mode,
 * or the affected IDC line(s) in direct mode.  Misaligned or unmapped
 * writes are logged as guest errors and discarded.
 */
static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value,
        unsigned size)
{
    RISCVAPLICState *aplic = opaque;
    /* UINT32_MAX means "no single IDC touched; refresh them all". */
    uint32_t irq, word, idc = UINT32_MAX;

    /* Writes must be 4 byte words */
    if ((addr & 0x3) != 0) {
        goto err;
    }

    if (addr == APLIC_DOMAINCFG) {
        /* Only IE bit writeable at the moment */
        value &= APLIC_DOMAINCFG_IE;
        aplic->domaincfg = value;
    } else if ((APLIC_SOURCECFG_BASE <= addr) &&
            (addr < (APLIC_SOURCECFG_BASE + (aplic->num_irqs - 1) * 4))) {
        irq = ((addr - APLIC_SOURCECFG_BASE) >> 2) + 1;
        /* Delegation is only meaningful when child domains exist. */
        if (!aplic->num_children && (value & APLIC_SOURCECFG_D)) {
            value = 0;
        }
        /* D selects which field the low bits encode: child index or SM. */
        if (value & APLIC_SOURCECFG_D) {
            value &= (APLIC_SOURCECFG_D | APLIC_SOURCECFG_CHILDIDX_MASK);
        } else {
            value &= (APLIC_SOURCECFG_D | APLIC_SOURCECFG_SM_MASK);
        }
        aplic->sourcecfg[irq] = value;
        /* Delegated or inactive sources drop their pending/enable state. */
        if ((aplic->sourcecfg[irq] & APLIC_SOURCECFG_D) ||
            (aplic->sourcecfg[irq] == 0)) {
            riscv_aplic_set_pending_raw(aplic, irq, false);
            riscv_aplic_set_enabled_raw(aplic, irq, false);
        }
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_MMSICFGADDR)) {
        /* The L (lock) bit in mmsicfgaddrH freezes both M config words. */
        if (!(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
            aplic->mmsicfgaddr = value;
        }
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_MMSICFGADDRH)) {
        if (!(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
            aplic->mmsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK;
        }
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_SMSICFGADDR)) {
        /*
         * Registers SMSICFGADDR and SMSICFGADDRH are implemented only if:
         * (a) the interrupt domain is at machine level
         * (b) the domain's harts implement supervisor mode
         * (c) the domain has one or more child supervisor-level domains
         *     that support MSI delivery mode (domaincfg.DM is not read-
         *     only zero in at least one of the supervisor-level child
         *     domains).
         */
        if (aplic->num_children &&
            !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
            aplic->smsicfgaddr = value;
        }
    } else if (aplic->mmode && aplic->msimode &&
               (addr == APLIC_SMSICFGADDRH)) {
        if (aplic->num_children &&
            !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) {
            aplic->smsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK;
        }
    } else if ((APLIC_SETIP_BASE <= addr) &&
            (addr < (APLIC_SETIP_BASE + aplic->bitfield_words * 4))) {
        word = (addr - APLIC_SETIP_BASE) >> 2;
        riscv_aplic_set_pending_word(aplic, word, value, true);
    } else if (addr == APLIC_SETIPNUM) {
        riscv_aplic_set_pending(aplic, value, true);
    } else if ((APLIC_CLRIP_BASE <= addr) &&
            (addr < (APLIC_CLRIP_BASE + aplic->bitfield_words * 4))) {
        word = (addr - APLIC_CLRIP_BASE) >> 2;
        riscv_aplic_set_pending_word(aplic, word, value, false);
    } else if (addr == APLIC_CLRIPNUM) {
        riscv_aplic_set_pending(aplic, value, false);
    } else if ((APLIC_SETIE_BASE <= addr) &&
            (addr < (APLIC_SETIE_BASE + aplic->bitfield_words * 4))) {
        word = (addr - APLIC_SETIE_BASE) >> 2;
        riscv_aplic_set_enabled_word(aplic, word, value, true);
    } else if (addr == APLIC_SETIENUM) {
        riscv_aplic_set_enabled(aplic, value, true);
    } else if ((APLIC_CLRIE_BASE <= addr) &&
            (addr < (APLIC_CLRIE_BASE + aplic->bitfield_words * 4))) {
        word = (addr - APLIC_CLRIE_BASE) >> 2;
        riscv_aplic_set_enabled_word(aplic, word, value, false);
    } else if (addr == APLIC_CLRIENUM) {
        riscv_aplic_set_enabled(aplic, value, false);
    } else if (addr == APLIC_SETIPNUM_LE) {
        riscv_aplic_set_pending(aplic, value, true);
    } else if (addr == APLIC_SETIPNUM_BE) {
        /* Big-endian variant: byte-swap before interpreting the identity. */
        riscv_aplic_set_pending(aplic, bswap32(value), true);
    } else if (addr == APLIC_GENMSI) {
        /* Software-generated MSI; the guest-index field is read-only 0. */
        if (aplic->msimode) {
            aplic->genmsi = value & ~(APLIC_TARGET_GUEST_IDX_MASK <<
                                      APLIC_TARGET_GUEST_IDX_SHIFT);
            riscv_aplic_msi_send(aplic,
                                 value >> APLIC_TARGET_HART_IDX_SHIFT,
                                 0,
                                 value & APLIC_TARGET_EIID_MASK);
        }
    } else if ((APLIC_TARGET_BASE <= addr) &&
            (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) {
        irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1;
        if (aplic->msimode) {
            aplic->target[irq] = value;
        } else {
            /* In direct mode a zero priority is forced to 1. */
            aplic->target[irq] = (value & ~APLIC_TARGET_IPRIO_MASK) |
                                 ((value & aplic->iprio_mask) ?
                                  (value & aplic->iprio_mask) : 1);
        }
    } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) &&
            (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) {
        /* Per-hart IDC page (direct delivery mode only). */
        idc = (addr - APLIC_IDC_BASE) / APLIC_IDC_SIZE;
        switch (addr - (APLIC_IDC_BASE + idc * APLIC_IDC_SIZE)) {
        case APLIC_IDC_IDELIVERY:
            aplic->idelivery[idc] = value & 0x1;
            break;
        case APLIC_IDC_IFORCE:
            aplic->iforce[idc] = value & 0x1;
            break;
        case APLIC_IDC_ITHRESHOLD:
            aplic->ithreshold[idc] = value & aplic->iprio_mask;
            break;
        default:
            goto err;
        };
    } else {
        goto err;
    }

    /* Re-evaluate delivery after any successful register write. */
    if (aplic->msimode) {
        for (irq = 1; irq < aplic->num_irqs; irq++) {
            riscv_aplic_msi_irq_update(aplic, irq);
        }
    } else {
        if (idc == UINT32_MAX) {
            for (idc = 0; idc < aplic->num_harts; idc++) {
                riscv_aplic_idc_update(aplic, idc);
            }
        } else {
            riscv_aplic_idc_update(aplic, idc);
        }
    }

    return;

err:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
}
/*
 * MMIO access ops: the APLIC register file is little-endian and accepts
 * only 32-bit accesses (the handlers additionally re-check alignment).
 */
static const MemoryRegionOps riscv_aplic_ops = {
    .read = riscv_aplic_read,
    .write = riscv_aplic_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4
    }
};
/*
 * Realize handler: allocate the per-IRQ and per-hart arrays, map the
 * MMIO aperture, and wire up GPIO lines.  In direct (non-MSI) mode the
 * per-hart external IRQ outputs are created and the corresponding CPU
 * interrupt (MEIP or SEIP) is claimed.
 */
static void riscv_aplic_realize(DeviceState *dev, Error **errp)
{
    uint32_t i;
    RISCVAPLICState *aplic = RISCV_APLIC(dev);

    aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
    aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
    /*
     * Zero-initialize the per-IRQ state words: they are guest-readable
     * (e.g. via riscv_aplic_read_input_word()) before any input edge
     * ever arrives, so a plain g_new() here would leak uninitialized
     * heap contents to the guest as input/pending/enable bits.
     */
    aplic->state = g_new0(uint32_t, aplic->num_irqs);
    aplic->target = g_new0(uint32_t, aplic->num_irqs);
    if (!aplic->msimode) {
        /* Direct mode forces a minimum priority of 1 (see write path). */
        for (i = 0; i < aplic->num_irqs; i++) {
            aplic->target[i] = 1;
        }
    }
    aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
    aplic->iforce = g_new0(uint32_t, aplic->num_harts);
    aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);

    memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic,
                          TYPE_RISCV_APLIC, aplic->aperture_size);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);

    /*
     * Only root APLICs have hardware IRQ lines. All non-root APLICs
     * have IRQ lines delegated by their parent APLIC.
     */
    if (!aplic->parent) {
        qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
    }

    /* Create output IRQ lines for non-MSI mode */
    if (!aplic->msimode) {
        aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts);
        qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);

        /* Claim the CPU interrupt to be triggered by this APLIC */
        for (i = 0; i < aplic->num_harts; i++) {
            RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(aplic->hartid_base + i));

            if (riscv_cpu_claim_interrupts(cpu,
                (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
                error_report("%s already claimed",
                             (aplic->mmode) ? "MEIP" : "SEIP");
                exit(1);
            }
        }
    }

    msi_nonbroken = true;
}
/* User-settable device properties (normally set via riscv_aplic_create()). */
static Property riscv_aplic_properties[] = {
    /* Size in bytes of the MMIO aperture. */
    DEFINE_PROP_UINT32("aperture-size", RISCVAPLICState, aperture_size, 0),
    /* First hart ID served by this domain. */
    DEFINE_PROP_UINT32("hartid-base", RISCVAPLICState, hartid_base, 0),
    DEFINE_PROP_UINT32("num-harts", RISCVAPLICState, num_harts, 0),
    /* Mask of implemented interrupt-priority bits. */
    DEFINE_PROP_UINT32("iprio-mask", RISCVAPLICState, iprio_mask, 0),
    /* Number of interrupt identities, including the reserved identity 0. */
    DEFINE_PROP_UINT32("num-irqs", RISCVAPLICState, num_irqs, 0),
    /* true: MSI delivery mode; false: direct (wired IDC) delivery mode. */
    DEFINE_PROP_BOOL("msimode", RISCVAPLICState, msimode, 0),
    /* true: machine-level domain; false: supervisor-level domain. */
    DEFINE_PROP_BOOL("mmode", RISCVAPLICState, mmode, 0),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Migration state: all guest-visible registers, including the variable-
 * length per-IRQ (sourcecfg/state/target) and per-hart
 * (idelivery/iforce/ithreshold) arrays.
 */
static const VMStateDescription vmstate_riscv_aplic = {
    .name = "riscv_aplic",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(domaincfg, RISCVAPLICState),
        VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState),
        VMSTATE_UINT32(mmsicfgaddrH, RISCVAPLICState),
        VMSTATE_UINT32(smsicfgaddr, RISCVAPLICState),
        VMSTATE_UINT32(smsicfgaddrH, RISCVAPLICState),
        VMSTATE_UINT32(genmsi, RISCVAPLICState),
        VMSTATE_VARRAY_UINT32(sourcecfg, RISCVAPLICState,
                              num_irqs, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(state, RISCVAPLICState,
                              num_irqs, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(target, RISCVAPLICState,
                              num_irqs, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(idelivery, RISCVAPLICState,
                              num_harts, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(iforce, RISCVAPLICState,
                              num_harts, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(ithreshold, RISCVAPLICState,
                              num_harts, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};
/* QOM class init: hook up properties, realize, and migration state. */
static void riscv_aplic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, riscv_aplic_properties);
    dc->realize = riscv_aplic_realize;
    dc->vmsd = &vmstate_riscv_aplic;
}
/* QOM type registration data: the APLIC is a sysbus device. */
static const TypeInfo riscv_aplic_info = {
    .name          = TYPE_RISCV_APLIC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(RISCVAPLICState),
    .class_init    = riscv_aplic_class_init,
};
/* Register the APLIC QOM type at module-init time. */
static void riscv_aplic_register_types(void)
{
    type_register_static(&riscv_aplic_info);
}

type_init(riscv_aplic_register_types)
/*
 * Add a APLIC device to another APLIC device as child for
 * interrupt delegation.  Both domains must expose the same set of
 * interrupt identities.
 */
void riscv_aplic_add_child(DeviceState *parent, DeviceState *child)
{
    RISCVAPLICState *caplic, *paplic;

    assert(parent && child);
    caplic = RISCV_APLIC(child);
    paplic = RISCV_APLIC(parent);

    assert(paplic->num_irqs == caplic->num_irqs);
    /*
     * Must be strictly less than the capacity: the child is stored at
     * index num_children below, so allowing num_children ==
     * QEMU_APLIC_MAX_CHILDREN (as the previous "<=" did) would write
     * one element past the end of the children[] array.
     */
    assert(paplic->num_children < QEMU_APLIC_MAX_CHILDREN);

    caplic->parent = paplic;
    paplic->children[paplic->num_children] = caplic;
    paplic->num_children++;
}
/*
 * Create APLIC device.
 *
 * Convenience constructor: sets all properties, realizes the device,
 * maps it at @addr, optionally registers it as a delegation child of
 * @parent, and in direct mode wires the per-hart outputs to each CPU's
 * external interrupt input (M or S level depending on @mmode).
 */
DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
    uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources,
    uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent)
{
    DeviceState *dev = qdev_new(TYPE_RISCV_APLIC);
    uint32_t i;

    /* Sanity-check the caller-supplied geometry before realize. */
    assert(num_harts < APLIC_MAX_IDC);
    assert((APLIC_IDC_BASE + (num_harts * APLIC_IDC_SIZE)) <= size);
    assert(num_sources < APLIC_MAX_SOURCE);
    assert(APLIC_MIN_IPRIO_BITS <= iprio_bits);
    assert(iprio_bits <= APLIC_MAX_IPRIO_BITS);

    qdev_prop_set_uint32(dev, "aperture-size", size);
    qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
    qdev_prop_set_uint32(dev, "num-harts", num_harts);
    qdev_prop_set_uint32(dev, "iprio-mask", ((1U << iprio_bits) - 1));
    /* Identity 0 is reserved, hence num_sources + 1 identities. */
    qdev_prop_set_uint32(dev, "num-irqs", num_sources + 1);
    qdev_prop_set_bit(dev, "msimode", msimode);
    qdev_prop_set_bit(dev, "mmode", mmode);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);

    /* Non-root domains receive their interrupts via delegation. */
    if (parent) {
        riscv_aplic_add_child(parent, dev);
    }

    /* Direct mode: connect each output line to the matching CPU input. */
    if (!msimode) {
        for (i = 0; i < num_harts; i++) {
            CPUState *cpu = qemu_get_cpu(hartid_base + i);

            qdev_connect_gpio_out_named(dev, NULL, i,
                                        qdev_get_gpio_in(DEVICE(cpu),
                                            (mmode) ? IRQ_M_EXT : IRQ_S_EXT));
        }
    }

    return dev;
}

View File

@ -212,8 +212,17 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
qemu_fdt_add_subnode(mc->fdt, intc_name);
qemu_fdt_setprop_cell(mc->fdt, intc_name, "phandle",
intc_phandles[cpu]);
qemu_fdt_setprop_string(mc->fdt, intc_name, "compatible",
"riscv,cpu-intc");
if (riscv_feature(&s->soc[socket].harts[cpu].env,
RISCV_FEATURE_AIA)) {
static const char * const compat[2] = {
"riscv,cpu-intc-aia", "riscv,cpu-intc"
};
qemu_fdt_setprop_string_array(mc->fdt, intc_name, "compatible",
(char **)&compat, ARRAY_SIZE(compat));
} else {
qemu_fdt_setprop_string(mc->fdt, intc_name, "compatible",
"riscv,cpu-intc");
}
qemu_fdt_setprop(mc->fdt, intc_name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop_cell(mc->fdt, intc_name, "#interrupt-cells", 1);

View File

@ -1,67 +0,0 @@
/*
* QEMU RISC-V lowRISC Ibex PLIC
*
* Copyright (c) 2020 Western Digital
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HW_IBEX_PLIC_H
#define HW_IBEX_PLIC_H
#include "hw/sysbus.h"
#include "qom/object.h"
#define TYPE_IBEX_PLIC "ibex-plic"
OBJECT_DECLARE_SIMPLE_TYPE(IbexPlicState, IBEX_PLIC)
/* Device state of the lowRISC Ibex PLIC model. */
struct IbexPlicState {
    /*< private >*/
    SysBusDevice parent_obj;

    /*< public >*/
    /* Register window exposed on the system bus. */
    MemoryRegion mmio;

    /* Per-instance register state; the pointers are dynamically sized
     * arrays (presumably from the num_* configuration below — confirm
     * against the device's realize code). */
    uint32_t *pending;
    uint32_t *hidden_pending;
    uint32_t *claimed;
    uint32_t *source;
    uint32_t *priority;
    uint32_t *enable;
    uint32_t threshold;
    uint32_t claim;

    /* config */
    /* Register-map layout: *_base are offsets, *_num are word counts,
     * set per SoC model. */
    uint32_t num_cpus;
    uint32_t num_sources;
    uint32_t pending_base;
    uint32_t pending_num;
    uint32_t source_base;
    uint32_t source_num;
    uint32_t priority_base;
    uint32_t priority_num;
    uint32_t enable_base;
    uint32_t enable_num;
    uint32_t threshold_base;
    uint32_t claim_base;

    /* Output interrupt lines towards the harts. */
    qemu_irq *external_irqs;
};
#endif /* HW_IBEX_PLIC_H */

View File

@ -0,0 +1,79 @@
/*
* RISC-V APLIC (Advanced Platform Level Interrupt Controller) interface
*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HW_RISCV_APLIC_H
#define HW_RISCV_APLIC_H
#include "hw/sysbus.h"
#include "qom/object.h"
#define TYPE_RISCV_APLIC "riscv.aplic"
typedef struct RISCVAPLICState RISCVAPLICState;
DECLARE_INSTANCE_CHECKER(RISCVAPLICState, RISCV_APLIC, TYPE_RISCV_APLIC)
#define APLIC_MIN_SIZE 0x4000
#define APLIC_SIZE_ALIGN(__x) (((__x) + (APLIC_MIN_SIZE - 1)) & \
~(APLIC_MIN_SIZE - 1))
#define APLIC_SIZE(__num_harts) (APLIC_MIN_SIZE + \
APLIC_SIZE_ALIGN(32 * (__num_harts)))
/* Device state of the RISC-V APLIC model. */
struct RISCVAPLICState {
    /*< private >*/
    SysBusDevice parent_obj;
    /* Output lines towards the harts (wired mode only). */
    qemu_irq *external_irqs;

    /*< public >*/
    /* Register window exposed on the system bus. */
    MemoryRegion mmio;
    /* Number of 32-bit words needed for per-source bitmaps. */
    uint32_t bitfield_words;

    /* Domain configuration and MSI address configuration registers
     * (the *H variants hold the high halves). */
    uint32_t domaincfg;
    uint32_t mmsicfgaddr;
    uint32_t mmsicfgaddrH;
    uint32_t smsicfgaddr;
    uint32_t smsicfgaddrH;
    uint32_t genmsi;

    /* Dynamically allocated per-source state (sourcecfg/state/target)
     * and per-IDC state (idelivery/iforce/ithreshold). */
    uint32_t *sourcecfg;
    uint32_t *state;
    uint32_t *target;
    uint32_t *idelivery;
    uint32_t *iforce;
    uint32_t *ithreshold;

    /* topology */
#define QEMU_APLIC_MAX_CHILDREN        16
    /* Delegation hierarchy: one parent, up to MAX_CHILDREN children. */
    struct RISCVAPLICState *parent;
    struct RISCVAPLICState *children[QEMU_APLIC_MAX_CHILDREN];
    uint16_t num_children;

    /* config */
    uint32_t aperture_size;
    uint32_t hartid_base;
    uint32_t num_harts;
    uint32_t iprio_mask;
    uint32_t num_irqs;
    bool msimode;
    bool mmode;
};
void riscv_aplic_add_child(DeviceState *parent, DeviceState *child);
DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources,
uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent);
#endif

View File

@ -0,0 +1,25 @@
#
# RISC-V translation routines for the XVentanaCondOps extension
#
# Copyright (c) 2022 Dr. Philipp Tomsich, philipp.tomsich@vrull.eu
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Reference: VTx-family custom instructions
# Custom ISA extensions for Ventana Micro Systems RISC-V cores
# (https://github.com/ventanamicro/ventana-custom-extensions/releases/download/v1.0.0/ventana-custom-extensions-v1.0.0.pdf)
# Fields
%rs2 20:5
%rs1 15:5
%rd 7:5
# Argument sets
&r rd rs1 rs2 !extern
# Formats
@r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd
# *** RV64 Custom-3 Extension ***
vt_maskc 0000000 ..... ..... 110 ..... 1111011 @r
vt_maskcn 0000000 ..... ..... 111 ..... 1111011 @r

View File

@ -135,11 +135,6 @@ static void set_vext_version(CPURISCVState *env, int vext_ver)
env->vext_ver = vext_ver;
}
static void set_feature(CPURISCVState *env, int feature)
{
env->features |= (1ULL << feature);
}
static void set_resetvec(CPURISCVState *env, target_ulong resetvec)
{
#ifndef CONFIG_USER_ONLY
@ -405,6 +400,10 @@ void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
static void riscv_cpu_reset(DeviceState *dev)
{
#ifndef CONFIG_USER_ONLY
uint8_t iprio;
int i, irq, rdzero;
#endif
CPUState *cs = CPU(dev);
RISCVCPU *cpu = RISCV_CPU(cs);
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
@ -434,8 +433,24 @@ static void riscv_cpu_reset(DeviceState *dev)
}
}
env->mcause = 0;
env->miclaim = MIP_SGEIP;
env->pc = env->resetvec;
env->two_stage_lookup = false;
/* Initialized default priorities of local interrupts. */
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
iprio = riscv_cpu_default_priority(i);
env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
env->hviprio[i] = 0;
}
i = 0;
while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
if (!rdzero) {
env->hviprio[irq] = env->miprio[irq];
}
i++;
}
/* mmte is supposed to have pm.current hardwired to 1 */
env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
#endif
@ -507,30 +522,33 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
}
if (cpu->cfg.mmu) {
set_feature(env, RISCV_FEATURE_MMU);
riscv_set_feature(env, RISCV_FEATURE_MMU);
}
if (cpu->cfg.pmp) {
set_feature(env, RISCV_FEATURE_PMP);
riscv_set_feature(env, RISCV_FEATURE_PMP);
/*
* Enhanced PMP should only be available
* on harts with PMP support
*/
if (cpu->cfg.epmp) {
set_feature(env, RISCV_FEATURE_EPMP);
riscv_set_feature(env, RISCV_FEATURE_EPMP);
}
}
if (cpu->cfg.aia) {
riscv_set_feature(env, RISCV_FEATURE_AIA);
}
set_resetvec(env, cpu->cfg.resetvec);
/* Validate that MISA_MXL is set properly. */
switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
case MXL_RV64:
cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
break;
case MXL_RV128:
cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
break;
#endif
case MXL_RV32:
@ -663,27 +681,53 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
RISCVCPU *cpu = RISCV_CPU(opaque);
CPURISCVState *env = &cpu->env;
switch (irq) {
case IRQ_U_SOFT:
case IRQ_S_SOFT:
case IRQ_VS_SOFT:
case IRQ_M_SOFT:
case IRQ_U_TIMER:
case IRQ_S_TIMER:
case IRQ_VS_TIMER:
case IRQ_M_TIMER:
case IRQ_U_EXT:
case IRQ_S_EXT:
case IRQ_VS_EXT:
case IRQ_M_EXT:
if (kvm_enabled()) {
kvm_riscv_set_irq(cpu, irq, level);
} else {
riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
if (irq < IRQ_LOCAL_MAX) {
switch (irq) {
case IRQ_U_SOFT:
case IRQ_S_SOFT:
case IRQ_VS_SOFT:
case IRQ_M_SOFT:
case IRQ_U_TIMER:
case IRQ_S_TIMER:
case IRQ_VS_TIMER:
case IRQ_M_TIMER:
case IRQ_U_EXT:
case IRQ_S_EXT:
case IRQ_VS_EXT:
case IRQ_M_EXT:
if (kvm_enabled()) {
kvm_riscv_set_irq(cpu, irq, level);
} else {
riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
}
break;
default:
g_assert_not_reached();
}
break;
default:
} else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
/* Require H-extension for handling guest local interrupts */
if (!riscv_has_ext(env, RVH)) {
g_assert_not_reached();
}
/* Compute bit position in HGEIP CSR */
irq = irq - IRQ_LOCAL_MAX + 1;
if (env->geilen < irq) {
g_assert_not_reached();
}
/* Update HGEIP CSR */
env->hgeip &= ~((target_ulong)1 << irq);
if (level) {
env->hgeip |= (target_ulong)1 << irq;
}
/* Update mip.SGEIP bit */
riscv_cpu_update_mip(cpu, MIP_SGEIP,
BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
} else {
g_assert_not_reached();
}
}
@ -696,7 +740,8 @@ static void riscv_cpu_init(Object *obj)
cpu_set_cpustate_pointers(cpu);
#ifndef CONFIG_USER_ONLY
qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 12);
qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}
@ -729,15 +774,23 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
/* Vendor-specific custom extensions */
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
/* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
/* ePMP 0.9.3 */
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
DEFINE_PROP_BOOL("x-aia", RISCVCPU, cfg.aia, false),
DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC),
DEFINE_PROP_END_OF_LIST(),

View File

@ -78,7 +78,8 @@ enum {
RISCV_FEATURE_MMU,
RISCV_FEATURE_PMP,
RISCV_FEATURE_EPMP,
RISCV_FEATURE_MISA
RISCV_FEATURE_MISA,
RISCV_FEATURE_AIA
};
#define PRIV_VERSION_1_10_0 0x00011000
@ -161,6 +162,7 @@ struct CPURISCVState {
target_ulong priv;
/* This contains QEMU specific information about the virt state. */
target_ulong virt;
target_ulong geilen;
target_ulong resetvec;
target_ulong mhartid;
@ -170,12 +172,12 @@ struct CPURISCVState {
*/
uint64_t mstatus;
target_ulong mip;
uint64_t mip;
uint32_t miclaim;
uint64_t miclaim;
target_ulong mie;
target_ulong mideleg;
uint64_t mie;
uint64_t mideleg;
target_ulong satp; /* since: priv-1.10.0 */
target_ulong stval;
@ -190,16 +192,30 @@ struct CPURISCVState {
target_ulong mcause;
target_ulong mtval; /* since: priv-1.10.0 */
/* Machine and Supervisor interrupt priorities */
uint8_t miprio[64];
uint8_t siprio[64];
/* AIA CSRs */
target_ulong miselect;
target_ulong siselect;
/* Hypervisor CSRs */
target_ulong hstatus;
target_ulong hedeleg;
target_ulong hideleg;
uint64_t hideleg;
target_ulong hcounteren;
target_ulong htval;
target_ulong htinst;
target_ulong hgatp;
target_ulong hgeie;
target_ulong hgeip;
uint64_t htimedelta;
/* Hypervisor controlled virtual interrupt priorities */
target_ulong hvictl;
uint8_t hviprio[64];
/* Upper 64-bits of 128-bit CSRs */
uint64_t mscratchh;
uint64_t sscratchh;
@ -217,6 +233,9 @@ struct CPURISCVState {
target_ulong vstval;
target_ulong vsatp;
/* AIA VS-mode CSRs */
target_ulong vsiselect;
target_ulong mtval2;
target_ulong mtinst;
@ -252,6 +271,22 @@ struct CPURISCVState {
uint64_t (*rdtime_fn)(uint32_t);
uint32_t rdtime_fn_arg;
/* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
((((__xlen) & 0xff) << 24) | \
(((__vgein) & 0x3f) << 20) | \
(((__virt) & 0x1) << 18) | \
(((__priv) & 0x3) << 16) | \
(__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff)
int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
target_ulong *val, target_ulong new_val, target_ulong write_mask);
void *aia_ireg_rmw_fn_arg[4];
/* True if in debugger mode. */
bool debugger;
@ -303,6 +338,53 @@ struct RISCVCPUClass {
DeviceReset parent_reset;
};
struct RISCVCPUConfig {
bool ext_i;
bool ext_e;
bool ext_g;
bool ext_m;
bool ext_a;
bool ext_f;
bool ext_d;
bool ext_c;
bool ext_s;
bool ext_u;
bool ext_h;
bool ext_j;
bool ext_v;
bool ext_zba;
bool ext_zbb;
bool ext_zbc;
bool ext_zbs;
bool ext_counters;
bool ext_ifencei;
bool ext_icsr;
bool ext_svinval;
bool ext_svnapot;
bool ext_svpbmt;
bool ext_zfh;
bool ext_zfhmin;
bool ext_zve32f;
bool ext_zve64f;
/* Vendor-specific custom extensions */
bool ext_XVentanaCondOps;
char *priv_spec;
char *user_spec;
char *bext_spec;
char *vext_spec;
uint16_t vlen;
uint16_t elen;
bool mmu;
bool pmp;
bool epmp;
bool aia;
uint64_t resetvec;
};
typedef struct RISCVCPUConfig RISCVCPUConfig;
/**
* RISCVCPU:
* @env: #CPURISCVState
@ -320,43 +402,7 @@ struct RISCVCPU {
char *dyn_vreg_xml;
/* Configuration Settings */
struct {
bool ext_i;
bool ext_e;
bool ext_g;
bool ext_m;
bool ext_a;
bool ext_f;
bool ext_d;
bool ext_c;
bool ext_s;
bool ext_u;
bool ext_h;
bool ext_j;
bool ext_v;
bool ext_zba;
bool ext_zbb;
bool ext_zbc;
bool ext_zbs;
bool ext_counters;
bool ext_ifencei;
bool ext_icsr;
bool ext_zfh;
bool ext_zfhmin;
bool ext_zve32f;
bool ext_zve64f;
char *priv_spec;
char *user_spec;
char *bext_spec;
char *vext_spec;
uint16_t vlen;
uint16_t elen;
bool mmu;
bool pmp;
bool epmp;
uint64_t resetvec;
} cfg;
RISCVCPUConfig cfg;
};
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
@ -369,6 +415,11 @@ static inline bool riscv_feature(CPURISCVState *env, int feature)
return env->features & (1ULL << feature);
}
static inline void riscv_set_feature(CPURISCVState *env, int feature)
{
env->features |= (1ULL << feature);
}
#include "cpu_user.h"
extern const char * const riscv_int_regnames[];
@ -383,7 +434,14 @@ int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
@ -410,11 +468,18 @@ void riscv_cpu_list(void);
#ifndef CONFIG_USER_ONLY
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
uint32_t arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
int (*rmw_fn)(void *arg,
target_ulong reg,
target_ulong *val,
target_ulong new_val,
target_ulong write_mask),
void *rmw_fn_arg);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
@ -459,6 +524,7 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
@ -495,6 +561,19 @@ static inline int riscv_cpu_xlen(CPURISCVState *env)
return 16 << env->xl;
}
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
return env->misa_mxl;
#else
return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif
/*
* Encode LMUL to lmul as follows:
* LMUL vlmul lmul

View File

@ -168,6 +168,31 @@
#define CSR_MTVAL 0x343
#define CSR_MIP 0x344
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPI 0xfb0
/* Machine-Level IMSIC Interface (AIA) */
#define CSR_MSETEIPNUM 0x358
#define CSR_MCLREIPNUM 0x359
#define CSR_MSETEIENUM 0x35a
#define CSR_MCLREIENUM 0x35b
#define CSR_MTOPEI 0x35c
/* Virtual Interrupts for Supervisor Level (AIA) */
#define CSR_MVIEN 0x308
#define CSR_MVIP 0x309
/* Machine-Level High-Half CSRs (AIA) */
#define CSR_MIDELEGH 0x313
#define CSR_MIEH 0x314
#define CSR_MVIENH 0x318
#define CSR_MVIPH 0x319
#define CSR_MIPH 0x354
/* Supervisor Trap Setup */
#define CSR_SSTATUS 0x100
#define CSR_SEDELEG 0x102
@ -187,6 +212,24 @@
#define CSR_SPTBR 0x180
#define CSR_SATP 0x180
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPI 0xdb0
/* Supervisor-Level IMSIC Interface (AIA) */
#define CSR_SSETEIPNUM 0x158
#define CSR_SCLREIPNUM 0x159
#define CSR_SSETEIENUM 0x15a
#define CSR_SCLREIENUM 0x15b
#define CSR_STOPEI 0x15c
/* Supervisor-Level High-Half CSRs (AIA) */
#define CSR_SIEH 0x114
#define CSR_SIPH 0x154
/* Hpervisor CSRs */
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
@ -217,6 +260,35 @@
#define CSR_MTINST 0x34a
#define CSR_MTVAL2 0x34b
/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
#define CSR_HVIEN 0x608
#define CSR_HVICTL 0x609
#define CSR_HVIPRIO1 0x646
#define CSR_HVIPRIO2 0x647
/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPI 0xeb0
/* VS-Level IMSIC Interface (H-extension with AIA) */
#define CSR_VSSETEIPNUM 0x258
#define CSR_VSCLREIPNUM 0x259
#define CSR_VSSETEIENUM 0x25a
#define CSR_VSCLREIENUM 0x25b
#define CSR_VSTOPEI 0x25c
/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
#define CSR_HIDELEGH 0x613
#define CSR_HVIENH 0x618
#define CSR_HVIPH 0x655
#define CSR_HVIPRIO1H 0x656
#define CSR_HVIPRIO2H 0x657
#define CSR_VSIEH 0x214
#define CSR_VSIPH 0x254
/* Enhanced Physical Memory Protection (ePMP) */
#define CSR_MSECCFG 0x747
#define CSR_MSECCFGH 0x757
@ -489,10 +561,16 @@ typedef enum {
#define PTE_A 0x040 /* Accessed */
#define PTE_D 0x080 /* Dirty */
#define PTE_SOFT 0x300 /* Reserved for Software */
#define PTE_PBMT 0x6000000000000000ULL /* Page-based memory types */
#define PTE_N 0x8000000000000000ULL /* NAPOT translation */
#define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */
/* Page table PPN shift amount */
#define PTE_PPN_SHIFT 10
/* Page table PPN mask */
#define PTE_PPN_MASK 0x3FFFFFFFFFFC00ULL
/* Leaf page shift amount */
#define PGSHIFT 12
@ -540,6 +618,9 @@ typedef enum RISCVException {
#define IRQ_S_EXT 9
#define IRQ_VS_EXT 10
#define IRQ_M_EXT 11
#define IRQ_S_GEXT 12
#define IRQ_LOCAL_MAX 16
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
/* mip masks */
#define MIP_USIP (1 << IRQ_U_SOFT)
@ -554,6 +635,7 @@ typedef enum RISCVException {
#define MIP_SEIP (1 << IRQ_S_EXT)
#define MIP_VSEIP (1 << IRQ_VS_EXT)
#define MIP_MEIP (1 << IRQ_M_EXT)
#define MIP_SGEIP (1 << IRQ_S_GEXT)
/* sip masks */
#define SIP_SSIP MIP_SSIP
@ -631,4 +713,51 @@ typedef enum RISCVException {
#define UMTE_U_PM_INSN U_PM_INSN
#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
#define ISELECT_IMSIC_EIDELIVERY 0x70
#define ISELECT_IMSIC_EITHRESHOLD 0x72
#define ISELECT_IMSIC_EIP0 0x80
#define ISELECT_IMSIC_EIP63 0xbf
#define ISELECT_IMSIC_EIE0 0xc0
#define ISELECT_IMSIC_EIE63 0xff
#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
#define ISELECT_MASK 0x1ff
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1)
/* IMSIC bits (AIA) */
#define IMSIC_TOPEI_IID_SHIFT 16
#define IMSIC_TOPEI_IID_MASK 0x7ff
#define IMSIC_TOPEI_IPRIO_MASK 0x7ff
#define IMSIC_EIPx_BITS 32
#define IMSIC_EIEx_BITS 32
/* MTOPI and STOPI bits (AIA) */
#define TOPI_IID_SHIFT 16
#define TOPI_IID_MASK 0xfff
#define TOPI_IPRIO_MASK 0xff
/* Interrupt priority bits (AIA) */
#define IPRIO_IRQ_BITS 8
#define IPRIO_MMAXIPRIO 255
#define IPRIO_DEFAULT_UPPER 4
#define IPRIO_DEFAULT_MIDDLE (IPRIO_DEFAULT_UPPER + 24)
#define IPRIO_DEFAULT_M IPRIO_DEFAULT_MIDDLE
#define IPRIO_DEFAULT_S (IPRIO_DEFAULT_M + 3)
#define IPRIO_DEFAULT_SGEXT (IPRIO_DEFAULT_S + 3)
#define IPRIO_DEFAULT_VS (IPRIO_DEFAULT_SGEXT + 1)
#define IPRIO_DEFAULT_LOWER (IPRIO_DEFAULT_VS + 3)
/* HVICTL bits (AIA) */
#define HVICTL_VTI 0x40000000
#define HVICTL_IID 0x0fff0000
#define HVICTL_IPRIOM 0x00000100
#define HVICTL_IPRIO 0x000000ff
#define HVICTL_VALID_MASK \
(HVICTL_VTI | HVICTL_IID | HVICTL_IPRIOM | HVICTL_IPRIO)
#endif

View File

@ -152,32 +152,275 @@ void riscv_cpu_update_mask(CPURISCVState *env)
}
#ifndef CONFIG_USER_ONLY
/*
* The HS-mode is allowed to configure priority only for the
* following VS-mode local interrupts:
*
* 0 (Reserved interrupt, reads as zero)
* 1 Supervisor software interrupt
* 4 (Reserved interrupt, reads as zero)
* 5 Supervisor timer interrupt
* 8 (Reserved interrupt, reads as zero)
* 13 (Reserved interrupt)
* 14 "
* 15 "
* 16 "
* 18 Debug/trace interrupt
* 20 (Reserved interrupt)
* 22 "
* 24 "
* 26 "
* 28 "
* 30 (Reserved for standard reporting of bus or system errors)
*/
/* Table of VS-mode local interrupt numbers configurable via hviprio. */
static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 18, 20, 22, 24, 26, 28, 30 };
/* 1 where the corresponding priority field is hardwired to read zero. */
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/*
 * Translate an hviprio table index into a VS-mode interrupt number.
 *
 * On success stores the interrupt number in @out_irq and the
 * reads-as-zero flag in @out_rdzero (either pointer may be NULL) and
 * returns 0.  Returns -EINVAL for an out-of-range index.
 */
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    const int nr_entries = (int)(sizeof(hviprio_index2irq) /
                                 sizeof(hviprio_index2irq[0]));

    if (index < 0 || index >= nr_entries) {
        return -EINVAL;
    }

    if (out_irq != NULL) {
        *out_irq = hviprio_index2irq[index];
    }
    if (out_rdzero != NULL) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}
/*
* Default priorities of local interrupts are defined in the
* RISC-V Advanced Interrupt Architecture specification.
*
* ----------------------------------------------------------------
* Default |
* Priority | Major Interrupt Numbers
* ----------------------------------------------------------------
* Highest | 63 (3f), 62 (3e), 31 (1f), 30 (1e), 61 (3d), 60 (3c),
* | 59 (3b), 58 (3a), 29 (1d), 28 (1c), 57 (39), 56 (38),
* | 55 (37), 54 (36), 27 (1b), 26 (1a), 53 (35), 52 (34),
* | 51 (33), 50 (32), 25 (19), 24 (18), 49 (31), 48 (30)
* |
* | 11 (0b), 3 (03), 7 (07)
* | 9 (09), 1 (01), 5 (05)
* | 12 (0c)
* | 10 (0a), 2 (02), 6 (06)
* |
* | 47 (2f), 46 (2e), 23 (17), 22 (16), 45 (2d), 44 (2c),
* | 43 (2b), 42 (2a), 21 (15), 20 (14), 41 (29), 40 (28),
* | 39 (27), 38 (26), 19 (13), 18 (12), 37 (25), 36 (24),
* Lowest | 35 (23), 34 (22), 17 (11), 16 (10), 33 (21), 32 (20)
* ----------------------------------------------------------------
*/
static const uint8_t default_iprio[64] = {
[63] = IPRIO_DEFAULT_UPPER,
[62] = IPRIO_DEFAULT_UPPER + 1,
[31] = IPRIO_DEFAULT_UPPER + 2,
[30] = IPRIO_DEFAULT_UPPER + 3,
[61] = IPRIO_DEFAULT_UPPER + 4,
[60] = IPRIO_DEFAULT_UPPER + 5,
[59] = IPRIO_DEFAULT_UPPER + 6,
[58] = IPRIO_DEFAULT_UPPER + 7,
[29] = IPRIO_DEFAULT_UPPER + 8,
[28] = IPRIO_DEFAULT_UPPER + 9,
[57] = IPRIO_DEFAULT_UPPER + 10,
[56] = IPRIO_DEFAULT_UPPER + 11,
[55] = IPRIO_DEFAULT_UPPER + 12,
[54] = IPRIO_DEFAULT_UPPER + 13,
[27] = IPRIO_DEFAULT_UPPER + 14,
[26] = IPRIO_DEFAULT_UPPER + 15,
[53] = IPRIO_DEFAULT_UPPER + 16,
[52] = IPRIO_DEFAULT_UPPER + 17,
[51] = IPRIO_DEFAULT_UPPER + 18,
[50] = IPRIO_DEFAULT_UPPER + 19,
[25] = IPRIO_DEFAULT_UPPER + 20,
[24] = IPRIO_DEFAULT_UPPER + 21,
[49] = IPRIO_DEFAULT_UPPER + 22,
[48] = IPRIO_DEFAULT_UPPER + 23,
[11] = IPRIO_DEFAULT_M,
[3] = IPRIO_DEFAULT_M + 1,
[7] = IPRIO_DEFAULT_M + 2,
[9] = IPRIO_DEFAULT_S,
[1] = IPRIO_DEFAULT_S + 1,
[5] = IPRIO_DEFAULT_S + 2,
[12] = IPRIO_DEFAULT_SGEXT,
[10] = IPRIO_DEFAULT_VS,
[2] = IPRIO_DEFAULT_VS + 1,
[6] = IPRIO_DEFAULT_VS + 2,
[47] = IPRIO_DEFAULT_LOWER,
[46] = IPRIO_DEFAULT_LOWER + 1,
[23] = IPRIO_DEFAULT_LOWER + 2,
[22] = IPRIO_DEFAULT_LOWER + 3,
[45] = IPRIO_DEFAULT_LOWER + 4,
[44] = IPRIO_DEFAULT_LOWER + 5,
[43] = IPRIO_DEFAULT_LOWER + 6,
[42] = IPRIO_DEFAULT_LOWER + 7,
[21] = IPRIO_DEFAULT_LOWER + 8,
[20] = IPRIO_DEFAULT_LOWER + 9,
[41] = IPRIO_DEFAULT_LOWER + 10,
[40] = IPRIO_DEFAULT_LOWER + 11,
[39] = IPRIO_DEFAULT_LOWER + 12,
[38] = IPRIO_DEFAULT_LOWER + 13,
[19] = IPRIO_DEFAULT_LOWER + 14,
[18] = IPRIO_DEFAULT_LOWER + 15,
[37] = IPRIO_DEFAULT_LOWER + 16,
[36] = IPRIO_DEFAULT_LOWER + 17,
[35] = IPRIO_DEFAULT_LOWER + 18,
[34] = IPRIO_DEFAULT_LOWER + 19,
[17] = IPRIO_DEFAULT_LOWER + 20,
[16] = IPRIO_DEFAULT_LOWER + 21,
[33] = IPRIO_DEFAULT_LOWER + 22,
[32] = IPRIO_DEFAULT_LOWER + 23,
};
/*
 * Return the AIA default priority of local interrupt @irq.
 *
 * Interrupts with no architecturally defined default (zero entries in
 * default_iprio[]) and out-of-range interrupt numbers report the lowest
 * possible priority, IPRIO_MMAXIPRIO.
 */
uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
/*
 * Note: the stray ';' that followed the closing brace was removed — an
 * empty file-scope declaration is a constraint violation flagged by
 * -pedantic builds.
 */
/*
 * Pick the interrupt to take from a pending bitmap.
 *
 * Without AIA this is simply the lowest-numbered pending bit.  With AIA,
 * scan all pending bits and select the one with the best (numerically
 * smallest) priority; on equal priority the higher interrupt number wins
 * (the scan runs upwards and '<=' lets later bits replace earlier ones).
 * An iprio[] entry of 0 means "default": the external interrupt @extirq
 * uses @extirq_def_prio, everything else collapses to priority 1 or
 * IPRIO_MMAXIPRIO depending on its default priority relative to
 * @extirq_def_prio.  Returns RISCV_EXCP_NONE when nothing is pending.
 */
static int riscv_cpu_pending_to_irq(CPURISCVState *env,
    int extirq, unsigned int extirq_def_prio,
    uint64_t pending, uint8_t *iprio)
{
    int best_irq = RISCV_EXCP_NONE;
    unsigned int best_prio = UINT_MAX;
    int nr;
    uint64_t bits;
    unsigned int prio;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    nr = ctz64(pending);
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        /* Classic behaviour: lowest pending interrupt number wins. */
        return nr;
    }

    for (bits = pending >> nr; bits != 0; bits >>= 1, nr++) {
        if (!(bits & 0x1)) {
            continue;
        }
        prio = iprio[nr];
        if (prio == 0) {
            if (nr == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(nr) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if (prio <= best_prio) {
            best_irq = nr;
            best_prio = prio;
        }
    }

    return best_irq;
}
/*
 * Return the full set of pending-and-enabled local interrupts,
 * folding a pending guest external interrupt on the currently selected
 * VGEIN line into the VS-level external interrupt bit.
 */
static uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t vgein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t pending = env->mip;

    if (env->hgeip & (1ULL << vgein)) {
        pending |= MIP_VSEIP;
    }

    return pending & env->mie;
}
/* Highest-priority interrupt pending for M-mode, or RISCV_EXCP_NONE. */
int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t pend = riscv_cpu_all_pending(env);

    /* M-mode never takes delegated, guest-external or VS-level bits. */
    pend &= ~env->mideleg;
    pend &= ~((uint64_t)(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    pend, env->miprio);
}
/* Highest-priority interrupt pending for HS-mode, or RISCV_EXCP_NONE. */
int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t pend = riscv_cpu_all_pending(env);

    /* HS-mode sees delegated interrupts, minus the VS-level ones. */
    pend &= env->mideleg;
    pend &= ~((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    pend, env->siprio);
}
/* Highest-priority interrupt pending for VS-mode, or RISCV_EXCP_NONE. */
int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t pend = riscv_cpu_all_pending(env);

    /* Only the delegated VS-level bits are visible to the guest. */
    pend &= env->mideleg;
    pend &= (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    /* Shift down by one so VS interrupt numbers match the S-level view. */
    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    pend >> 1, env->hviprio);
}
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
target_ulong virt_enabled = riscv_cpu_virt_enabled(env);
int virq;
uint64_t irqs, pending, mie, hsie, vsie;
target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
target_ulong pending = env->mip & env->mie;
target_ulong mie = env->priv < PRV_M ||
(env->priv == PRV_M && mstatus_mie);
target_ulong sie = env->priv < PRV_S ||
(env->priv == PRV_S && mstatus_sie);
target_ulong hsie = virt_enabled || sie;
target_ulong vsie = virt_enabled && sie;
target_ulong irqs =
(pending & ~env->mideleg & -mie) |
(pending & env->mideleg & ~env->hideleg & -hsie) |
(pending & env->mideleg & env->hideleg & -vsie);
if (irqs) {
return ctz64(irqs); /* since non-zero */
/* Determine interrupt enable state of all privilege modes */
if (riscv_cpu_virt_enabled(env)) {
mie = 1;
hsie = 1;
vsie = (env->priv < PRV_S) ||
(env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
} else {
return RISCV_EXCP_NONE; /* indicates no pending interrupt */
mie = (env->priv < PRV_M) ||
(env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
hsie = (env->priv < PRV_S) ||
(env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
vsie = 0;
}
/* Determine all pending interrupts */
pending = riscv_cpu_all_pending(env);
/* Check M-mode interrupts */
irqs = pending & ~env->mideleg & -mie;
if (irqs) {
return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
irqs, env->miprio);
}
/* Check HS-mode interrupts */
irqs = pending & env->mideleg & ~env->hideleg & -hsie;
if (irqs) {
return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
irqs, env->siprio);
}
/* Check VS-mode interrupts */
irqs = pending & env->mideleg & env->hideleg & -vsie;
if (irqs) {
virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
irqs >> 1, env->hviprio);
return (virq <= 0) ? virq : virq + 1;
}
/* Indicate no pending interrupt */
return RISCV_EXCP_NONE;
}
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@ -279,6 +522,28 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
}
}
/* Number of implemented guest external interrupts (0 without RVH). */
target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    return riscv_has_ext(env, RVH) ? env->geilen : 0;
}
/*
 * Set the number of implemented guest external interrupts.
 * Silently ignored without the H-extension or when @geilen exceeds the
 * architectural maximum of TARGET_LONG_BITS - 1.
 */
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (riscv_has_ext(env, RVH) && geilen <= (TARGET_LONG_BITS - 1)) {
        env->geilen = geilen;
    }
}
bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
if (!riscv_has_ext(env, RVH)) {
@ -300,6 +565,19 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
}
env->virt = set_field(env->virt, VIRT_ONOFF, enable);
if (enable) {
/*
* The guest external interrupts from an interrupt controller are
* delivered only when the Guest/VM is running (i.e. V=1). This means
* any guest external interrupt which is triggered while the Guest/VM
* is not running (i.e. V=0) will be missed on QEMU resulting in guest
* with sluggish response to serial console input and other I/O events.
*
* To solve this, we check and inject interrupt after setting V=1.
*/
riscv_cpu_update_mip(env_archcpu(env), 0, 0);
}
}
bool riscv_cpu_two_stage_lookup(int mmu_idx)
@ -307,7 +585,7 @@ bool riscv_cpu_two_stage_lookup(int mmu_idx)
return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
if (env->miclaim & interrupts) {
@ -318,13 +596,18 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
}
}
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
{
CPURISCVState *env = &cpu->env;
CPUState *cs = CPU(cpu);
uint32_t old = env->mip;
uint64_t gein, vsgein = 0, old = env->mip;
bool locked = false;
if (riscv_cpu_virt_enabled(env)) {
gein = get_field(env->hstatus, HSTATUS_VGEIN);
vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
}
if (!qemu_mutex_iothread_locked()) {
locked = true;
qemu_mutex_lock_iothread();
@ -332,7 +615,7 @@ uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
env->mip = (env->mip & ~mask) | (value & mask);
if (env->mip) {
if (env->mip | vsgein) {
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
} else {
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
@ -352,6 +635,20 @@ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
env->rdtime_fn_arg = arg;
}
/*
 * Install the machine-specific AIA indirect-register read-modify-write
 * callback for privilege level @priv.  Levels above M-mode are ignored.
 */
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv > PRV_M) {
        return;
    }

    env->aia_ireg_rmw_fn[priv] = rmw_fn;
    env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
if (newpriv > PRV_M) {
@ -454,6 +751,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
bool use_background = false;
hwaddr ppn;
RISCVCPU *cpu = env_archcpu(env);
int napot_bits = 0;
target_ulong napot_mask;
/*
* Check if we should use the background registers for the two
@ -622,13 +923,27 @@ restart:
return TRANSLATE_FAIL;
}
hwaddr ppn = pte >> PTE_PPN_SHIFT;
if (riscv_cpu_sxl(env) == MXL_RV32) {
ppn = pte >> PTE_PPN_SHIFT;
} else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
} else {
ppn = pte >> PTE_PPN_SHIFT;
if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
return TRANSLATE_FAIL;
}
}
if (!(pte & PTE_V)) {
/* Invalid PTE */
return TRANSLATE_FAIL;
} else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
return TRANSLATE_FAIL;
} else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
/* Inner PTE, continue walking */
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
return TRANSLATE_FAIL;
}
base = ppn << PGSHIFT;
} else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
/* Reserved leaf PTE flags: PTE_W */
@ -702,8 +1017,18 @@ restart:
/* for superpage mappings, make a fake leaf PTE for the TLB's
benefit. */
target_ulong vpn = addr >> PGSHIFT;
*physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
(addr & ~TARGET_PAGE_MASK);
if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
napot_bits = ctzl(ppn) + 1;
if ((i != (levels - 1)) || (napot_bits != 4)) {
return TRANSLATE_FAIL;
}
}
napot_mask = (1 << napot_bits) - 1;
*physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
(vpn & (((target_ulong)1 << ptshift) - 1))
) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
/* set permissions on the TLB entry */
if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
@ -1009,7 +1334,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
*/
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
uint64_t deleg = async ? env->mideleg : env->medeleg;
target_ulong tval = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
@ -1076,7 +1401,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
/* handle the trap in S-mode */
if (riscv_has_ext(env, RVH)) {
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
/* Trap to VS mode */

File diff suppressed because it is too large Load Diff

View File

@ -64,6 +64,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
case MXL_RV32:
return gdb_get_reg32(mem_buf, tmp);
case MXL_RV64:
case MXL_RV128:
return gdb_get_reg64(mem_buf, tmp);
default:
g_assert_not_reached();
@ -84,6 +85,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
length = 4;
break;
case MXL_RV64:
case MXL_RV128:
if (env->xl < MXL_RV64) {
tmp = (int32_t)ldq_p(mem_buf);
} else {
@ -420,6 +422,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
1, "riscv-32bit-virtual.xml", 0);
break;
case MXL_RV64:
case MXL_RV128:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,
1, "riscv-64bit-virtual.xml", 0);

View File

@ -809,3 +809,10 @@ fcvt_l_h 1100010 00010 ..... ... ..... 1010011 @r2_rm
fcvt_lu_h 1100010 00011 ..... ... ..... 1010011 @r2_rm
fcvt_h_l 1101010 00010 ..... ... ..... 1010011 @r2_rm
fcvt_h_lu 1101010 00011 ..... ... ..... 1010011 @r2_rm
# *** Svinval Standard Extension ***
sinval_vma 0001011 ..... ..... 000 00000 1110011 @sfence_vma
sfence_w_inval 0001100 00000 00000 000 00000 1110011
sfence_inval_ir 0001100 00001 00000 000 00000 1110011
hinval_vvma 0010011 ..... ..... 000 00000 1110011 @hfence_vvma
hinval_gvma 0110011 ..... ..... 000 00000 1110011 @hfence_gvma

View File

@ -19,25 +19,25 @@
*/
#define REQUIRE_ZBA(ctx) do { \
if (!RISCV_CPU(ctx->cs)->cfg.ext_zba) { \
if (ctx->cfg_ptr->ext_zba) { \
return false; \
} \
} while (0)
#define REQUIRE_ZBB(ctx) do { \
if (!RISCV_CPU(ctx->cs)->cfg.ext_zbb) { \
if (ctx->cfg_ptr->ext_zbb) { \
return false; \
} \
} while (0)
#define REQUIRE_ZBC(ctx) do { \
if (!RISCV_CPU(ctx->cs)->cfg.ext_zbc) { \
if (ctx->cfg_ptr->ext_zbc) { \
return false; \
} \
} while (0)
#define REQUIRE_ZBS(ctx) do { \
if (!RISCV_CPU(ctx->cs)->cfg.ext_zbs) { \
if (ctx->cfg_ptr->ext_zbs) { \
return false; \
} \
} while (0)

View File

@ -806,7 +806,7 @@ static bool trans_fence(DisasContext *ctx, arg_fence *a)
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
if (!ctx->ext_ifencei) {
if (!ctx->cfg_ptr->ext_ifencei) {
return false;
}

View File

@ -74,7 +74,7 @@ static bool require_zve32f(DisasContext *s)
}
/* Zve32f doesn't support FP64. (Section 18.2) */
return s->ext_zve32f ? s->sew <= MO_32 : true;
return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true;
}
static bool require_scale_zve32f(DisasContext *s)
@ -85,7 +85,7 @@ static bool require_scale_zve32f(DisasContext *s)
}
/* Zve32f doesn't support FP64. (Section 18.2) */
return s->ext_zve64f ? s->sew <= MO_16 : true;
return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
}
static bool require_zve64f(DisasContext *s)
@ -96,7 +96,7 @@ static bool require_zve64f(DisasContext *s)
}
/* Zve64f doesn't support FP64. (Section 18.2) */
return s->ext_zve64f ? s->sew <= MO_32 : true;
return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true;
}
static bool require_scale_zve64f(DisasContext *s)
@ -107,7 +107,7 @@ static bool require_scale_zve64f(DisasContext *s)
}
/* Zve64f doesn't support FP64. (Section 18.2) */
return s->ext_zve64f ? s->sew <= MO_16 : true;
return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
}
/* Destination vector register group cannot overlap source mask register. */
@ -174,7 +174,8 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
TCGv s1, dst;
if (!require_rvv(s) ||
!(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) {
!(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
s->cfg_ptr->ext_zve64f)) {
return false;
}
@ -210,7 +211,8 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
TCGv dst;
if (!require_rvv(s) ||
!(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) {
!(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
s->cfg_ptr->ext_zve64f)) {
return false;
}
@ -248,7 +250,7 @@ static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8;
}
/* check functions */
@ -318,7 +320,8 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
* when XLEN=32. (Section 18.2)
*/
if (get_xl(s) == MXL_RV32) {
ret &= (!has_ext(s, RVV) && s->ext_zve64f ? eew != MO_64 : true);
ret &= (!has_ext(s, RVV) &&
s->cfg_ptr->ext_zve64f ? eew != MO_64 : true);
}
return ret;
@ -454,7 +457,7 @@ static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
{
return (s->lmul <= 2) &&
(s->sew < MO_64) &&
((s->sew + 1) <= (s->elen >> 4)) &&
((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
require_align(vd, s->lmul + 1) &&
require_vm(vm, vd);
}
@ -482,7 +485,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
{
return (s->lmul <= 2) &&
(s->sew < MO_64) &&
((s->sew + 1) <= (s->elen >> 4)) &&
((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
require_align(vs2, s->lmul + 1) &&
require_align(vd, s->lmul) &&
require_vm(vm, vd);
@ -661,7 +664,8 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
* The first part is vlen in bytes, encoded in maxsz of simd_desc.
* The second part is lmul, encoded in data of simd_desc.
*/
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@ -819,7 +823,8 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
mask = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
stride = get_gpr(s, rs2, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@ -925,7 +930,8 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
mask = tcg_temp_new_ptr();
index = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
@ -1065,7 +1071,8 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@ -1120,7 +1127,8 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
base = get_gpr(s, rs1, EXT_NONE);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
@ -1185,7 +1193,7 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true)
static inline uint32_t MAXSZ(DisasContext *s)
{
int scale = s->lmul - 3;
return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
return scale < 0 ? s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
}
static bool opivv_check(DisasContext *s, arg_rmrr *a)
@ -1220,7 +1228,8 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
cpu_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
}
mark_vs_dirty(s);
gen_set_label(over);
@ -1262,7 +1271,8 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
data = FIELD_DP32(data, VDATA, VM, vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
@ -1425,7 +1435,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
data = FIELD_DP32(data, VDATA, VM, vm);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
@ -1508,7 +1519,8 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2),
cpu_env, s->vlen / 8, s->vlen / 8,
cpu_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8,
data, fn);
mark_vs_dirty(s);
gen_set_label(over);
@ -1587,7 +1599,8 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2),
cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
cpu_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -1663,7 +1676,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -1843,7 +1857,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -1963,7 +1978,8 @@ static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
* are not included for EEW=64 in Zve64*. (Section 18.2)
*/
return opivv_check(s, a) &&
(!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
(!has_ext(s, RVV) &&
s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
}
static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
@ -1976,7 +1992,8 @@ static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
* are not included for EEW=64 in Zve64*. (Section 18.2)
*/
return opivx_check(s, a) &&
(!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
(!has_ext(s, RVV) &&
s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
}
GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
@ -2046,7 +2063,8 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
cpu_env, s->vlen / 8, s->vlen / 8, data,
cpu_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
fns[s->sew]);
gen_set_label(over);
}
@ -2083,7 +2101,8 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
};
tcg_gen_ext_tl_i64(s1_i64, s1);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
fns[s->sew](dest, s1_i64, cpu_env, desc);
@ -2123,7 +2142,8 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
s1 = tcg_constant_i64(simm);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
fns[s->sew](dest, s1, cpu_env, desc);
@ -2176,7 +2196,8 @@ static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
* for EEW=64 in Zve64*. (Section 18.2)
*/
return opivv_check(s, a) &&
(!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
(!has_ext(s, RVV) &&
s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
}
static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
@ -2187,7 +2208,8 @@ static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
* for EEW=64 in Zve64*. (Section 18.2)
*/
return opivx_check(s, a) &&
(!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
(!has_ext(s, RVV) &&
s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
}
GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
@ -2275,7 +2297,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2302,7 +2325,8 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
@ -2391,7 +2415,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2464,7 +2489,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2583,7 +2609,8 @@ static bool do_opfv(DisasContext *s, arg_rmr *a,
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), cpu_env,
s->vlen / 8, s->vlen / 8, data, fn);
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -2696,7 +2723,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
do_nanbox(s, t1, cpu_fpr[a->rs1]);
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
fns[s->sew - 1](dest, t1, cpu_env, desc);
@ -2782,7 +2810,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2831,7 +2860,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VM, a->vm); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2896,7 +2926,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2947,7 +2978,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, VM, a->vm); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -2986,7 +3018,7 @@ GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
{
return reduction_check(s, a) && (s->sew < MO_64) &&
((s->sew + 1) <= (s->elen >> 4));
((s->sew + 1) <= (s->cfg_ptr->elen >> 4));
}
GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
@ -3034,7 +3066,8 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
vreg_ofs(s, a->rs2), cpu_env, \
s->vlen / 8, s->vlen / 8, data, fn); \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, fn); \
mark_vs_dirty(s); \
gen_set_label(over); \
return true; \
@ -3067,7 +3100,8 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
dst = dest_gpr(s, a->rd);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@ -3099,7 +3133,8 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
dst = dest_gpr(s, a->rd);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@ -3134,7 +3169,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
cpu_env, s->vlen / 8, s->vlen / 8, \
cpu_env, s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, \
data, fn); \
mark_vs_dirty(s); \
gen_set_label(over); \
@ -3174,7 +3210,8 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
};
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), cpu_env,
s->vlen / 8, s->vlen / 8, data, fns[s->sew]);
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fns[s->sew]);
mark_vs_dirty(s);
gen_set_label(over);
return true;
@ -3200,7 +3237,8 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
gen_helper_vid_v_w, gen_helper_vid_v_d,
};
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
cpu_env, s->vlen / 8, s->vlen / 8,
cpu_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8,
data, fns[s->sew]);
mark_vs_dirty(s);
gen_set_label(over);
@ -3554,7 +3592,8 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
if (a->vm && s->vl_eq_vlmax) {
int scale = s->lmul - (s->sew + 3);
int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
int vlmax = scale < 0 ?
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
TCGv_i64 dest = tcg_temp_new_i64();
if (a->rs1 == 0) {
@ -3586,7 +3625,8 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
if (a->vm && s->vl_eq_vlmax) {
int scale = s->lmul - (s->sew + 3);
int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
int vlmax = scale < 0 ?
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
if (a->rs1 >= vlmax) {
tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
MAXSZ(s), MAXSZ(s), 0);
@ -3638,7 +3678,8 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
cpu_env, s->vlen / 8, s->vlen / 8, data,
cpu_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
fns[s->sew]);
mark_vs_dirty(s);
gen_set_label(over);
@ -3657,7 +3698,7 @@ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
if (require_rvv(s) && \
QEMU_IS_ALIGNED(a->rd, LEN) && \
QEMU_IS_ALIGNED(a->rs2, LEN)) { \
uint32_t maxsz = (s->vlen >> 3) * LEN; \
uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \
if (s->vstart == 0) { \
/* EEW = 8 */ \
tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
@ -3742,7 +3783,8 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), cpu_env,
s->vlen / 8, s->vlen / 8, data, fn);
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
mark_vs_dirty(s);
gen_set_label(over);

View File

@ -17,13 +17,13 @@
*/
#define REQUIRE_ZFH(ctx) do { \
if (!ctx->ext_zfh) { \
if (!ctx->cfg_ptr->ext_zfh) { \
return false; \
} \
} while (0)
#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
if (!(ctx->ext_zfh || ctx->ext_zfhmin)) { \
if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
return false; \
} \
} while (0)

View File

@ -0,0 +1,75 @@
/*
* RISC-V translation routines for the Svinval Standard Instruction Set.
*
* Copyright (c) 2020-2022 PLCT lab
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * Guard for the Svinval extension: make the enclosing trans_* function
 * return false (i.e. decode fails and the insn traps as illegal) when
 * the CPU configuration does not enable svinval.
 */
#define REQUIRE_SVINVAL(ctx) do {          \
    if (!ctx->cfg_ptr->ext_svinval) {      \
        return false;                      \
    }                                      \
} while (0)
/*
 * sinval.vma: invalidate cached address translations.  Requires both the
 * svinval extension and RVS (the REQUIRE_* macros return false — illegal
 * instruction — otherwise).
 */
static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a)
{
    REQUIRE_SVINVAL(ctx);
    /* Do the same as sfence.vma currently */
    REQUIRE_EXT(ctx, RVS);
#ifndef CONFIG_USER_ONLY
    gen_helper_tlb_flush(cpu_env);
    return true;
#endif
    /* user-only mode has no system TLB to flush: decode fails */
    return false;
}
/*
 * sfence.w.inval: ordering fence between stores and invalidations.
 * Accepted as a no-op here (requires svinval + RVS); QEMU currently
 * needs no explicit ordering action for this.
 */
static bool trans_sfence_w_inval(DisasContext *ctx, arg_sfence_w_inval *a)
{
    REQUIRE_SVINVAL(ctx);
    REQUIRE_EXT(ctx, RVS);
    /* Do nothing currently */
    return true;
}
/*
 * sfence.inval.ir: ordering fence between invalidations and implicit
 * references.  Accepted as a no-op here (requires svinval + RVS); QEMU
 * currently needs no explicit ordering action for this.
 */
static bool trans_sfence_inval_ir(DisasContext *ctx, arg_sfence_inval_ir *a)
{
    REQUIRE_SVINVAL(ctx);
    REQUIRE_EXT(ctx, RVS);
    /* Do nothing currently */
    return true;
}
/*
 * hinval.vvma: invalidate guest virtual-address translations.  Requires
 * svinval and the hypervisor extension (RVH).
 */
static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a)
{
    REQUIRE_SVINVAL(ctx);
    /* Do the same as hfence.vvma currently */
    REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
    gen_helper_hyp_tlb_flush(cpu_env);
    return true;
#endif
    /* user-only mode has no hypervisor TLB to flush: decode fails */
    return false;
}
/*
 * hinval.gvma: invalidate guest-physical-address translations.  Requires
 * svinval and the hypervisor extension (RVH).
 */
static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a)
{
    REQUIRE_SVINVAL(ctx);
    /* Do the same as hfence.gvma currently */
    REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
    gen_helper_hyp_gvma_tlb_flush(cpu_env);
    return true;
#endif
    /* user-only mode has no hypervisor TLB to flush: decode fails */
    return false;
}

View File

@ -0,0 +1,39 @@
/*
* RISC-V translation routines for the XVentanaCondOps extension.
*
* Copyright (c) 2021-2022 VRULL GmbH.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * Shared helper for the XVentanaCondOps conditional-mask instructions:
 * rd = (rs2 <cond> 0) ? rs1 : 0, implemented as a conditional move
 * against the always-zero TCG value.
 */
static bool gen_vt_condmask(DisasContext *ctx, arg_r *a, TCGCond cond)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);

    /* dest = (src2 cond 0) ? src1 : 0 */
    tcg_gen_movcond_tl(cond, dest, src2, ctx->zero, src1, ctx->zero);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
/* vt.maskc: rd = (rs2 != 0) ? rs1 : 0 */
static bool trans_vt_maskc(DisasContext *ctx, arg_r *a)
{
    return gen_vt_condmask(ctx, a, TCG_COND_NE);
}
/* vt.maskcn: rd = (rs2 == 0) ? rs1 : 0 */
static bool trans_vt_maskcn(DisasContext *ctx, arg_r *a)
{
    return gen_vt_condmask(ctx, a, TCG_COND_EQ);
}

View File

@ -78,19 +78,24 @@ static bool hyper_needed(void *opaque)
static const VMStateDescription vmstate_hyper = {
.name = "cpu/hyper",
.version_id = 1,
.minimum_version_id = 1,
.version_id = 2,
.minimum_version_id = 2,
.needed = hyper_needed,
.fields = (VMStateField[]) {
VMSTATE_UINTTL(env.hstatus, RISCVCPU),
VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
VMSTATE_UINTTL(env.hideleg, RISCVCPU),
VMSTATE_UINT64(env.hideleg, RISCVCPU),
VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
VMSTATE_UINTTL(env.htval, RISCVCPU),
VMSTATE_UINTTL(env.htinst, RISCVCPU),
VMSTATE_UINTTL(env.hgatp, RISCVCPU),
VMSTATE_UINTTL(env.hgeie, RISCVCPU),
VMSTATE_UINTTL(env.hgeip, RISCVCPU),
VMSTATE_UINT64(env.htimedelta, RISCVCPU),
VMSTATE_UINTTL(env.hvictl, RISCVCPU),
VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),
VMSTATE_UINT64(env.vsstatus, RISCVCPU),
VMSTATE_UINTTL(env.vstvec, RISCVCPU),
VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
@ -98,6 +103,7 @@ static const VMStateDescription vmstate_hyper = {
VMSTATE_UINTTL(env.vscause, RISCVCPU),
VMSTATE_UINTTL(env.vstval, RISCVCPU),
VMSTATE_UINTTL(env.vsatp, RISCVCPU),
VMSTATE_UINTTL(env.vsiselect, RISCVCPU),
VMSTATE_UINTTL(env.mtval2, RISCVCPU),
VMSTATE_UINTTL(env.mtinst, RISCVCPU),
@ -233,6 +239,8 @@ const VMStateDescription vmstate_riscv_cpu = {
.fields = (VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
VMSTATE_UINTTL(env.pc, RISCVCPU),
VMSTATE_UINTTL(env.load_res, RISCVCPU),
VMSTATE_UINTTL(env.load_val, RISCVCPU),
@ -251,10 +259,10 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.resetvec, RISCVCPU),
VMSTATE_UINTTL(env.mhartid, RISCVCPU),
VMSTATE_UINT64(env.mstatus, RISCVCPU),
VMSTATE_UINTTL(env.mip, RISCVCPU),
VMSTATE_UINT32(env.miclaim, RISCVCPU),
VMSTATE_UINTTL(env.mie, RISCVCPU),
VMSTATE_UINTTL(env.mideleg, RISCVCPU),
VMSTATE_UINT64(env.mip, RISCVCPU),
VMSTATE_UINT64(env.miclaim, RISCVCPU),
VMSTATE_UINT64(env.mie, RISCVCPU),
VMSTATE_UINT64(env.mideleg, RISCVCPU),
VMSTATE_UINTTL(env.satp, RISCVCPU),
VMSTATE_UINTTL(env.stval, RISCVCPU),
VMSTATE_UINTTL(env.medeleg, RISCVCPU),
@ -265,6 +273,8 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.mepc, RISCVCPU),
VMSTATE_UINTTL(env.mcause, RISCVCPU),
VMSTATE_UINTTL(env.mtval, RISCVCPU),
VMSTATE_UINTTL(env.miselect, RISCVCPU),
VMSTATE_UINTTL(env.siselect, RISCVCPU),
VMSTATE_UINTTL(env.scounteren, RISCVCPU),
VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
VMSTATE_UINTTL(env.sscratch, RISCVCPU),

View File

@ -4,6 +4,7 @@ dir = meson.current_source_dir()
gen = [
decodetree.process('insn16.decode', extra_args: ['--static-decode=decode_insn16', '--insnwidth=16']),
decodetree.process('insn32.decode', extra_args: '--static-decode=decode_insn32'),
decodetree.process('XVentanaCondOps.decode', extra_args: '--static-decode=decode_XVentanaCodeOps'),
]
riscv_ss = ss.source_set()

View File

@ -76,11 +76,7 @@ typedef struct DisasContext {
int frm;
RISCVMXL ol;
bool virt_enabled;
bool ext_ifencei;
bool ext_zfh;
bool ext_zfhmin;
bool ext_zve32f;
bool ext_zve64f;
const RISCVCPUConfig *cfg_ptr;
bool hlsx;
/* vector extension */
bool vill;
@ -98,8 +94,6 @@ typedef struct DisasContext {
*/
int8_t lmul;
uint8_t sew;
uint16_t vlen;
uint16_t elen;
target_ulong vstart;
bool vl_eq_vlmax;
uint8_t ntemp;
@ -117,6 +111,19 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
return ctx->misa_ext & ext;
}
static bool always_true_p(DisasContext *ctx __attribute__((__unused__)))
{
return true;
}
#define MATERIALISE_EXT_PREDICATE(ext) \
static bool has_ ## ext ## _p(DisasContext *ctx) \
{ \
return ctx->cfg_ptr->ext_ ## ext ; \
}
MATERIALISE_EXT_PREDICATE(XVentanaCondOps);
#ifdef TARGET_RISCV32
#define get_xl(ctx) MXL_RV32
#elif defined(CONFIG_USER_ONLY)
@ -855,21 +862,37 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_privileged.c.inc"
#include "insn_trans/trans_svinval.c.inc"
#include "insn_trans/trans_xventanacondops.c.inc"
/* Include the auto-generated decoder for 16 bit insn */
#include "decode-insn16.c.inc"
/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
/* check for compressed insn */
/*
* A table with predicate (i.e., guard) functions and decoder functions
* that are tested in-order until a decoder matches onto the opcode.
*/
static const struct {
bool (*guard_func)(DisasContext *);
bool (*decode_func)(DisasContext *, uint32_t);
} decoders[] = {
{ always_true_p, decode_insn32 },
{ has_XVentanaCondOps_p, decode_XVentanaCodeOps },
};
/* Check for compressed insn */
if (extract16(opcode, 0, 2) != 3) {
if (!has_ext(ctx, RVC)) {
gen_exception_illegal(ctx);
} else {
ctx->opcode = opcode;
ctx->pc_succ_insn = ctx->base.pc_next + 2;
if (!decode_insn16(ctx, opcode)) {
gen_exception_illegal(ctx);
if (decode_insn16(ctx, opcode)) {
return;
}
}
} else {
@ -879,10 +902,16 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
ctx->base.pc_next + 2));
ctx->opcode = opcode32;
ctx->pc_succ_insn = ctx->base.pc_next + 4;
if (!decode_insn32(ctx, opcode32)) {
gen_exception_illegal(ctx);
for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
if (decoders[i].guard_func(ctx) &&
decoders[i].decode_func(ctx, opcode32)) {
return;
}
}
}
gen_exception_illegal(ctx);
}
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@ -908,13 +937,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
#endif
ctx->misa_ext = env->misa_ext;
ctx->frm = -1; /* unknown rounding mode */
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
ctx->ext_zfh = cpu->cfg.ext_zfh;
ctx->ext_zfhmin = cpu->cfg.ext_zfhmin;
ctx->ext_zve32f = cpu->cfg.ext_zve32f;
ctx->ext_zve64f = cpu->cfg.ext_zve64f;
ctx->vlen = cpu->cfg.vlen;
ctx->elen = cpu->cfg.elen;
ctx->cfg_ptr = &(cpu->cfg);
ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS);
ctx->mstatus_hs_vs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_VS);
ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);

View File

@ -71,6 +71,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
env->vl = vl;
env->vtype = s2;
env->vstart = 0;
env->vill = 0;
return vl;
}