ppc-7.0 queue

* ppc/pnv fixes
 * PMU EBB support
 * target/ppc: PowerISA Vector/VSX instruction batch
 * ppc/pnv: Extension of the powernv10 machine with XIVE2 and PHB5 models
 * spapr allocation cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEoPZlSPBIlev+awtgUaNDx8/77KEFAmIfTloACgkQUaNDx8/7
 7KFSjg/+PzZn81n2WiDE5HCORc5L/nwFMv8zevBNpHZn3LE1nTfzEV0BqekiyWc4
 nsMix9soXlYX86u7HzCZI212jPWbf6z+4ACI40uQh8U7t45CXkmKi5x8kosPbwqa
 d7iOiDv76k8f2c3Uv9ynmYk3TZOfrA5Ua79P+ZE09EKnIr6dYmcGCq6EYm6KN6p8
 hoZ97DbyT5loQ1x7/pIO10Wr84xvoEGYzqm6+TKFTsyBNSaXjzXNIJegxHDuR0iz
 D9YFb/w3WzBR9EORRzasvuZFI3yGcgy/WuWJUrb2VC8G+TTe7IlJsAFoCNyoysh7
 FbtL1vTmHPh7XSfn34sB1x4wqPHaohrS4/zCN1l1eeEU+giTBXGhPULEypCDqHgn
 SD1DLRwVRqT0uH5SqEGPl2eYaccs0MHflD2YWS5HdOdBYE9jic8jQDv8TZlfqhzp
 x9B1b/dg3nlz7yaOj3LFw7ohN2IlU7o66QqcKytO3phdp6a2z4OoFvv6jcnEqYwi
 YnL8ScUeXqheDfA/fh1BF4gAZdSf655Kvk7MbGwBLwFq2jqygP8Ca2ODp03NYhB0
 qb3sM08fy7CSIdwaDySePDkrWcHU/XeVhRN6Gj8W1g8ZH9Z7/iSLiP4hZjEqvXNC
 zoM1ut0CkrHpZzPZv3+ZGxzr0A+fDZGub0rp7W6BbPzYyiChuIk=
 =kWv5
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220302' into staging

ppc-7.0 queue

* ppc/pnv fixes
* PMU EBB support
* target/ppc: PowerISA Vector/VSX instruction batch
* ppc/pnv: Extension of the powernv10 machine with XIVE2 and PHB5 models
* spapr allocation cleanups

# gpg: Signature made Wed 02 Mar 2022 11:00:42 GMT
# gpg:                using RSA key A0F66548F04895EBFE6B0B6051A343C7CFFBECA1
# gpg: Good signature from "Cédric Le Goater <clg@kaod.org>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: A0F6 6548 F048 95EB FE6B  0B60 51A3 43C7 CFFB ECA1

* remotes/legoater/tags/pull-ppc-20220302: (87 commits)
  hw/ppc/spapr_vio.c: use g_autofree in spapr_dt_vdevice()
  hw/ppc/spapr_rtas.c: use g_autofree in rtas_ibm_get_system_parameter()
  spapr_pci_nvlink2.c: use g_autofree in spapr_phb_nvgpu_ram_populate_dt()
  hw/ppc/spapr_numa.c: simplify spapr_numa_write_assoc_lookup_arrays()
  hw/ppc/spapr_drc.c: use g_autofree in spapr_drc_by_index()
  hw/ppc/spapr_drc.c: use g_autofree in spapr_dr_connector_new()
  hw/ppc/spapr_drc.c: use g_autofree in drc_unrealize()
  hw/ppc/spapr_drc.c: use g_autofree in drc_realize()
  hw/ppc/spapr_drc.c: use g_auto in spapr_dt_drc()
  hw/ppc/spapr_caps.c: use g_autofree in spapr_caps_add_properties()
  hw/ppc/spapr_caps.c: use g_autofree in spapr_cap_get_string()
  hw/ppc/spapr_caps.c: use g_autofree in spapr_cap_set_string()
  hw/ppc/spapr.c: fail early if no firmware found in machine_init()
  hw/ppc/spapr.c: use g_autofree in spapr_dt_chosen()
  pnv/xive2: Add support for 8bits thread id
  pnv/xive2: Add support for automatic save&restore
  xive2: Add a get_config() handler for the router configuration
  pnv/xive2: Add support XIVE2 P9-compat mode (or Gen1)
  ppc/pnv: add XIVE Gen2 TIMA support
  pnv/xive2: Introduce new capability bits
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 64ada298b9 (Peter Maydell, 2022-03-02 12:38:46 +00:00)
51 changed files with 7740 additions and 951 deletions

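To exercise the extended powernv10 machine brought in by this series, a minimal invocation along these lines should boot the bundled skiboot firmware (a sketch; everything besides -M powernv10 is illustrative):

    qemu-system-ppc64 -M powernv10 -smp 2 -m 2G -nographic
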
hw/intc/meson.build:

@ -42,7 +42,7 @@ specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c'))
specific_ss.add(when: 'CONFIG_OMPIC', if_true: files('ompic.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_OPENPIC'],
if_true: files('openpic_kvm.c'))
specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c'))
specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c', 'pnv_xive2.c'))
specific_ss.add(when: 'CONFIG_PPC_UIC', if_true: files('ppc-uic.c'))
specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c'))
specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c'))
@ -52,7 +52,7 @@ specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c', 'xive2.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],
if_true: files('xics_kvm.c'))
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('xics_spapr.c', 'spapr_xive.c'))

hw/intc/pnv_xive.c:

@ -403,6 +403,34 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}
static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
uint8_t *pq)
{
PnvXive *xive = PNV_XIVE(xrtr);
if (pnv_xive_block_id(xive) != blk) {
xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
return -1;
}
*pq = xive_source_esb_get(&xive->ipi_source, idx);
return 0;
}
static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
uint8_t *pq)
{
PnvXive *xive = PNV_XIVE(xrtr);
if (pnv_xive_block_id(xive) != blk) {
xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
return -1;
}
*pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
return 0;
}
/*
* One bit per thread id. The first register PC_THREAD_EN_REG0 covers
* the first cores 0-15 (normal) of the chip or 0-7 (fused). The
@ -499,12 +527,12 @@ static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
* event notification to the Router. This is required on a multichip
* system.
*/
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
PnvXive *xive = PNV_XIVE(xn);
uint8_t blk = pnv_xive_block_id(xive);
xive_router_notify(xn, XIVE_EAS(blk, srcno));
xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
/*
@ -1351,7 +1379,8 @@ static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
blk = XIVE_EAS_BLOCK(val);
idx = XIVE_EAS_INDEX(val);
xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
!!(val & XIVE_TRIGGER_PQ));
}
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
@ -1971,6 +2000,8 @@ static void pnv_xive_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, pnv_xive_properties);
xrc->get_eas = pnv_xive_get_eas;
xrc->get_pq = pnv_xive_get_pq;
xrc->set_pq = pnv_xive_set_pq;
xrc->get_end = pnv_xive_get_end;
xrc->write_end = pnv_xive_write_end;
xrc->get_nvt = pnv_xive_get_nvt;

hw/intc/pnv_xive2.c (new file, 2128 lines): diff suppressed because it is too large

hw/intc/pnv_xive2_regs.h (new file, 442 lines):

@ -0,0 +1,442 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
* Copyright (c) 2019-2022, IBM Corporation.
*
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
#ifndef PPC_PNV_XIVE2_REGS_H
#define PPC_PNV_XIVE2_REGS_H
/*
* CQ Common Queue (PowerBus bridge) Registers
*/
/* XIVE2 Capabilities */
#define X_CQ_XIVE_CAP 0x02
#define CQ_XIVE_CAP 0x010
#define CQ_XIVE_CAP_VERSION PPC_BITMASK(0, 3)
/* 4:6 reserved */
#define CQ_XIVE_CAP_USER_INT_PRIO PPC_BITMASK(8, 9)
#define CQ_XIVE_CAP_USER_INT_PRIO_1 0
#define CQ_XIVE_CAP_USER_INT_PRIO_1_2 1
#define CQ_XIVE_CAP_USER_INT_PRIO_1_4 2
#define CQ_XIVE_CAP_USER_INT_PRIO_1_8 3
#define CQ_XIVE_CAP_VP_INT_PRIO PPC_BITMASK(10, 11)
#define CQ_XIVE_CAP_VP_INT_PRIO_1_8 0
#define CQ_XIVE_CAP_VP_INT_PRIO_2_8 1
#define CQ_XIVE_CAP_VP_INT_PRIO_4_8 2
#define CQ_XIVE_CAP_VP_INT_PRIO_8 3
#define CQ_XIVE_CAP_BLOCK_ID_WIDTH PPC_BITMASK(12, 13)
#define CQ_XIVE_CAP_VP_SAVE_RESTORE PPC_BIT(38)
#define CQ_XIVE_CAP_PHB_PQ_DISABLE PPC_BIT(56)
#define CQ_XIVE_CAP_PHB_ABT PPC_BIT(57)
#define CQ_XIVE_CAP_EXPLOITATION_MODE PPC_BIT(58)
#define CQ_XIVE_CAP_STORE_EOI PPC_BIT(59)
/* XIVE2 Configuration */
#define X_CQ_XIVE_CFG 0x03
#define CQ_XIVE_CFG 0x018
/* 0:7 reserved */
#define CQ_XIVE_CFG_USER_INT_PRIO PPC_BITMASK(8, 9)
#define CQ_XIVE_CFG_VP_INT_PRIO PPC_BITMASK(10, 11)
#define CQ_XIVE_CFG_INT_PRIO_1 0
#define CQ_XIVE_CFG_INT_PRIO_2 1
#define CQ_XIVE_CFG_INT_PRIO_4 2
#define CQ_XIVE_CFG_INT_PRIO_8 3
#define CQ_XIVE_CFG_BLOCK_ID_WIDTH PPC_BITMASK(12, 13)
#define CQ_XIVE_CFG_BLOCK_ID_4BITS 0
#define CQ_XIVE_CFG_BLOCK_ID_5BITS 1
#define CQ_XIVE_CFG_BLOCK_ID_6BITS 2
#define CQ_XIVE_CFG_BLOCK_ID_7BITS 3
#define CQ_XIVE_CFG_HYP_HARD_RANGE PPC_BITMASK(14, 15)
#define CQ_XIVE_CFG_THREADID_7BITS 0
#define CQ_XIVE_CFG_THREADID_8BITS 1
#define CQ_XIVE_CFG_THREADID_9BITS 2
#define CQ_XIVE_CFG_THREADID_10BITs 3
#define CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE PPC_BIT(16)
#define CQ_XIVE_CFG_HYP_HARD_BLOCK_ID PPC_BITMASK(17, 23)
#define CQ_XIVE_CFG_GEN1_TIMA_OS PPC_BIT(24)
#define CQ_XIVE_CFG_GEN1_TIMA_HYP PPC_BIT(25)
#define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */
#define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */
#define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28)
#define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) /* 0 if bit[25]=1 */
#define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */
/* Interrupt Controller Base Address Register - 512 pages (32M) */
#define X_CQ_IC_BAR 0x08
#define CQ_IC_BAR 0x040
#define CQ_IC_BAR_VALID PPC_BIT(0)
#define CQ_IC_BAR_64K PPC_BIT(1)
/* 2:7 reserved */
#define CQ_IC_BAR_ADDR PPC_BITMASK(8, 42)
/* 43:63 reserved */
/* Thread Management Base Address Register - 4 pages */
#define X_CQ_TM_BAR 0x09
#define CQ_TM_BAR 0x048
#define CQ_TM_BAR_VALID PPC_BIT(0)
#define CQ_TM_BAR_64K PPC_BIT(1)
#define CQ_TM_BAR_ADDR PPC_BITMASK(8, 49)
/* ESB Base Address Register */
#define X_CQ_ESB_BAR 0x0A
#define CQ_ESB_BAR 0x050
#define CQ_BAR_VALID PPC_BIT(0)
#define CQ_BAR_64K PPC_BIT(1)
/* 2:7 reserved */
#define CQ_BAR_ADDR PPC_BITMASK(8, 39)
#define CQ_BAR_SET_DIV PPC_BITMASK(56, 58)
#define CQ_BAR_RANGE PPC_BITMASK(59, 63)
/* 0 (16M) - 16 (16T) */
/* END Base Address Register */
#define X_CQ_END_BAR 0x0B
#define CQ_END_BAR 0x058
/* NVPG Base Address Register */
#define X_CQ_NVPG_BAR 0x0C
#define CQ_NVPG_BAR 0x060
/* NVC Base Address Register */
#define X_CQ_NVC_BAR 0x0D
#define CQ_NVC_BAR 0x068
/* Table Address Register */
#define X_CQ_TAR 0x0E
#define CQ_TAR 0x070
#define CQ_TAR_AUTOINC PPC_BIT(0)
#define CQ_TAR_SELECT PPC_BITMASK(12, 15)
#define CQ_TAR_ESB 0 /* 0 - 15 */
#define CQ_TAR_END 2 /* 0 - 15 */
#define CQ_TAR_NVPG 3 /* 0 - 15 */
#define CQ_TAR_NVC 5 /* 0 - 15 */
#define CQ_TAR_ENTRY_SELECT PPC_BITMASK(28, 31)
/* Table Data Register */
#define X_CQ_TDR 0x0F
#define CQ_TDR 0x078
/* for the NVPG, NVC, ESB, END Set Translation Tables */
#define CQ_TDR_VALID PPC_BIT(0)
#define CQ_TDR_BLOCK_ID PPC_BITMASK(60, 63)
/*
* Processor Cores Enabled for MsgSnd
* Identifies which of the 32 possible core chiplets are enabled and
* available to receive the MsgSnd command
*/
#define X_CQ_MSGSND 0x10
#define CQ_MSGSND 0x080
/* Interrupt Unit Reset Control */
#define X_CQ_RST_CTL 0x12
#define CQ_RST_CTL 0x090
#define CQ_RST_SYNC_RESET PPC_BIT(0) /* Write Only */
#define CQ_RST_QUIESCE_PB PPC_BIT(1) /* RW */
#define CQ_RST_MASTER_IDLE PPC_BIT(2) /* Read Only */
#define CQ_RST_SAVE_IDLE PPC_BIT(3) /* Read Only */
#define CQ_RST_PB_BAR_RESET PPC_BIT(4) /* Write Only */
/* PowerBus General Configuration */
#define X_CQ_CFG_PB_GEN 0x14
#define CQ_CFG_PB_GEN 0x0A0
#define CQ_CFG_PB_GEN_PB_INIT PPC_BIT(45)
/*
* FIR
* (And-Mask)
* (Or-Mask)
*/
#define X_CQ_FIR 0x30
#define X_CQ_FIR_AND 0x31
#define X_CQ_FIR_OR 0x32
#define CQ_FIR 0x180
#define CQ_FIR_AND 0x188
#define CQ_FIR_OR 0x190
#define CQ_FIR_PB_RCMDX_CI_ERR1 PPC_BIT(19)
#define CQ_FIR_VC_INFO_ERROR_0_2 PPC_BITMASK(61, 63)
/*
* FIR Mask
* (And-Mask)
* (Or-Mask)
*/
#define X_CQ_FIRMASK 0x33
#define X_CQ_FIRMASK_AND 0x34
#define X_CQ_FIRMASK_OR 0x35
#define CQ_FIRMASK 0x198
#define CQ_FIRMASK_AND 0x1A0
#define CQ_FIRMASK_OR 0x1A8
/*
* VC0
*/
/* VSD table address */
#define X_VC_VSD_TABLE_ADDR 0x100
#define VC_VSD_TABLE_ADDR 0x000
#define VC_VSD_TABLE_AUTOINC PPC_BIT(0)
#define VC_VSD_TABLE_SELECT PPC_BITMASK(12, 15)
#define VC_VSD_TABLE_ADDRESS PPC_BITMASK(28, 31)
/* VSD table data */
#define X_VC_VSD_TABLE_DATA 0x101
#define VC_VSD_TABLE_DATA 0x008
/* AIB AT macro indirect kill */
#define X_VC_AT_MACRO_KILL 0x102
#define VC_AT_MACRO_KILL 0x010
#define VC_AT_MACRO_KILL_VALID PPC_BIT(0)
#define VC_AT_MACRO_KILL_VSD PPC_BITMASK(12, 15)
#define VC_AT_MACRO_KILL_BLOCK_ID PPC_BITMASK(28, 31)
#define VC_AT_MACRO_KILL_OFFSET PPC_BITMASK(48, 60)
/* AIB AT macro indirect kill mask (same bit definitions) */
#define X_VC_AT_MACRO_KILL_MASK 0x103
#define VC_AT_MACRO_KILL_MASK 0x018
/* Remote IRQs and ERQs configuration [n] (n = 0:6) */
#define X_VC_QUEUES_CFG_REM0 0x117
#define VC_QUEUES_CFG_REM0 0x0B8
#define VC_QUEUES_CFG_REM1 0x0C0
#define VC_QUEUES_CFG_REM2 0x0C8
#define VC_QUEUES_CFG_REM3 0x0D0
#define VC_QUEUES_CFG_REM4 0x0D8
#define VC_QUEUES_CFG_REM5 0x0E0
#define VC_QUEUES_CFG_REM6 0x0E8
#define VC_QUEUES_CFG_MEMB_EN PPC_BIT(38)
#define VC_QUEUES_CFG_MEMB_SZ PPC_BITMASK(42, 47)
/*
* VC1
*/
/* ESBC cache flush control trigger */
#define X_VC_ESBC_FLUSH_CTRL 0x140
#define VC_ESBC_FLUSH_CTRL 0x200
#define VC_ESBC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
#define VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
/* ESBC cache flush poll trigger */
#define X_VC_ESBC_FLUSH_POLL 0x141
#define VC_ESBC_FLUSH_POLL 0x208
#define VC_ESBC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(0, 3)
#define VC_ESBC_FLUSH_POLL_OFFSET PPC_BITMASK(4, 31) /* 28-bit */
#define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
/* EASC flush control register */
#define X_VC_EASC_FLUSH_CTRL 0x160
#define VC_EASC_FLUSH_CTRL 0x300
#define VC_EASC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
#define VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
/* EASC flush poll register */
#define X_VC_EASC_FLUSH_POLL 0x161
#define VC_EASC_FLUSH_POLL 0x308
#define VC_EASC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(0, 3)
#define VC_EASC_FLUSH_POLL_OFFSET PPC_BITMASK(4, 31) /* 28-bit */
#define VC_EASC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_EASC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
/*
* VC2
*/
/* ENDC flush control register */
#define X_VC_ENDC_FLUSH_CTRL 0x180
#define VC_ENDC_FLUSH_CTRL 0x400
#define VC_ENDC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
#define VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
#define VC_ENDC_FLUSH_CTRL_WANT_INVALIDATE PPC_BIT(3)
#define VC_ENDC_FLUSH_CTRL_INJECT_INVALIDATE PPC_BIT(7)
/* ENDC flush poll register */
#define X_VC_ENDC_FLUSH_POLL 0x181
#define VC_ENDC_FLUSH_POLL 0x408
#define VC_ENDC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(4, 7)
#define VC_ENDC_FLUSH_POLL_OFFSET PPC_BITMASK(8, 31) /* 24-bit */
#define VC_ENDC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
#define VC_ENDC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
/* ENDC Sync done */
#define X_VC_ENDC_SYNC_DONE 0x184
#define VC_ENDC_SYNC_DONE 0x420
#define VC_ENDC_SYNC_POLL_DONE PPC_BITMASK(0, 6)
#define VC_ENDC_SYNC_QUEUE_IPI PPC_BIT(0)
#define VC_ENDC_SYNC_QUEUE_HWD PPC_BIT(1)
#define VC_ENDC_SYNC_QUEUE_NXC PPC_BIT(2)
#define VC_ENDC_SYNC_QUEUE_INT PPC_BIT(3)
#define VC_ENDC_SYNC_QUEUE_OS PPC_BIT(4)
#define VC_ENDC_SYNC_QUEUE_POOL PPC_BIT(5)
#define VC_ENDC_SYNC_QUEUE_HARD PPC_BIT(6)
#define VC_QUEUE_COUNT 7
/* ENDC cache watch specification 0 */
#define X_VC_ENDC_WATCH0_SPEC 0x1A0
#define VC_ENDC_WATCH0_SPEC 0x500
#define VC_ENDC_WATCH_CONFLICT PPC_BIT(0)
#define VC_ENDC_WATCH_FULL PPC_BIT(8)
#define VC_ENDC_WATCH_BLOCK_ID PPC_BITMASK(28, 31)
#define VC_ENDC_WATCH_INDEX PPC_BITMASK(40, 63)
/* ENDC cache watch data 0 */
#define X_VC_ENDC_WATCH0_DATA0 0x1A4
#define X_VC_ENDC_WATCH0_DATA1 0x1A5
#define X_VC_ENDC_WATCH0_DATA2 0x1A6
#define X_VC_ENDC_WATCH0_DATA3 0x1A7
#define VC_ENDC_WATCH0_DATA0 0x520
#define VC_ENDC_WATCH0_DATA1 0x528
#define VC_ENDC_WATCH0_DATA2 0x530
#define VC_ENDC_WATCH0_DATA3 0x538
/*
* PC LSB1
*/
/* VSD table address register */
#define X_PC_VSD_TABLE_ADDR 0x200
#define PC_VSD_TABLE_ADDR 0x000
#define PC_VSD_TABLE_AUTOINC PPC_BIT(0)
#define PC_VSD_TABLE_SELECT PPC_BITMASK(12, 15)
#define PC_VSD_TABLE_ADDRESS PPC_BITMASK(28, 31)
/* VSD table data register */
#define X_PC_VSD_TABLE_DATA 0x201
#define PC_VSD_TABLE_DATA 0x008
/* AT indirect kill register */
#define X_PC_AT_KILL 0x202
#define PC_AT_KILL 0x010
#define PC_AT_KILL_VALID PPC_BIT(0)
#define PC_AT_KILL_VSD_TYPE PPC_BITMASK(24, 27)
/* Only NVP, NVG, NVC */
#define PC_AT_KILL_BLOCK_ID PPC_BITMASK(28, 31)
#define PC_AT_KILL_OFFSET PPC_BITMASK(48, 60)
/* AT indirect kill mask register */
#define X_PC_AT_KILL_MASK 0x203
#define PC_AT_KILL_MASK 0x018
#define PC_AT_KILL_MASK_VSD_TYPE PPC_BITMASK(24, 27)
#define PC_AT_KILL_MASK_BLOCK_ID PPC_BITMASK(28, 31)
#define PC_AT_KILL_MASK_OFFSET PPC_BITMASK(48, 60)
/*
* PC LSB2
*/
/* NxC Cache flush control */
#define X_PC_NXC_FLUSH_CTRL 0x280
#define PC_NXC_FLUSH_CTRL 0x400
#define PC_NXC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
#define PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
#define PC_NXC_FLUSH_CTRL_WANT_INVALIDATE PPC_BIT(3)
#define PC_NXC_FLUSH_CTRL_INJECT_INVALIDATE PPC_BIT(7)
/* NxC Cache flush poll */
#define X_PC_NXC_FLUSH_POLL 0x281
#define PC_NXC_FLUSH_POLL 0x408
#define PC_NXC_FLUSH_POLL_NXC_TYPE PPC_BITMASK(2, 3)
#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVP 0
#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVG 2
#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVC 3
#define PC_NXC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(4, 7)
#define PC_NXC_FLUSH_POLL_OFFSET PPC_BITMASK(8, 31) /* 24-bit */
#define PC_NXC_FLUSH_POLL_NXC_TYPE_MASK PPC_BITMASK(34, 35) /* 0: Ign */
#define PC_NXC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
#define PC_NXC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
/* NxC Cache Watch 0 Specification */
#define X_PC_NXC_WATCH0_SPEC 0x2A0
#define PC_NXC_WATCH0_SPEC 0x500
#define PC_NXC_WATCH_CONFLICT PPC_BIT(0)
#define PC_NXC_WATCH_FULL PPC_BIT(8)
#define PC_NXC_WATCH_NXC_TYPE PPC_BITMASK(26, 27)
#define PC_NXC_WATCH_NXC_NVP 0
#define PC_NXC_WATCH_NXC_NVG 2
#define PC_NXC_WATCH_NXC_NVC 3
#define PC_NXC_WATCH_BLOCK_ID PPC_BITMASK(28, 31)
#define PC_NXC_WATCH_INDEX PPC_BITMASK(40, 63)
/* NxC Cache Watch 0 Data */
#define X_PC_NXC_WATCH0_DATA0 0x2A4
#define X_PC_NXC_WATCH0_DATA1 0x2A5
#define X_PC_NXC_WATCH0_DATA2 0x2A6
#define X_PC_NXC_WATCH0_DATA3 0x2A7
#define PC_NXC_WATCH0_DATA0 0x520
#define PC_NXC_WATCH0_DATA1 0x528
#define PC_NXC_WATCH0_DATA2 0x530
#define PC_NXC_WATCH0_DATA3 0x538
/*
* TCTXT Registers
*/
/* Physical Thread Enable0 register */
#define X_TCTXT_EN0 0x300
#define TCTXT_EN0 0x000
/* Physical Thread Enable0 Set register */
#define X_TCTXT_EN0_SET 0x302
#define TCTXT_EN0_SET 0x010
/* Physical Thread Enable0 Reset register */
#define X_TCTXT_EN0_RESET 0x303
#define TCTXT_EN0_RESET 0x018
/* Physical Thread Enable1 register */
#define X_TCTXT_EN1 0x304
#define TCTXT_EN1 0x020
/* Physical Thread Enable1 Set register */
#define X_TCTXT_EN1_SET 0x306
#define TCTXT_EN1_SET 0x030
/* Physical Thread Enable1 Reset register */
#define X_TCTXT_EN1_RESET 0x307
#define TCTXT_EN1_RESET 0x038
/*
* VSD Tables
*/
#define VST_ESB 0
#define VST_EAS 1 /* Not used by PC */
#define VST_END 2
#define VST_NVP 3
#define VST_NVG 4
#define VST_NVC 5
#define VST_IC 6 /* Not used by PC */
#define VST_SYNC 7
#define VST_ERQ 8 /* Not used by PC */
/*
* Bits in a VSD entry.
*
* Note: the address is naturally aligned, we don't use a PPC_BITMASK,
* but just a mask to apply to the address before OR'ing it in.
*
* Note: VSD_FIRMWARE is a SW bit ! It hijacks an unused bit in the
* VSD and is only meant to be used in indirect mode !
*/
#define VSD_MODE PPC_BITMASK(0, 1)
#define VSD_MODE_SHARED 1
#define VSD_MODE_EXCLUSIVE 2
#define VSD_MODE_FORWARD 3
#define VSD_FIRMWARE PPC_BIT(2) /* Read warning */
#define VSD_FIRMWARE2 PPC_BIT(3) /* unused */
#define VSD_RESERVED PPC_BITMASK(4, 7) /* P10 reserved */
#define VSD_ADDRESS_MASK 0x00fffffffffff000ull
#define VSD_MIGRATION_REG PPC_BITMASK(52, 55)
#define VSD_INDIRECT PPC_BIT(56)
#define VSD_TSIZE PPC_BITMASK(59, 63)
#endif /* PPC_PNV_XIVE2_REGS_H */
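
The register layouts above use the IBM big-endian bit numbering, where bit 0 is the most significant bit of the 64-bit doubleword. In QEMU the helpers behind these masks are defined along these lines (see target/ppc/cpu.h):

    #define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
    #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

so, for example, CQ_XIVE_CAP_VERSION = PPC_BITMASK(0, 3) selects the top nibble of the capability register.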

hw/intc/spapr_xive.c:

@ -480,6 +480,29 @@ static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
return SPAPR_XIVE_BLOCK_ID;
}
static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
uint8_t *pq)
{
SpaprXive *xive = SPAPR_XIVE(xrtr);
assert(SPAPR_XIVE_BLOCK_ID == blk);
*pq = xive_source_esb_get(&xive->source, idx);
return 0;
}
static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
uint8_t *pq)
{
SpaprXive *xive = SPAPR_XIVE(xrtr);
assert(SPAPR_XIVE_BLOCK_ID == blk);
*pq = xive_source_esb_set(&xive->source, idx, *pq);
return 0;
}
static const VMStateDescription vmstate_spapr_xive_end = {
.name = TYPE_SPAPR_XIVE "/end",
.version_id = 1,
@ -788,6 +811,8 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_spapr_xive;
xrc->get_eas = spapr_xive_get_eas;
xrc->get_pq = spapr_xive_get_pq;
xrc->set_pq = spapr_xive_set_pq;
xrc->get_end = spapr_xive_get_end;
xrc->write_end = spapr_xive_write_end;
xrc->get_nvt = spapr_xive_get_nvt;

hw/intc/xive.c:

@ -886,6 +886,16 @@ static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
}
}
/*
* Sources can be configured with PQ offloading in which case the check
* on the PQ state bits of MSIs is disabled
*/
static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno)
{
return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) &&
!xive_source_irq_is_lsi(xsrc, srcno);
}
/*
* Returns whether the event notification should be forwarded.
*/
@ -895,6 +905,10 @@ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
assert(srcno < xsrc->nr_irqs);
if (xive_source_esb_disabled(xsrc, srcno)) {
return true;
}
ret = xive_esb_trigger(&xsrc->status[srcno]);
if (xive_source_irq_is_lsi(xsrc, srcno) &&
@ -915,6 +929,11 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
assert(srcno < xsrc->nr_irqs);
if (xive_source_esb_disabled(xsrc, srcno)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno);
return false;
}
ret = xive_esb_eoi(&xsrc->status[srcno]);
/*
@ -936,9 +955,10 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
bool pq_checked = !xive_source_esb_disabled(xsrc, srcno);
if (xnc->notify) {
xnc->notify(xsrc->xive, srcno);
xnc->notify(xsrc->xive, srcno, pq_checked);
}
}
@ -1061,6 +1081,15 @@ static void xive_source_esb_write(void *opaque, hwaddr addr,
notify = xive_source_esb_eoi(xsrc, srcno);
break;
/*
* This is an internal offset used to inject triggers when the PQ
* state bits are not controlled locally. Such as for LSIs when
* under ABT mode.
*/
case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
notify = true;
break;
case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
@ -1361,6 +1390,24 @@ int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}
static
int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
uint8_t *pq)
{
XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}
static
int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
uint8_t *pq)
{
XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}
int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
XiveEND *end)
{
@ -1712,7 +1759,7 @@ do_escalation:
xive_get_field32(END_W5_ESC_END_DATA, end.w5));
}
void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
XiveRouter *xrtr = XIVE_ROUTER(xn);
uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
@ -1725,11 +1772,27 @@ void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
return;
}
/*
* The IVRE checks the State Bit Cache at this point. We skip the
* SBC lookup because the state bits of the sources are modeled
* internally in QEMU.
*/
if (!pq_checked) {
bool notify;
uint8_t pq;
/* PQ cache lookup */
if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
/* Set FIR */
g_assert_not_reached();
}
notify = xive_esb_trigger(&pq);
if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
/* Set FIR */
g_assert_not_reached();
}
if (!notify) {
return;
}
}
if (!xive_eas_is_valid(&eas)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
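
For context, the PQ cache lookup added above relies on the same trigger transition the ESB sources use; a simplified sketch of xive_esb_trigger() (the real helper lives earlier in hw/intc/xive.c; P = event forwarded, Q = another trigger queued):

    static bool xive_esb_trigger(uint8_t *pq)
    {
        switch (*pq) {
        case XIVE_ESB_RESET:    /* 00: idle -> pending, forward the event */
            *pq = XIVE_ESB_PENDING;
            return true;
        case XIVE_ESB_PENDING:  /* 10: already forwarded, remember a queued trigger */
        case XIVE_ESB_QUEUED:   /* 11: stays queued */
            *pq = XIVE_ESB_QUEUED;
            return false;
        case XIVE_ESB_OFF:      /* 01: masked, drop the event */
            return false;
        default:
            g_assert_not_reached();
        }
    }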

hw/intc/xive2.c (new file, 1018 lines): diff suppressed because it is too large

hw/pci-host/pnv_phb4.c:

@ -485,6 +485,15 @@ static void pnv_phb4_update_xsrc(PnvPHB4 *phb)
flags = 0;
}
/*
* When the PQ disable configuration bit is set, the check on the
* PQ state bits is disabled on the PHB side (for MSI only) and it
* is performed on the IC side instead.
*/
if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PQ_DISABLE) {
flags |= XIVE_SRC_PQ_DISABLE;
}
phb->xsrc.esb_shift = shift;
phb->xsrc.esb_flags = flags;
@ -1568,40 +1577,36 @@ static PnvPhb4PecState *pnv_phb4_get_pec(PnvChip *chip, PnvPHB4 *phb,
static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
PnvPHB4 *phb = PNV_PHB4(dev);
PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
PnvChip *chip = pnv_get_chip(pnv, phb->chip_id);
PCIHostState *pci = PCI_HOST_BRIDGE(dev);
XiveSource *xsrc = &phb->xsrc;
BusState *s;
Error *local_err = NULL;
int nr_irqs;
char name[32];
/* User created PHB */
if (!chip) {
error_setg(errp, "invalid chip id: %d", phb->chip_id);
return;
}
/* User created PHBs need to be assigned to a PEC */
if (!phb->pec) {
PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
PnvChip *chip = pnv_get_chip(pnv, phb->chip_id);
BusState *s;
if (!chip) {
error_setg(errp, "invalid chip id: %d", phb->chip_id);
return;
}
phb->pec = pnv_phb4_get_pec(chip, phb, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
}
/*
* Reparent user created devices to the chip to build
* correctly the device tree.
*/
pnv_chip_parent_fixup(chip, OBJECT(phb), phb->phb_id);
/* Reparent the PHB to the chip to build the device tree */
pnv_chip_parent_fixup(chip, OBJECT(phb), phb->phb_id);
s = qdev_get_parent_bus(DEVICE(chip));
if (!qdev_set_parent_bus(DEVICE(phb), s, &local_err)) {
error_propagate(errp, local_err);
return;
}
s = qdev_get_parent_bus(DEVICE(chip));
if (!qdev_set_parent_bus(DEVICE(phb), s, &local_err)) {
error_propagate(errp, local_err);
return;
}
/* Set the "big_phb" flag */
@ -1664,15 +1669,64 @@ static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge,
return phb->bus_path;
}
static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
/*
* Address base trigger mode (POWER10)
*
* Trigger directly the IC ESB page
*/
static void pnv_phb4_xive_notify_abt(PnvPHB4 *phb, uint32_t srcno,
bool pq_checked)
{
uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
uint64_t data = 0; /* trigger data : don't care */
hwaddr addr;
MemTxResult result;
int esb_shift;
if (notif_port & PHB_INT_NOTIFY_ADDR_64K) {
esb_shift = 16;
} else {
esb_shift = 12;
}
/* Compute the address of the IC ESB management page */
addr = (notif_port & ~PHB_INT_NOTIFY_ADDR_64K);
addr |= (1ull << (esb_shift + 1)) * srcno;
addr |= (1ull << esb_shift);
/*
* When the PQ state bits are checked on the PHB, the associated
* PQ state bits on the IC should be ignored. Use the unconditional
* trigger offset to inject a trigger on the IC. This is always
* the case for LSIs
*/
if (pq_checked) {
addr |= XIVE_ESB_INJECT;
}
trace_pnv_phb4_xive_notify_ic(addr, data);
address_space_stq_be(&address_space_memory, addr, data,
MEMTXATTRS_UNSPECIFIED, &result);
if (result != MEMTX_OK) {
phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", addr);
return;
}
}
static void pnv_phb4_xive_notify_ic(PnvPHB4 *phb, uint32_t srcno,
bool pq_checked)
{
PnvPHB4 *phb = PNV_PHB4(xf);
uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
uint64_t data = offset | srcno;
MemTxResult result;
trace_pnv_phb4_xive_notify(notif_port, data);
if (pq_checked) {
data |= XIVE_TRIGGER_PQ;
}
trace_pnv_phb4_xive_notify_ic(notif_port, data);
address_space_stq_be(&address_space_memory, notif_port, data,
MEMTXATTRS_UNSPECIFIED, &result);
@ -1682,6 +1736,18 @@ static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
}
}
static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno,
bool pq_checked)
{
PnvPHB4 *phb = PNV_PHB4(xf);
if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE) {
pnv_phb4_xive_notify_abt(phb, srcno, pq_checked);
} else {
pnv_phb4_xive_notify_ic(phb, srcno, pq_checked);
}
}
static Property pnv_phb4_properties[] = {
DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
@ -1816,9 +1882,29 @@ static const TypeInfo pnv_phb4_root_port_info = {
.class_init = pnv_phb4_root_port_class_init,
};
static void pnv_phb5_root_port_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
dc->desc = "IBM PHB5 PCIE Root Port";
dc->user_creatable = true;
k->vendor_id = PCI_VENDOR_ID_IBM;
k->device_id = PNV_PHB5_DEVICE_ID;
}
static const TypeInfo pnv_phb5_root_port_info = {
.name = TYPE_PNV_PHB5_ROOT_PORT,
.parent = TYPE_PNV_PHB4_ROOT_PORT,
.instance_size = sizeof(PnvPHB4RootPort),
.class_init = pnv_phb5_root_port_class_init,
};
static void pnv_phb4_register_types(void)
{
type_register_static(&pnv_phb4_root_bus_info);
type_register_static(&pnv_phb5_root_port_info);
type_register_static(&pnv_phb4_root_port_info);
type_register_static(&pnv_phb4_type_info);
type_register_static(&pnv_phb4_iommu_memory_region_info);
@ -1828,10 +1914,15 @@ type_init(pnv_phb4_register_types);
void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon)
{
uint64_t notif_port =
phb->regs[PHB_INT_NOTIFY_ADDR >> 3] & ~PHB_INT_NOTIFY_ADDR_64K;
uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
bool abt = !!(phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE);
monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x\n",
monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x %s @%"HWADDR_PRIx"\n",
phb->chip_id, phb->phb_id,
offset, offset + phb->xsrc.nr_irqs - 1);
offset, offset + phb->xsrc.nr_irqs - 1,
abt ? "ABT" : "",
notif_port);
xive_source_pic_print_info(&phb->xsrc, 0, mon);
}
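
As a concrete illustration of the pnv_phb4_xive_notify_abt() address computation above, take 64K ESB pages (esb_shift = 16), source number 3 and an illustrative notification port base of 0x6030200000000:

    addr = 0x6030200000000      /* notif_port with the 64K flag cleared       */
         | (3ull << 17)         /* srcno pairs of 64K pages: srcno << (16 + 1) */
         | (1ull << 16);        /* odd page of the pair: the management page   */
    /* addr = 0x6030200070000; when pq_checked is set, XIVE_ESB_INJECT is
       ORed in to select the unconditional trigger offset within that page */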

hw/pci-host/pnv_phb4_pec.c:

@ -281,9 +281,62 @@ static const TypeInfo pnv_pec_type_info = {
}
};
/*
* POWER10 definitions
*/
static uint32_t pnv_phb5_pec_xscom_pci_base(PnvPhb4PecState *pec)
{
return PNV10_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index;
}
static uint32_t pnv_phb5_pec_xscom_nest_base(PnvPhb4PecState *pec)
{
/* index goes down ... */
return PNV10_XSCOM_PEC_NEST_BASE - 0x1000000 * pec->index;
}
/*
* PEC0 -> 3 stacks
* PEC1 -> 3 stacks
*/
static const uint32_t pnv_phb5_pec_num_stacks[] = { 3, 3 };
static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data)
{
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass);
static const char compat[] = "ibm,power10-pbcq";
static const char stk_compat[] = "ibm,power10-phb-stack";
pecc->xscom_nest_base = pnv_phb5_pec_xscom_nest_base;
pecc->xscom_pci_base = pnv_phb5_pec_xscom_pci_base;
pecc->xscom_nest_size = PNV10_XSCOM_PEC_NEST_SIZE;
pecc->xscom_pci_size = PNV10_XSCOM_PEC_PCI_SIZE;
pecc->compat = compat;
pecc->compat_size = sizeof(compat);
pecc->stk_compat = stk_compat;
pecc->stk_compat_size = sizeof(stk_compat);
pecc->version = PNV_PHB5_VERSION;
pecc->num_phbs = pnv_phb5_pec_num_stacks;
pecc->rp_model = TYPE_PNV_PHB5_ROOT_PORT;
}
static const TypeInfo pnv_phb5_pec_type_info = {
.name = TYPE_PNV_PHB5_PEC,
.parent = TYPE_PNV_PHB4_PEC,
.instance_size = sizeof(PnvPhb4PecState),
.class_init = pnv_phb5_pec_class_init,
.class_size = sizeof(PnvPhb4PecClass),
.interfaces = (InterfaceInfo[]) {
{ TYPE_PNV_XSCOM_INTERFACE },
{ }
}
};
static void pnv_pec_register_types(void)
{
type_register_static(&pnv_pec_type_info);
type_register_static(&pnv_phb5_pec_type_info);
}
type_init(pnv_pec_register_types);

hw/pci-host/trace-events:

@ -32,3 +32,5 @@ unin_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
# pnv_phb4.c
pnv_phb4_xive_notify(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64
pnv_phb4_xive_notify_ic(uint64_t addr, uint64_t data) "addr=@0x%"PRIx64" data=0x%"PRIx64
pnv_phb4_xive_notify_abt(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64

hw/ppc/pnv.c:

@ -380,9 +380,12 @@ static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off)
cpu_to_be32(io_base),
cpu_to_be32(8)
};
uint32_t irq;
char *name;
int node;
irq = object_property_get_uint(OBJECT(d), "irq", &error_fatal);
name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base);
node = fdt_add_subnode(fdt, lpc_off, name);
_FDT(node);
@ -394,7 +397,7 @@ static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off)
_FDT((fdt_setprop_cell(fdt, node, "clock-frequency", 1843200)));
_FDT((fdt_setprop_cell(fdt, node, "current-speed", 115200)));
_FDT((fdt_setprop_cell(fdt, node, "interrupts", d->isairq[0])));
_FDT((fdt_setprop_cell(fdt, node, "interrupts", irq)));
_FDT((fdt_setprop_cell(fdt, node, "interrupt-parent",
fdt_get_phandle(fdt, lpc_off))));
@ -722,7 +725,11 @@ static void pnv_chip_power10_pic_print_info(PnvChip *chip, Monitor *mon)
{
Pnv10Chip *chip10 = PNV10_CHIP(chip);
pnv_xive2_pic_print_info(&chip10->xive, mon);
pnv_psi_pic_print_info(&chip10->psi, mon);
object_child_foreach_recursive(OBJECT(chip),
pnv_chip_power9_pic_print_info_child, mon);
}
/* Always give the first 1GB to chip 0 else we won't boot */
@ -1044,27 +1051,45 @@ static void pnv_chip_power9_intc_print_info(PnvChip *chip, PowerPCCPU *cpu,
static void pnv_chip_power10_intc_create(PnvChip *chip, PowerPCCPU *cpu,
Error **errp)
{
Pnv10Chip *chip10 = PNV10_CHIP(chip);
Error *local_err = NULL;
Object *obj;
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
/* Will be defined when the interrupt controller is */
pnv_cpu->intc = NULL;
/*
* The core creates its interrupt presenter but the XIVE2 interrupt
* controller object is initialized afterwards. Hopefully, it's
* only used at runtime.
*/
obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(&chip10->xive),
&local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
pnv_cpu->intc = obj;
}
static void pnv_chip_power10_intc_reset(PnvChip *chip, PowerPCCPU *cpu)
{
;
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
xive_tctx_reset(XIVE_TCTX(pnv_cpu->intc));
}
static void pnv_chip_power10_intc_destroy(PnvChip *chip, PowerPCCPU *cpu)
{
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
xive_tctx_destroy(XIVE_TCTX(pnv_cpu->intc));
pnv_cpu->intc = NULL;
}
static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu,
Monitor *mon)
{
xive_tctx_pic_print_info(XIVE_TCTX(pnv_cpu_state(cpu)->intc), mon);
}
/*
@ -1366,6 +1391,21 @@ static void pnv_chip_power9_instance_init(Object *obj)
}
}
static void pnv_chip_quad_realize_one(PnvChip *chip, PnvQuad *eq,
PnvCore *pnv_core)
{
char eq_name[32];
int core_id = CPU_CORE(pnv_core)->core_id;
snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id);
object_initialize_child_with_props(OBJECT(chip), eq_name, eq,
sizeof(*eq), TYPE_PNV_QUAD,
&error_fatal, NULL);
object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal);
qdev_realize(DEVICE(eq), NULL, &error_fatal);
}
static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp)
{
PnvChip *chip = PNV_CHIP(chip9);
@ -1375,18 +1415,9 @@ static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp)
chip9->quads = g_new0(PnvQuad, chip9->nr_quads);
for (i = 0; i < chip9->nr_quads; i++) {
char eq_name[32];
PnvQuad *eq = &chip9->quads[i];
PnvCore *pnv_core = chip->cores[i * 4];
int core_id = CPU_CORE(pnv_core)->core_id;
snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id);
object_initialize_child_with_props(OBJECT(chip), eq_name, eq,
sizeof(*eq), TYPE_PNV_QUAD,
&error_fatal, NULL);
object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal);
qdev_realize(DEVICE(eq), NULL, &error_fatal);
pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4]);
pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->quad_id),
&eq->xscom_regs);
@ -1469,6 +1500,9 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
/* Processor Service Interface (PSI) Host Bridge */
object_property_set_int(OBJECT(&chip9->psi), "bar", PNV9_PSIHB_BASE(chip),
&error_fatal);
/* This is the only device with 4k ESB pages */
object_property_set_int(OBJECT(&chip9->psi), "shift", XIVE_ESB_4K,
&error_fatal);
if (!qdev_realize(DEVICE(&chip9->psi), NULL, errp)) {
return;
}
@ -1553,10 +1587,73 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
static void pnv_chip_power10_instance_init(Object *obj)
{
PnvChip *chip = PNV_CHIP(obj);
Pnv10Chip *chip10 = PNV10_CHIP(obj);
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
int i;
object_initialize_child(obj, "xive", &chip10->xive, TYPE_PNV_XIVE2);
object_property_add_alias(obj, "xive-fabric", OBJECT(&chip10->xive),
"xive-fabric");
object_initialize_child(obj, "psi", &chip10->psi, TYPE_PNV10_PSI);
object_initialize_child(obj, "lpc", &chip10->lpc, TYPE_PNV10_LPC);
object_initialize_child(obj, "occ", &chip10->occ, TYPE_PNV10_OCC);
object_initialize_child(obj, "homer", &chip10->homer, TYPE_PNV10_HOMER);
if (defaults_enabled()) {
chip->num_pecs = pcc->num_pecs;
}
for (i = 0; i < chip->num_pecs; i++) {
object_initialize_child(obj, "pec[*]", &chip10->pecs[i],
TYPE_PNV_PHB5_PEC);
}
}
static void pnv_chip_power10_quad_realize(Pnv10Chip *chip10, Error **errp)
{
PnvChip *chip = PNV_CHIP(chip10);
int i;
chip10->nr_quads = DIV_ROUND_UP(chip->nr_cores, 4);
chip10->quads = g_new0(PnvQuad, chip10->nr_quads);
for (i = 0; i < chip10->nr_quads; i++) {
PnvQuad *eq = &chip10->quads[i];
pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4]);
pnv_xscom_add_subregion(chip, PNV10_XSCOM_EQ_BASE(eq->quad_id),
&eq->xscom_regs);
}
}
static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp)
{
Pnv10Chip *chip10 = PNV10_CHIP(chip);
int i;
for (i = 0; i < chip->num_pecs; i++) {
PnvPhb4PecState *pec = &chip10->pecs[i];
PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
uint32_t pec_nest_base;
uint32_t pec_pci_base;
object_property_set_int(OBJECT(pec), "index", i, &error_fatal);
object_property_set_int(OBJECT(pec), "chip-id", chip->chip_id,
&error_fatal);
object_property_set_link(OBJECT(pec), "chip", OBJECT(chip),
&error_fatal);
if (!qdev_realize(DEVICE(pec), NULL, errp)) {
return;
}
pec_nest_base = pecc->xscom_nest_base(pec);
pec_pci_base = pecc->xscom_pci_base(pec);
pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr);
pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr);
}
}
static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
@ -1580,9 +1677,39 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
return;
}
pnv_chip_power10_quad_realize(chip10, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
/* XIVE2 interrupt controller (POWER10) */
object_property_set_int(OBJECT(&chip10->xive), "ic-bar",
PNV10_XIVE2_IC_BASE(chip), &error_fatal);
object_property_set_int(OBJECT(&chip10->xive), "esb-bar",
PNV10_XIVE2_ESB_BASE(chip), &error_fatal);
object_property_set_int(OBJECT(&chip10->xive), "end-bar",
PNV10_XIVE2_END_BASE(chip), &error_fatal);
object_property_set_int(OBJECT(&chip10->xive), "nvpg-bar",
PNV10_XIVE2_NVPG_BASE(chip), &error_fatal);
object_property_set_int(OBJECT(&chip10->xive), "nvc-bar",
PNV10_XIVE2_NVC_BASE(chip), &error_fatal);
object_property_set_int(OBJECT(&chip10->xive), "tm-bar",
PNV10_XIVE2_TM_BASE(chip), &error_fatal);
object_property_set_link(OBJECT(&chip10->xive), "chip", OBJECT(chip),
&error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&chip10->xive), errp)) {
return;
}
pnv_xscom_add_subregion(chip, PNV10_XSCOM_XIVE2_BASE,
&chip10->xive.xscom_regs);
/* Processor Service Interface (PSI) Host Bridge */
object_property_set_int(OBJECT(&chip10->psi), "bar",
PNV10_PSIHB_BASE(chip), &error_fatal);
/* PSI can now be configured to use 64k ESB pages on POWER10 */
object_property_set_int(OBJECT(&chip10->psi), "shift", XIVE_ESB_64K,
&error_fatal);
if (!qdev_realize(DEVICE(&chip10->psi), NULL, errp)) {
return;
}
@ -1601,6 +1728,41 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
chip->fw_mr = &chip10->lpc.isa_fw;
chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0",
(uint64_t) PNV10_LPCM_BASE(chip));
/* Create the simplified OCC model */
object_property_set_link(OBJECT(&chip10->occ), "psi", OBJECT(&chip10->psi),
&error_abort);
if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) {
return;
}
pnv_xscom_add_subregion(chip, PNV10_XSCOM_OCC_BASE,
&chip10->occ.xscom_regs);
/* OCC SRAM model */
memory_region_add_subregion(get_system_memory(),
PNV10_OCC_SENSOR_BASE(chip),
&chip10->occ.sram_regs);
/* HOMER */
object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip),
&error_abort);
if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) {
return;
}
/* Homer Xscom region */
pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE,
&chip10->homer.pba_regs);
/* Homer mmio region */
memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip),
&chip10->homer.regs);
/* PHBs */
pnv_chip_power10_phb_realize(chip, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
}
static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr)
@ -1627,6 +1789,7 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data)
k->xscom_core_base = pnv_chip_power10_xscom_core_base;
k->xscom_pcba = pnv_chip_power10_xscom_pcba;
dc->desc = "PowerNV Chip POWER10";
k->num_pecs = PNV10_CHIP_MAX_PEC;
device_class_set_parent_realize(dc, pnv_chip_power10_realize,
&k->parent_realize);
@ -1924,6 +2087,35 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
return total_count;
}
static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
PnvMachineState *pnv = PNV_MACHINE(xfb);
int total_count = 0;
int i;
for (i = 0; i < pnv->num_chips; i++) {
Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive);
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
priority, logic_serv, match);
if (count < 0) {
return count;
}
total_count += count;
}
return total_count;
}
static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@ -1968,6 +2160,7 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc);
XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
static const char compat[] = "qemu,powernv10\0ibm,powernv";
mc->desc = "IBM PowerNV (Non-Virtualized) POWER10";
@ -1976,6 +2169,8 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
pmc->dt_power_mgt = pnv_dt_power_mgt;
xfc->match_nvt = pnv10_xive_match_nvt;
}
static bool pnv_machine_get_hb(Object *obj, Error **errp)
@ -2087,6 +2282,10 @@ static const TypeInfo types[] = {
.name = MACHINE_TYPE_NAME("powernv10"),
.parent = TYPE_PNV_MACHINE,
.class_init = pnv_machine_power10_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_XIVE_FABRIC },
{ },
},
},
{
.name = MACHINE_TYPE_NAME("powernv9"),

hw/ppc/pnv_homer.c:

@ -332,6 +332,69 @@ static const TypeInfo pnv_homer_power9_type_info = {
.class_init = pnv_homer_power9_class_init,
};
static uint64_t pnv_homer_power10_pba_read(void *opaque, hwaddr addr,
unsigned size)
{
PnvHomer *homer = PNV_HOMER(opaque);
PnvChip *chip = homer->chip;
uint32_t reg = addr >> 3;
uint64_t val = 0;
switch (reg) {
case PBA_BAR0:
val = PNV10_HOMER_BASE(chip);
break;
case PBA_BARMASK0: /* P10 homer region mask */
val = (PNV10_HOMER_SIZE - 1) & 0x300000;
break;
case PBA_BAR2: /* P10 occ common area */
val = PNV10_OCC_COMMON_AREA_BASE;
break;
case PBA_BARMASK2: /* P10 occ common area size */
val = (PNV10_OCC_COMMON_AREA_SIZE - 1) & 0x700000;
break;
default:
qemu_log_mask(LOG_UNIMP, "PBA: read to unimplemented register: 0x%"
HWADDR_PRIx "\n", addr >> 3);
}
return val;
}
static void pnv_homer_power10_pba_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
qemu_log_mask(LOG_UNIMP, "PBA: write to unimplemented register: 0x%"
HWADDR_PRIx "\n", addr >> 3);
}
static const MemoryRegionOps pnv_homer_power10_pba_ops = {
.read = pnv_homer_power10_pba_read,
.write = pnv_homer_power10_pba_write,
.valid.min_access_size = 8,
.valid.max_access_size = 8,
.impl.min_access_size = 8,
.impl.max_access_size = 8,
.endianness = DEVICE_BIG_ENDIAN,
};
static void pnv_homer_power10_class_init(ObjectClass *klass, void *data)
{
PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
homer->pba_size = PNV10_XSCOM_PBA_SIZE;
homer->pba_ops = &pnv_homer_power10_pba_ops;
homer->homer_size = PNV10_HOMER_SIZE;
homer->homer_ops = &pnv_power9_homer_ops; /* TODO */
homer->core_max_base = PNV9_CORE_MAX_BASE;
}
static const TypeInfo pnv_homer_power10_type_info = {
.name = TYPE_PNV10_HOMER,
.parent = TYPE_PNV_HOMER,
.instance_size = sizeof(PnvHomer),
.class_init = pnv_homer_power10_class_init,
};
static void pnv_homer_realize(DeviceState *dev, Error **errp)
{
PnvHomer *homer = PNV_HOMER(dev);
@ -377,6 +440,7 @@ static void pnv_homer_register_types(void)
type_register_static(&pnv_homer_type_info);
type_register_static(&pnv_homer_power8_type_info);
type_register_static(&pnv_homer_power9_type_info);
type_register_static(&pnv_homer_power10_type_info);
}
type_init(pnv_homer_register_types);

hw/ppc/pnv_occ.c:

@ -236,7 +236,9 @@ static const MemoryRegionOps pnv_occ_power9_xscom_ops = {
static void pnv_occ_power9_class_init(ObjectClass *klass, void *data)
{
PnvOCCClass *poc = PNV_OCC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "PowerNV OCC Controller (POWER9)";
poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
poc->xscom_ops = &pnv_occ_power9_xscom_ops;
poc->psi_irq = PSIHB9_IRQ_OCC;
@ -249,6 +251,19 @@ static const TypeInfo pnv_occ_power9_type_info = {
.class_init = pnv_occ_power9_class_init,
};
static void pnv_occ_power10_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "PowerNV OCC Controller (POWER10)";
}
static const TypeInfo pnv_occ_power10_type_info = {
.name = TYPE_PNV10_OCC,
.parent = TYPE_PNV9_OCC,
.class_init = pnv_occ_power10_class_init,
};
static void pnv_occ_realize(DeviceState *dev, Error **errp)
{
PnvOCC *occ = PNV_OCC(dev);
@ -297,6 +312,7 @@ static void pnv_occ_register_types(void)
type_register_static(&pnv_occ_type_info);
type_register_static(&pnv_occ_power8_type_info);
type_register_static(&pnv_occ_power9_type_info);
type_register_static(&pnv_occ_power10_type_info);
}
type_init(pnv_occ_register_types);

hw/ppc/pnv_psi.c:

@ -601,7 +601,6 @@ static const TypeInfo pnv_psi_power8_info = {
#define PSIHB9_IRQ_METHOD PPC_BIT(0)
#define PSIHB9_IRQ_RESET PPC_BIT(1)
#define PSIHB9_ESB_CI_BASE 0x60
#define PSIHB9_ESB_CI_64K PPC_BIT(1)
#define PSIHB9_ESB_CI_ADDR_MASK PPC_BITMASK(8, 47)
#define PSIHB9_ESB_CI_VALID PPC_BIT(63)
#define PSIHB9_ESB_NOTIF_ADDR 0x68
@ -646,7 +645,15 @@ static const TypeInfo pnv_psi_power8_info = {
#define PSIHB9_IRQ_STAT_DIO PPC_BIT(12)
#define PSIHB9_IRQ_STAT_PSU PPC_BIT(13)
static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno)
/* P10 register extensions */
#define PSIHB10_CR PSIHB9_CR
#define PSIHB10_CR_STORE_EOI PPC_BIT(12)
#define PSIHB10_ESB_CI_BASE PSIHB9_ESB_CI_BASE
#define PSIHB10_ESB_CI_64K PPC_BIT(1)
static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno, bool pq_checked)
{
PnvPsi *psi = PNV_PSI(xf);
uint64_t notif_port = psi->regs[PSIHB_REG(PSIHB9_ESB_NOTIF_ADDR)];
@ -655,9 +662,13 @@ static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno)
uint32_t offset =
(psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT);
uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
uint64_t data = offset | srcno;
MemTxResult result;
if (pq_checked) {
data |= XIVE_TRIGGER_PQ;
}
if (!valid) {
return;
}
@ -704,6 +715,13 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr,
switch (addr) {
case PSIHB9_CR:
if (val & PSIHB10_CR_STORE_EOI) {
psi9->source.esb_flags |= XIVE_SRC_STORE_EOI;
} else {
psi9->source.esb_flags &= ~XIVE_SRC_STORE_EOI;
}
break;
case PSIHB9_SEMR:
/* FSP stuff */
break;
@ -715,15 +733,20 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr,
break;
case PSIHB9_ESB_CI_BASE:
if (val & PSIHB10_ESB_CI_64K) {
psi9->source.esb_shift = XIVE_ESB_64K;
} else {
psi9->source.esb_shift = XIVE_ESB_4K;
}
if (!(val & PSIHB9_ESB_CI_VALID)) {
if (psi->regs[reg] & PSIHB9_ESB_CI_VALID) {
memory_region_del_subregion(sysmem, &psi9->source.esb_mmio);
}
} else {
if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) {
memory_region_add_subregion(sysmem,
val & ~PSIHB9_ESB_CI_VALID,
&psi9->source.esb_mmio);
hwaddr addr = val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K);
memory_region_add_subregion(sysmem, addr,
&psi9->source.esb_mmio);
}
}
psi->regs[reg] = val;
@ -831,6 +854,7 @@ static void pnv_psi_power9_instance_init(Object *obj)
Pnv9Psi *psi = PNV9_PSI(obj);
object_initialize_child(obj, "source", &psi->source, TYPE_XIVE_SOURCE);
object_property_add_alias(obj, "shift", OBJECT(&psi->source), "shift");
}
static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
@ -839,8 +863,6 @@ static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
XiveSource *xsrc = &PNV9_PSI(psi)->source;
int i;
/* This is the only device with 4k ESB pages */
object_property_set_int(OBJECT(xsrc), "shift", XIVE_ESB_4K, &error_fatal);
object_property_set_int(OBJECT(xsrc), "nr-irqs", PSIHB9_NUM_IRQS,
&error_fatal);
object_property_set_link(OBJECT(xsrc), "xive", OBJECT(psi), &error_abort);

hw/ppc/spapr.c:

@ -1018,9 +1018,9 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
if (reset) {
const char *boot_device = spapr->boot_device;
char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
size_t cb = 0;
char *bootlist = get_boot_devices_list(&cb);
g_autofree char *bootlist = get_boot_devices_list(&cb);
if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
_FDT(fdt_setprop_string(fdt, chosen, "bootargs",
@ -1087,9 +1087,6 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
}
spapr_dt_ov5_platform_support(spapr, fdt, chosen);
g_free(stdout_path);
g_free(bootlist);
}
_FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
@ -2710,15 +2707,25 @@ static void spapr_machine_init(MachineState *machine)
MachineClass *mc = MACHINE_GET_CLASS(machine);
const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
const char *bios_name = machine->firmware ?: bios_default;
g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
const char *kernel_filename = machine->kernel_filename;
const char *initrd_filename = machine->initrd_filename;
PCIHostState *phb;
int i;
MemoryRegion *sysmem = get_system_memory();
long load_limit, fw_size;
char *filename;
Error *resize_hpt_err = NULL;
if (!filename) {
error_report("Could not find LPAR firmware '%s'", bios_name);
exit(1);
}
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
if (fw_size <= 0) {
error_report("Could not load LPAR firmware '%s'", filename);
exit(1);
}
/*
* if Secure VM (PEF) support is configured, then initialize it
*/
@ -2999,18 +3006,6 @@ static void spapr_machine_init(MachineState *machine)
}
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (!filename) {
error_report("Could not find LPAR firmware '%s'", bios_name);
exit(1);
}
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
if (fw_size <= 0) {
error_report("Could not load LPAR firmware '%s'", filename);
exit(1);
}
g_free(filename);
/* FIXME: Should register things through the MachineState's qdev
* interface, this is a legacy from the sPAPREnvironment structure
* which predated MachineState but had a similar function */

hw/ppc/spapr_caps.c:

@ -95,12 +95,12 @@ static void spapr_cap_set_bool(Object *obj, Visitor *v, const char *name,
}
static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
SpaprCapabilityInfo *cap = opaque;
SpaprMachineState *spapr = SPAPR_MACHINE(obj);
char *val = NULL;
g_autofree char *val = NULL;
uint8_t value = spapr_get_cap(spapr, cap->index);
if (value >= cap->possible->num) {
@ -111,7 +111,6 @@ static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
val = g_strdup(cap->possible->vals[value]);
visit_type_str(v, name, &val, errp);
g_free(val);
}
static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
@ -120,7 +119,7 @@ static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
SpaprCapabilityInfo *cap = opaque;
SpaprMachineState *spapr = SPAPR_MACHINE(obj);
uint8_t i;
char *val;
g_autofree char *val = NULL;
if (!visit_type_str(v, name, &val, errp)) {
return;
@ -128,20 +127,18 @@ static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
if (!strcmp(val, "?")) {
error_setg(errp, "%s", cap->possible->help);
goto out;
return;
}
for (i = 0; i < cap->possible->num; i++) {
if (!strcasecmp(val, cap->possible->vals[i])) {
spapr->cmd_line_caps[cap->index] = true;
spapr->eff.caps[cap->index] = i;
goto out;
return;
}
}
error_setg(errp, "Invalid capability mode \"%s\" for cap-%s", val,
cap->name);
out:
g_free(val);
}
static void spapr_cap_get_pagesize(Object *obj, Visitor *v, const char *name,
@ -933,16 +930,13 @@ void spapr_caps_add_properties(SpaprMachineClass *smc)
for (i = 0; i < ARRAY_SIZE(capability_table); i++) {
SpaprCapabilityInfo *cap = &capability_table[i];
char *name = g_strdup_printf("cap-%s", cap->name);
char *desc;
g_autofree char *name = g_strdup_printf("cap-%s", cap->name);
g_autofree char *desc = g_strdup_printf("%s", cap->description);
object_class_property_add(klass, name, cap->type,
cap->get, cap->set,
NULL, cap);
desc = g_strdup_printf("%s", cap->description);
object_class_property_set_description(klass, name, desc);
g_free(name);
g_free(desc);
}
}
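
The spapr allocation cleanups in this and the neighbouring files all apply the same pattern: GLib's g_autofree attribute arranges for g_free() to run automatically when the variable goes out of scope, so the explicit g_free()/goto-out cleanup paths can be dropped. In essence:

    /* before */
    char *name = g_strdup_printf("cap-%s", cap->name);
    ...
    g_free(name);

    /* after: freed automatically when 'name' leaves scope */
    g_autofree char *name = g_strdup_printf("cap-%s", cap->name);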

hw/ppc/spapr_drc.c:

@ -519,8 +519,8 @@ static const VMStateDescription vmstate_spapr_drc = {
static void drc_realize(DeviceState *d, Error **errp)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
g_autofree gchar *link_name = g_strdup_printf("%x", spapr_drc_index(drc));
Object *root_container;
gchar *link_name;
const char *child_name;
trace_spapr_drc_realize(spapr_drc_index(drc));
@ -532,12 +532,10 @@ static void drc_realize(DeviceState *d, Error **errp)
* existing in the composition tree
*/
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
link_name = g_strdup_printf("%x", spapr_drc_index(drc));
child_name = object_get_canonical_path_component(OBJECT(drc));
trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name);
object_property_add_alias(root_container, link_name,
drc->owner, child_name);
g_free(link_name);
vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc);
trace_spapr_drc_realize_complete(spapr_drc_index(drc));
@ -546,22 +544,20 @@ static void drc_realize(DeviceState *d, Error **errp)
static void drc_unrealize(DeviceState *d)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
g_autofree gchar *name = g_strdup_printf("%x", spapr_drc_index(drc));
Object *root_container;
gchar *name;
trace_spapr_drc_unrealize(spapr_drc_index(drc));
vmstate_unregister(VMSTATE_IF(drc), &vmstate_spapr_drc, drc);
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
name = g_strdup_printf("%x", spapr_drc_index(drc));
object_property_del(root_container, name);
g_free(name);
}
SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
uint32_t id)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(object_new(type));
char *prop_name;
g_autofree char *prop_name = NULL;
drc->id = id;
drc->owner = owner;
@ -570,7 +566,6 @@ SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
object_property_add_child(owner, prop_name, OBJECT(drc));
object_unref(OBJECT(drc));
qdev_realize(DEVICE(drc), NULL, NULL);
g_free(prop_name);
return drc;
}
@ -803,11 +798,9 @@ static const TypeInfo spapr_drc_pmem_info = {
SpaprDrc *spapr_drc_by_index(uint32_t index)
{
Object *obj;
gchar *name;
name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH, index);
g_autofree gchar *name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH,
index);
obj = object_resolve_path(name, NULL);
g_free(name);
return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
}
@ -841,8 +834,14 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
ObjectProperty *prop;
ObjectPropertyIterator iter;
uint32_t drc_count = 0;
GArray *drc_indexes, *drc_power_domains;
GString *drc_names, *drc_types;
g_autoptr(GArray) drc_indexes = g_array_new(false, true,
sizeof(uint32_t));
g_autoptr(GArray) drc_power_domains = g_array_new(false, true,
sizeof(uint32_t));
g_autoptr(GString) drc_names = g_string_set_size(g_string_new(NULL),
sizeof(uint32_t));
g_autoptr(GString) drc_types = g_string_set_size(g_string_new(NULL),
sizeof(uint32_t));
int ret;
/*
@ -857,12 +856,8 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
* reserve the space now and set the offsets accordingly so we
* can fill them in later.
*/
drc_indexes = g_array_new(false, true, sizeof(uint32_t));
drc_indexes = g_array_set_size(drc_indexes, 1);
drc_power_domains = g_array_new(false, true, sizeof(uint32_t));
drc_power_domains = g_array_set_size(drc_power_domains, 1);
drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
/* aliases for all DRConnector objects will be rooted in QOM
* composition tree at DRC_CONTAINER_PATH
@ -874,7 +869,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
Object *obj;
SpaprDrc *drc;
SpaprDrcClass *drck;
char *drc_name = NULL;
g_autofree char *drc_name = NULL;
uint32_t drc_index, drc_power_domain;
if (!strstart(prop->type, "link<", NULL)) {
@ -908,7 +903,6 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
drc_name = spapr_drc_name(drc);
drc_names = g_string_append(drc_names, drc_name);
drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
g_free(drc_name);
/* ibm,drc-types */
drc_types = g_string_append(drc_types, drck->typename);
@ -928,7 +922,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
drc_indexes->len * sizeof(uint32_t));
if (ret) {
error_report("Couldn't create ibm,drc-indexes property");
goto out;
return ret;
}
ret = fdt_setprop(fdt, offset, "ibm,drc-power-domains",
@ -936,29 +930,22 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
drc_power_domains->len * sizeof(uint32_t));
if (ret) {
error_report("Couldn't finalize ibm,drc-power-domains property");
goto out;
return ret;
}
ret = fdt_setprop(fdt, offset, "ibm,drc-names",
drc_names->str, drc_names->len);
if (ret) {
error_report("Couldn't finalize ibm,drc-names property");
goto out;
return ret;
}
ret = fdt_setprop(fdt, offset, "ibm,drc-types",
drc_types->str, drc_types->len);
if (ret) {
error_report("Couldn't finalize ibm,drc-types property");
goto out;
}
out:
g_array_free(drc_indexes, true);
g_array_free(drc_power_domains, true);
g_string_free(drc_names, true);
g_string_free(drc_types, true);
return ret;
}
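
The GArray and GString conversions above use g_autoptr() rather than g_autofree, because those containers have dedicated destructors (g_array_unref(), g_string_free()) instead of a plain g_free(). A minimal sketch under that assumption (the function and data below are hypothetical, not from the patch):

#include <glib.h>
#include <string.h>

/* Hypothetical example: both containers are released automatically on
 * every return path by their registered autoptr cleanup functions. */
static guint count_entries(const char *const *names, guint n)
{
    g_autoptr(GArray) lengths = g_array_new(FALSE, TRUE, sizeof(guint));
    g_autoptr(GString) all = g_string_new(NULL);
    guint i;

    for (i = 0; i < n; i++) {
        guint len = strlen(names[i]);

        g_array_append_val(lengths, len);
        g_string_append(all, names[i]);
    }
    return lengths->len + all->len;
}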


@ -431,12 +431,14 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
int max_distance_ref_points = get_max_dist_ref_points(spapr);
int nb_numa_nodes = machine->numa_state->num_nodes;
int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
uint32_t *int_buf, *cur_index, buf_len;
int ret, i;
g_autofree uint32_t *int_buf = NULL;
uint32_t *cur_index;
int i;
/* ibm,associativity-lookup-arrays */
buf_len = (nr_nodes * max_distance_ref_points + 2) * sizeof(uint32_t);
cur_index = int_buf = g_malloc0(buf_len);
int_buf = g_malloc0((nr_nodes * max_distance_ref_points + 2) *
sizeof(uint32_t));
cur_index = int_buf;
int_buf[0] = cpu_to_be32(nr_nodes);
/* Number of entries per associativity list */
int_buf[1] = cpu_to_be32(max_distance_ref_points);
@ -451,11 +453,9 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
sizeof(uint32_t) * max_distance_ref_points);
cur_index += max_distance_ref_points;
}
ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
(cur_index - int_buf) * sizeof(uint32_t));
g_free(int_buf);
return ret;
return fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays",
int_buf, (cur_index - int_buf) * sizeof(uint32_t));
}
static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr,


@ -320,7 +320,7 @@ void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
{
int i, j, linkidx, npuoff;
char *npuname;
g_autofree char *npuname = NULL;
if (!sphb->nvgpus) {
return;
@ -333,11 +333,10 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
_FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0));
/* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */
_FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu")));
g_free(npuname);
for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) {
for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) {
char *linkname = g_strdup_printf("link@%d", linkidx);
g_autofree char *linkname = g_strdup_printf("link@%d", linkidx);
int off = fdt_add_subnode(fdt, npuoff, linkname);
_FDT(off);
@ -347,7 +346,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
_FDT((fdt_setprop_cell(fdt, off, "phandle",
PHANDLE_NVLINK(sphb, i, j))));
_FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx)));
g_free(linkname);
++linkidx;
}
}
@ -360,7 +358,8 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
&error_abort);
uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
g_autofree char *mem_name = g_strdup_printf("memory@%"PRIx64,
nvslot->gpa);
int off = fdt_add_subnode(fdt, 0, mem_name);
_FDT(off);
@ -378,7 +377,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
sizeof(mem_reg))));
_FDT((fdt_setprop_cell(fdt, off, "phandle",
PHANDLE_GPURAM(sphb, i))));
g_free(mem_name);
}
}


@ -279,30 +279,29 @@ static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
switch (parameter) {
case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS: {
char *param_val = g_strdup_printf("MaxEntCap=%d,"
"DesMem=%" PRIu64 ","
"DesProcs=%d,"
"MaxPlatProcs=%d",
ms->smp.max_cpus,
ms->ram_size / MiB,
ms->smp.cpus,
ms->smp.max_cpus);
g_autofree char *param_val = g_strdup_printf("MaxEntCap=%d,"
"DesMem=%" PRIu64 ","
"DesProcs=%d,"
"MaxPlatProcs=%d",
ms->smp.max_cpus,
ms->ram_size / MiB,
ms->smp.cpus,
ms->smp.max_cpus);
if (pcc->n_host_threads > 0) {
char *hostthr_val, *old = param_val;
/*
* Add HostThrs property. This property is not present in PAPR but
* is expected by some guests to communicate the number of physical
* host threads per core on the system so that they can scale
* information which varies based on the thread configuration.
*/
hostthr_val = g_strdup_printf(",HostThrs=%d", pcc->n_host_threads);
g_autofree char *hostthr_val = g_strdup_printf(",HostThrs=%d",
pcc->n_host_threads);
char *old = param_val;
param_val = g_strconcat(param_val, hostthr_val, NULL);
g_free(hostthr_val);
g_free(old);
}
ret = sysparm_st(buffer, length, param_val, strlen(param_val) + 1);
g_free(param_val);
break;
}
case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE: {


@ -726,7 +726,7 @@ void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt)
gchar *spapr_vio_stdout_path(SpaprVioBus *bus)
{
SpaprVioDevice *dev;
char *name, *path;
g_autofree char *name = NULL;
dev = spapr_vty_get_default(bus);
if (!dev) {
@ -734,8 +734,6 @@ gchar *spapr_vio_stdout_path(SpaprVioBus *bus)
}
name = spapr_vio_get_dev_name(DEVICE(dev));
path = g_strdup_printf("/vdevice/%s", name);
g_free(name);
return path;
return g_strdup_printf("/vdevice/%s", name);
}


@ -49,6 +49,7 @@ typedef struct PnvPhb4DMASpace {
*/
#define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root"
#define TYPE_PNV_PHB4_ROOT_PORT "pnv-phb4-root-port"
#define TYPE_PNV_PHB5_ROOT_PORT "pnv-phb5-root-port"
typedef struct PnvPHB4RootPort {
PCIESlot parent_obj;
@ -206,4 +207,15 @@ struct PnvPhb4PecClass {
const char *rp_model;
};
/*
* POWER10 definitions
*/
#define PNV_PHB5_VERSION 0x000000a500000001ull
#define PNV_PHB5_DEVICE_ID 0x0652
#define TYPE_PNV_PHB5_PEC "pnv-phb5-pec"
#define PNV_PHB5_PEC(obj) \
OBJECT_CHECK(PnvPhb4PecState, (obj), TYPE_PNV_PHB5_PEC)
#endif /* PCI_HOST_PNV_PHB4_H */


@ -220,11 +220,14 @@
#define PHB_PAPR_ERR_INJ_MASK_MMIO PPC_BITMASK(16, 63)
#define PHB_ETU_ERR_SUMMARY 0x2c8
#define PHB_INT_NOTIFY_ADDR 0x300
#define PHB_INT_NOTIFY_ADDR_64K PPC_BIT(1) /* P10 */
#define PHB_INT_NOTIFY_INDEX 0x308
/* Fundamental register set B */
#define PHB_VERSION 0x800
#define PHB_CTRLR 0x810
#define PHB_CTRLR_IRQ_PQ_DISABLE PPC_BIT(9) /* P10 */
#define PHB_CTRLR_IRQ_ABT_MODE PPC_BIT(10) /* P10 */
#define PHB_CTRLR_IRQ_PGSZ_64K PPC_BIT(11)
#define PHB_CTRLR_IRQ_STORE_EOI PPC_BIT(12)
#define PHB_CTRLR_MMIO_RD_STRICT PPC_BIT(13)


@ -125,10 +125,22 @@ struct Pnv10Chip {
PnvChip parent_obj;
/*< public >*/
PnvXive2 xive;
Pnv9Psi psi;
PnvLpcController lpc;
PnvOCC occ;
PnvHomer homer;
uint32_t nr_quads;
PnvQuad *quads;
#define PNV10_CHIP_MAX_PEC 2
PnvPhb4PecState pecs[PNV10_CHIP_MAX_PEC];
};
#define PNV10_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
#define PNV10_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
struct PnvChipClass {
/*< private >*/
SysBusDeviceClass parent_class;
@ -329,10 +341,37 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV10_LPCM_SIZE 0x0000000100000000ull
#define PNV10_LPCM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030000000000ull)
#define PNV10_XIVE2_IC_SIZE 0x0000000002000000ull
#define PNV10_XIVE2_IC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030200000000ull)
#define PNV10_PSIHB_ESB_SIZE 0x0000000000100000ull
#define PNV10_PSIHB_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030202000000ull)
#define PNV10_PSIHB_SIZE 0x0000000000100000ull
#define PNV10_PSIHB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203000000ull)
#define PNV10_XIVE2_TM_SIZE 0x0000000000040000ull
#define PNV10_XIVE2_TM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203180000ull)
#define PNV10_XIVE2_NVC_SIZE 0x0000000008000000ull
#define PNV10_XIVE2_NVC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030208000000ull)
#define PNV10_XIVE2_NVPG_SIZE 0x0000010000000000ull
#define PNV10_XIVE2_NVPG_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006040000000000ull)
#define PNV10_XIVE2_ESB_SIZE 0x0000010000000000ull
#define PNV10_XIVE2_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006050000000000ull)
#define PNV10_XIVE2_END_SIZE 0x0000020000000000ull
#define PNV10_XIVE2_END_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006060000000000ull)
#define PNV10_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
#define PNV10_OCC_COMMON_AREA_BASE 0x300fff800000ull
#define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \
PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
#define PNV10_HOMER_SIZE 0x0000000000400000ull
#define PNV10_HOMER_BASE(chip) \
(0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE)
#endif /* PPC_PNV_H */


@ -32,6 +32,9 @@ DECLARE_INSTANCE_CHECKER(PnvHomer, PNV8_HOMER,
#define TYPE_PNV9_HOMER TYPE_PNV_HOMER "-POWER9"
DECLARE_INSTANCE_CHECKER(PnvHomer, PNV9_HOMER,
TYPE_PNV9_HOMER)
#define TYPE_PNV10_HOMER TYPE_PNV_HOMER "-POWER10"
DECLARE_INSTANCE_CHECKER(PnvHomer, PNV10_HOMER,
TYPE_PNV10_HOMER)
struct PnvHomer {
DeviceState parent;


@ -32,6 +32,8 @@ DECLARE_INSTANCE_CHECKER(PnvOCC, PNV8_OCC,
#define TYPE_PNV9_OCC TYPE_PNV_OCC "-POWER9"
DECLARE_INSTANCE_CHECKER(PnvOCC, PNV9_OCC,
TYPE_PNV9_OCC)
#define TYPE_PNV10_OCC TYPE_PNV_OCC "-POWER10"
DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC)
#define PNV_OCC_SENSOR_DATA_BLOCK_OFFSET 0x00580000
#define PNV_OCC_SENSOR_DATA_BLOCK_SIZE 0x00025800


@ -12,6 +12,7 @@
#include "hw/ppc/xive.h"
#include "qom/object.h"
#include "hw/ppc/xive2.h"
struct PnvChip;
@ -95,4 +96,74 @@ struct PnvXiveClass {
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon);
/*
* XIVE2 interrupt controller (POWER10)
*/
#define TYPE_PNV_XIVE2 "pnv-xive2"
OBJECT_DECLARE_TYPE(PnvXive2, PnvXive2Class, PNV_XIVE2);
typedef struct PnvXive2 {
Xive2Router parent_obj;
/* Owning chip */
struct PnvChip *chip;
/* XSCOM addresses giving access to the controller registers */
MemoryRegion xscom_regs;
MemoryRegion ic_mmio;
MemoryRegion ic_mmios[8];
MemoryRegion esb_mmio;
MemoryRegion end_mmio;
MemoryRegion nvc_mmio;
MemoryRegion nvpg_mmio;
MemoryRegion tm_mmio;
/* Shortcut values for the Main MMIO regions */
hwaddr ic_base;
uint32_t ic_shift;
hwaddr esb_base;
uint32_t esb_shift;
hwaddr end_base;
uint32_t end_shift;
hwaddr nvc_base;
uint32_t nvc_shift;
hwaddr nvpg_base;
uint32_t nvpg_shift;
hwaddr tm_base;
uint32_t tm_shift;
/* Interrupt controller registers */
uint64_t cq_regs[0x40];
uint64_t vc_regs[0x100];
uint64_t pc_regs[0x100];
uint64_t tctxt_regs[0x30];
/* To change default behavior */
uint64_t capabilities;
uint64_t config;
/* Our XIVE source objects for IPIs and ENDs */
XiveSource ipi_source;
Xive2EndSource end_source;
/*
* Virtual Structure Descriptor tables
* These are in a SRAM protected by ECC.
*/
uint64_t vsds[9][XIVE_BLOCK_MAX];
/* Translation tables */
uint64_t tables[8][XIVE_BLOCK_MAX];
} PnvXive2;
typedef struct PnvXive2Class {
Xive2RouterClass parent_class;
DeviceRealize parent_realize;
} PnvXive2Class;
void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon);
#endif /* PPC_PNV_XIVE_H */


@ -131,6 +131,21 @@ struct PnvXScomInterfaceClass {
#define PNV10_XSCOM_PSIHB_BASE 0x3011D00
#define PNV10_XSCOM_PSIHB_SIZE 0x100
#define PNV10_XSCOM_OCC_BASE PNV9_XSCOM_OCC_BASE
#define PNV10_XSCOM_OCC_SIZE PNV9_XSCOM_OCC_SIZE
#define PNV10_XSCOM_PBA_BASE 0x01010CDA
#define PNV10_XSCOM_PBA_SIZE 0x40
#define PNV10_XSCOM_XIVE2_BASE 0x2010800
#define PNV10_XSCOM_XIVE2_SIZE 0x400
#define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */
#define PNV10_XSCOM_PEC_NEST_SIZE 0x100
#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */
#define PNV10_XSCOM_PEC_PCI_SIZE 0x200
void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp);
int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset,
uint64_t xscom_base, uint64_t xscom_size,


@ -160,7 +160,7 @@ DECLARE_CLASS_CHECKERS(XiveNotifierClass, XIVE_NOTIFIER,
struct XiveNotifierClass {
InterfaceClass parent;
void (*notify)(XiveNotifier *xn, uint32_t lisn);
void (*notify)(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
};
/*
@ -176,6 +176,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(XiveSource, XIVE_SOURCE)
*/
#define XIVE_SRC_H_INT_ESB 0x1 /* ESB managed with hcall H_INT_ESB */
#define XIVE_SRC_STORE_EOI 0x2 /* Store EOI supported */
#define XIVE_SRC_PQ_DISABLE 0x4 /* Disable check on the PQ state bits */
struct XiveSource {
DeviceState parent;
@ -278,6 +279,7 @@ uint8_t xive_esb_set(uint8_t *pq, uint8_t value);
#define XIVE_ESB_STORE_EOI 0x400 /* Store */
#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
#define XIVE_ESB_GET 0x800 /* Load */
#define XIVE_ESB_INJECT 0x800 /* Store */
#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
@ -385,6 +387,10 @@ struct XiveRouterClass {
/* XIVE table accessors */
int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
XiveEAS *eas);
int (*get_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
uint8_t *pq);
int (*set_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
uint8_t *pq);
int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
XiveEND *end);
int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
@ -406,7 +412,7 @@ int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt);
int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt, uint8_t word_number);
void xive_router_notify(XiveNotifier *xn, uint32_t lisn);
void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
/*
* XIVE Presenter

include/hw/ppc/xive2.h (new file, 109 lines)

@ -0,0 +1,109 @@
/*
* QEMU PowerPC XIVE2 interrupt controller model (POWER10)
*
* Copyright (c) 2019-2022, IBM Corporation.
*
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*
*/
#ifndef PPC_XIVE2_H
#define PPC_XIVE2_H
#include "hw/ppc/xive2_regs.h"
/*
* XIVE2 Router (POWER10)
*/
typedef struct Xive2Router {
SysBusDevice parent;
XiveFabric *xfb;
} Xive2Router;
#define TYPE_XIVE2_ROUTER "xive2-router"
OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER);
/*
* Configuration flags
*/
#define XIVE2_GEN1_TIMA_OS 0x00000001
#define XIVE2_VP_SAVE_RESTORE 0x00000002
#define XIVE2_THREADID_8BITS 0x00000004
typedef struct Xive2RouterClass {
SysBusDeviceClass parent;
/* XIVE table accessors */
int (*get_eas)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
Xive2Eas *eas);
int (*get_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
uint8_t *pq);
int (*set_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
uint8_t *pq);
int (*get_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
Xive2End *end);
int (*write_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
Xive2End *end, uint8_t word_number);
int (*get_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp);
int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp, uint8_t word_number);
uint8_t (*get_block_id)(Xive2Router *xrtr);
uint32_t (*get_config)(Xive2Router *xrtr);
} Xive2RouterClass;
int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
Xive2Eas *eas);
int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
Xive2End *end);
int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
Xive2End *end, uint8_t word_number);
int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp);
int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp, uint8_t word_number);
uint32_t xive2_router_get_config(Xive2Router *xrtr);
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
/*
* XIVE2 Presenter (POWER10)
*/
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint32_t logic_serv);
/*
* XIVE2 END ESBs (POWER10)
*/
#define TYPE_XIVE2_END_SOURCE "xive2-end-source"
OBJECT_DECLARE_SIMPLE_TYPE(Xive2EndSource, XIVE2_END_SOURCE)
typedef struct Xive2EndSource {
DeviceState parent;
uint32_t nr_ends;
/* ESB memory region */
uint32_t esb_shift;
MemoryRegion esb_mmio;
Xive2Router *xrtr;
} Xive2EndSource;
/*
* XIVE2 Thread Interrupt Management Area (POWER10)
*/
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size);
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size);
#endif /* PPC_XIVE2_H */

include/hw/ppc/xive2_regs.h (new file, 210 lines)

@ -0,0 +1,210 @@
/*
* QEMU PowerPC XIVE2 internal structure definitions (POWER10)
*
* Copyright (c) 2019-2022, IBM Corporation.
*
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
#ifndef PPC_XIVE2_REGS_H
#define PPC_XIVE2_REGS_H
/*
* Thread Interrupt Management Area (TIMA)
*
* In Gen1 mode (P9 compat mode) word 2 is the same. However in Gen2
* mode (P10), the CAM line is slightly different as the VP space was
* increased.
*/
#define TM2_QW0W2_VU PPC_BIT32(0)
#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31)
#define TM2_QW1W2_VO PPC_BIT32(0)
#define TM2_QW1W2_HO PPC_BIT32(1)
#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31)
#define TM2_QW2W2_VP PPC_BIT32(0)
#define TM2_QW2W2_HP PPC_BIT32(1)
#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31)
#define TM2_QW3W2_VT PPC_BIT32(0)
#define TM2_QW3W2_HT PPC_BIT32(1)
#define TM2_QW3W2_LP PPC_BIT32(6)
#define TM2_QW3W2_LE PPC_BIT32(7)
/*
* Event Assignment Structure (EAS)
*/
typedef struct Xive2Eas {
uint64_t w;
#define EAS2_VALID PPC_BIT(0)
#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */
#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */
#define EAS2_MASKED PPC_BIT(32) /* Masked */
#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */
} Xive2Eas;
#define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID)
#define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED)
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon);
/*
* Event Notification Descriptor (END)
*/
typedef struct Xive2End {
uint32_t w0;
#define END2_W0_VALID PPC_BIT32(0) /* "v" bit */
#define END2_W0_ENQUEUE PPC_BIT32(5) /* "q" bit */
#define END2_W0_UCOND_NOTIFY PPC_BIT32(6) /* "n" bit */
#define END2_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit */
#define END2_W0_BACKLOG PPC_BIT32(8) /* "b" bit */
#define END2_W0_PRECL_ESC_CTL PPC_BIT32(9) /* "p" bit */
#define END2_W0_UNCOND_ESCALATE PPC_BIT32(10) /* "u" bit */
#define END2_W0_ESCALATE_CTL PPC_BIT32(11) /* "e" bit */
#define END2_W0_ADAPTIVE_ESC PPC_BIT32(12) /* "a" bit */
#define END2_W0_ESCALATE_END PPC_BIT32(13) /* "N" bit */
#define END2_W0_FIRMWARE1 PPC_BIT32(16) /* Owned by FW */
#define END2_W0_FIRMWARE2 PPC_BIT32(17) /* Owned by FW */
#define END2_W0_AEC_SIZE PPC_BITMASK32(18, 19)
#define END2_W0_AEG_SIZE PPC_BITMASK32(20, 23)
#define END2_W0_EQ_VG_PREDICT PPC_BITMASK32(24, 31) /* Owned by HW */
uint32_t w1;
#define END2_W1_ESn PPC_BITMASK32(0, 1)
#define END2_W1_ESn_P PPC_BIT32(0)
#define END2_W1_ESn_Q PPC_BIT32(1)
#define END2_W1_ESe PPC_BITMASK32(2, 3)
#define END2_W1_ESe_P PPC_BIT32(2)
#define END2_W1_ESe_Q PPC_BIT32(3)
#define END2_W1_GEN_FLIPPED PPC_BIT32(8)
#define END2_W1_GENERATION PPC_BIT32(9)
#define END2_W1_PAGE_OFF PPC_BITMASK32(10, 31)
uint32_t w2;
#define END2_W2_RESERVED PPC_BITMASK32(4, 7)
#define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31)
uint32_t w3;
#define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24)
#define END2_W3_QSIZE PPC_BITMASK32(28, 31)
uint32_t w4;
#define END2_W4_END_BLOCK PPC_BITMASK32(4, 7)
#define END2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31)
#define END2_W4_ESB_BLOCK PPC_BITMASK32(0, 3)
#define END2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31)
uint32_t w5;
#define END2_W5_ESC_END_DATA PPC_BITMASK32(1, 31)
uint32_t w6;
#define END2_W6_FORMAT_BIT PPC_BIT32(0)
#define END2_W6_IGNORE PPC_BIT32(1)
#define END2_W6_VP_BLOCK PPC_BITMASK32(4, 7)
#define END2_W6_VP_OFFSET PPC_BITMASK32(8, 31)
#define END2_W6_VP_OFFSET_GEN1 PPC_BITMASK32(13, 31)
uint32_t w7;
#define END2_W7_TOPO PPC_BITMASK32(0, 3) /* Owned by HW */
#define END2_W7_F0_PRIORITY PPC_BITMASK32(8, 15)
#define END2_W7_F1_LOG_SERVER_ID PPC_BITMASK32(4, 31)
} Xive2End;
#define xive2_end_is_valid(end) (be32_to_cpu((end)->w0) & END2_W0_VALID)
#define xive2_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END2_W0_ENQUEUE)
#define xive2_end_is_notify(end) \
(be32_to_cpu((end)->w0) & END2_W0_UCOND_NOTIFY)
#define xive2_end_is_backlog(end) (be32_to_cpu((end)->w0) & END2_W0_BACKLOG)
#define xive2_end_is_escalate(end) \
(be32_to_cpu((end)->w0) & END2_W0_ESCALATE_CTL)
#define xive2_end_is_uncond_escalation(end) \
(be32_to_cpu((end)->w0) & END2_W0_UNCOND_ESCALATE)
#define xive2_end_is_silent_escalation(end) \
(be32_to_cpu((end)->w0) & END2_W0_SILENT_ESCALATE)
#define xive2_end_is_escalate_end(end) \
(be32_to_cpu((end)->w0) & END2_W0_ESCALATE_END)
#define xive2_end_is_firmware1(end) \
(be32_to_cpu((end)->w0) & END2_W0_FIRMWARE1)
#define xive2_end_is_firmware2(end) \
(be32_to_cpu((end)->w0) & END2_W0_FIRMWARE2)
static inline uint64_t xive2_end_qaddr(Xive2End *end)
{
return ((uint64_t) be32_to_cpu(end->w2) & END2_W2_EQ_ADDR_HI) << 32 |
(be32_to_cpu(end->w3) & END2_W3_EQ_ADDR_LO);
}
void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon);
void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
Monitor *mon);
void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
Monitor *mon);
/*
* Notification Virtual Processor (NVP)
*/
typedef struct Xive2Nvp {
uint32_t w0;
#define NVP2_W0_VALID PPC_BIT32(0)
#define NVP2_W0_HW PPC_BIT32(7)
#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
uint32_t w1;
#define NVP2_W1_CO PPC_BIT32(13)
#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15)
#define NVP2_W1_CO_THRID_VALID PPC_BIT32(16)
#define NVP2_W1_CO_THRID PPC_BITMASK32(17, 31)
uint32_t w2;
#define NVP2_W2_CPPR PPC_BITMASK32(0, 7)
#define NVP2_W2_IPB PPC_BITMASK32(8, 15)
#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23)
uint32_t w3;
uint32_t w4;
#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */
#define NVP2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31) /* N:0 */
#define NVP2_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7) /* N:1 */
#define NVP2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) /* N:1 */
uint32_t w5;
#define NVP2_W5_PSIZE PPC_BITMASK32(0, 1)
#define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7)
#define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31)
uint32_t w6;
uint32_t w7;
} Xive2Nvp;
#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
#define xive2_nvp_is_hw(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_HW)
#define xive2_nvp_is_co(nvp) (be32_to_cpu((nvp)->w1) & NVP2_W1_CO)
/*
* The VP number space in a block is defined by the END2_W6_VP_OFFSET
* field of the XIVE END. When running in Gen1 mode (P9 compat mode),
* the VP space is reduced to (1 << 19) VPs per block
*/
#define XIVE2_NVP_SHIFT 24
#define XIVE2_NVP_COUNT (1 << XIVE2_NVP_SHIFT)
static inline uint32_t xive2_nvp_cam_line(uint8_t nvp_blk, uint32_t nvp_idx)
{
return (nvp_blk << XIVE2_NVP_SHIFT) | nvp_idx;
}
static inline uint32_t xive2_nvp_idx(uint32_t cam_line)
{
return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1);
}
static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
{
return (cam_line >> XIVE2_NVP_SHIFT) & 0xf;
}
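
A short worked example of the CAM line encoding above (values chosen purely for illustration):

/* With XIVE2_NVP_SHIFT = 24, block 0x1 and index 0x20 encode as
 * (0x1 << 24) | 0x20 = 0x01000020; xive2_nvp_blk() then recovers 0x1
 * and xive2_nvp_idx() recovers 0x20.  In Gen1 compat mode only
 * (1 << 19) indexes per block are usable, as noted above. */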
/*
* Notification Virtual Group or Crowd (NVG/NVC)
*/
typedef struct Xive2Nvgc {
uint32_t w0;
#define NVGC2_W0_VALID PPC_BIT32(0)
uint32_t w1;
uint32_t w2;
uint32_t w3;
uint32_t w4;
uint32_t w5;
uint32_t w6;
uint32_t w7;
} Xive2Nvgc;
#endif /* PPC_XIVE2_REGS_H */


@ -218,6 +218,25 @@ typedef struct {
bool write_aofs;
} GVecGen4;
typedef struct {
/*
* Expand inline as a 64-bit or 32-bit integer. Only one of these will be
* non-NULL.
*/
void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
/* Expand inline with a host vector type. */
void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
/* Expand out-of-line helper w/descriptor, data in descriptor. */
gen_helper_gvec_4 *fno;
/* The optional opcodes, if any, utilized by .fniv. */
const TCGOpcode *opt_opc;
/* The vector element size, if applicable. */
uint8_t vece;
/* Prefer i64 to v64. */
bool prefer_i64;
} GVecGen4i;
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@ -231,6 +250,9 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
const GVecGen3i *);
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
const GVecGen4i *);
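
A hedged sketch of how a front end could drive the new four-operand-with-immediate expander. The operation chosen here, d = (a & c) | ((b << imm) & ~c), and every name below (gen_demo*, gen_helper_demo) are invented for illustration and are not part of this series:

/* Illustration only -- not an instruction added by this series. */
static void gen_demo_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
                         int64_t imm)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, b, imm);
    tcg_gen_andc_i64(t, t, c);      /* (b << imm) & ~c */
    tcg_gen_and_i64(d, a, c);       /* a & c */
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_demo_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b,
                         TCGv_vec c, int64_t imm)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_shli_vec(vece, t, b, imm);
    tcg_gen_andc_vec(vece, t, t, c);
    tcg_gen_and_vec(vece, d, a, c);
    tcg_gen_or_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

static void gen_demo(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                     uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                     int64_t imm)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
    static const GVecGen4i op = {
        .fni8 = gen_demo_i64,
        .fniv = gen_demo_vec,
        .fno = gen_helper_demo,     /* hypothetical out-of-line fallback */
        .opt_opc = vecop_list,
        .vece = MO_64,
    };

    tcg_gen_gvec_4i(dofs, aofs, bofs, cofs, oprsz, maxsz, imm, &op);
}

Only the expanders that make sense for an operation need to be provided; .fni4 and .prefer_i64 are left at their defaults in this sketch.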
/* Expand a specific vector operation. */


@ -127,8 +127,10 @@ enum {
/* ISA 3.00 additions */
POWERPC_EXCP_HVIRT = 101,
POWERPC_EXCP_SYSCALL_VECTORED = 102, /* scv exception */
POWERPC_EXCP_PERFM_EBB = 103, /* Performance Monitor EBB Exception */
POWERPC_EXCP_EXTERNAL_EBB = 104, /* External EBB Exception */
/* EOL */
POWERPC_EXCP_NB = 103,
POWERPC_EXCP_NB = 105,
/* QEMU exceptions: special cases we want to stop translation */
POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
};
@ -2434,6 +2436,7 @@ enum {
PPC_INTERRUPT_HMI, /* Hypervisor Maintenance interrupt */
PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */
PPC_INTERRUPT_HVIRT, /* Hypervisor virtualization interrupt */
PPC_INTERRUPT_EBB, /* Event-based Branch exception */
};
/* Processor Compatibility mask (PCR) */
@ -2499,6 +2502,11 @@ void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception,
void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
uint32_t error_code, uintptr_t raddr);
/* PERFM EBB helper */
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void raise_ebb_perfm_exception(CPUPPCState *env);
#endif
#if !defined(CONFIG_USER_ONLY)
static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm)
{


@ -2060,6 +2060,10 @@ static void init_excp_POWER8(CPUPPCState *env)
env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60;
env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80;
env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80;
/* Userland exceptions without vector value in PowerISA v3.1 */
env->excp_vectors[POWERPC_EXCP_PERFM_EBB] = 0x0;
env->excp_vectors[POWERPC_EXCP_EXTERNAL_EBB] = 0x0;
#endif
}
@ -5698,12 +5702,10 @@ static void register_power9_mmu_sprs(CPUPPCState *env)
*/
static void init_tcg_pmu_power8(CPUPPCState *env)
{
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Init PMU overflow timers */
if (!kvm_enabled()) {
if (tcg_enabled()) {
cpu_ppc_pmu_init(env);
}
#endif
}
static void init_proc_book3s_common(CPUPPCState *env)
@ -7167,14 +7169,14 @@ static void ppc_cpu_reset(DeviceState *dev)
#if !defined(CONFIG_USER_ONLY)
env->nip = env->hreset_vector | env->excp_prefix;
#if defined(CONFIG_TCG)
if (env->mmu_model != POWERPC_MMU_REAL) {
ppc_tlb_invalidate_all(env);
}
#endif /* CONFIG_TCG */
#endif
pmu_update_summaries(env);
if (tcg_enabled()) {
if (env->mmu_model != POWERPC_MMU_REAL) {
ppc_tlb_invalidate_all(env);
}
pmu_update_summaries(env);
}
#endif
hreg_compute_hflags(env);
env->reserve_addr = (target_ulong)-1ULL;
/* Be sure no exception or interrupt is pending */


@ -1554,6 +1554,21 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
new_msr |= (target_ulong)MSR_HVB;
new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
break;
case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */
case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */
env->spr[SPR_BESCR] &= ~BESCR_GE;
/*
* Save NIP for rfebb insn in SPR_EBBRR. Next nip is
* stored in the EBB Handler SPR_EBBHR.
*/
env->spr[SPR_EBBRR] = env->nip;
powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr);
/*
* This exception is handled in userspace. No need to proceed.
*/
return;
case POWERPC_EXCP_THERM: /* Thermal interrupt */
case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
case POWERPC_EXCP_VPUA: /* Vector assist exception */
@ -1797,6 +1812,24 @@ static void ppc_hw_interrupt(CPUPPCState *env)
powerpc_excp(cpu, POWERPC_EXCP_THERM);
return;
}
/* EBB exception */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_EBB)) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
*/
if (msr_pr == 1 && env->spr[SPR_BESCR] & BESCR_GE) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EBB);
if (env->spr[SPR_BESCR] & BESCR_PMEO) {
powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
} else if (env->spr[SPR_BESCR] & BESCR_EEO) {
powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
}
return;
}
}
}
if (env->resume_as_sreset) {
@ -2033,6 +2066,54 @@ void helper_rfebb(CPUPPCState *env, target_ulong s)
env->spr[SPR_BESCR] &= ~BESCR_GE;
}
}
/*
* Triggers or queues an 'ebb_excp' EBB exception. All checks
* but FSCR, HFSCR and msr_pr must be done beforehand.
*
* PowerISA v3.1 isn't clear about whether an EBB should be
* postponed or cancelled if the EBB facility is unavailable.
* Our assumption here is that the EBB is cancelled if both
* FSCR and HFSCR EBB facilities aren't available.
*/
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
PowerPCCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
/*
* FSCR_EBB and FSCR_IC_EBB are the same bits used with
* HFSCR.
*/
helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
env->spr[SPR_BESCR] |= BESCR_PMEO;
} else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
env->spr[SPR_BESCR] |= BESCR_EEO;
}
if (msr_pr == 1) {
powerpc_excp(cpu, ebb_excp);
} else {
env->pending_interrupts |= 1 << PPC_INTERRUPT_EBB;
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
}
}
void raise_ebb_perfm_exception(CPUPPCState *env)
{
bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
env->spr[SPR_BESCR] & BESCR_PME &&
env->spr[SPR_BESCR] & BESCR_GE;
if (!perfm_ebb_enabled) {
return;
}
do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
#endif
/*****************************************************************************/


@ -2156,10 +2156,11 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
* sfprf - set FPRF
* r2sp - round intermediate double precision result to single precision
*/
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3) \
{ \
ppc_vsr_t t = *xt; \
int i; \
@ -2175,12 +2176,12 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
* result to odd. \
*/ \
set_float_rounding_mode(float_round_to_zero, &tstat); \
t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, \
maddflgs, &tstat); \
t.fld |= (get_float_exception_flags(&tstat) & \
float_flag_inexact) != 0; \
} else { \
t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, \
maddflgs, &tstat); \
} \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
@ -2202,14 +2203,14 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
do_float_check_status(env, GETPC()); \
}
VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
VSX_MADD(XSMADDSP, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
VSX_MADD(XSMSUBSP, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
VSX_MADD(XSNMADDSP, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
VSX_MADD(XSNMSUBSP, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
@ -2222,55 +2223,93 @@ VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
/*
* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
* VSX_MADDQ - VSX floating point quad-precision multiply/add
* op - instruction mnemonic
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
* ro - round to odd
*/
#define VSX_MADDQ(op, maddflgs, ro) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
ppc_vsr_t *s3) \
{ \
ppc_vsr_t t = *xt; \
\
helper_reset_fpstatus(env); \
\
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
if (ro) { \
tstat.float_rounding_mode = float_round_to_odd; \
} \
t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_madd(env, tstat.float_exception_flags, \
false, GETPC()); \
} \
\
helper_compute_fprf_float128(env, t.f128); \
*xt = t; \
do_float_check_status(env, GETPC()); \
}
VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 1)
/*
* VSX_SCALAR_CMP - VSX scalar floating point compare
* op - instruction mnemonic
* tp - type
* cmp - comparison operation
* exp - expected result of comparison
* fld - vsr_t field
* svxvc - set VXVC bit
*/
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *xb) \
#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
int flags; \
bool r, vxvc; \
\
if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
if (fpscr_ve == 0 && svxvc) { \
vxvc_flag = true; \
helper_reset_fpstatus(env); \
\
if (svxvc) { \
r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status); \
} else { \
r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status); \
} \
\
flags = get_float_exception_flags(&env->fp_status); \
if (unlikely(flags & float_flag_invalid)) { \
vxvc = svxvc; \
if (flags & float_flag_invalid_snan) { \
float_invalid_op_vxsnan(env, GETPC()); \
vxvc &= fpscr_ve == 0; \
} \
} else if (svxvc) { \
vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \
} \
if (vxsnan_flag) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (vxvc_flag) { \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
\
if (!vex_flag) { \
if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \
&env->fp_status) == exp) { \
t.VsrD(0) = -1; \
t.VsrD(1) = 0; \
} else { \
t.VsrD(0) = 0; \
t.VsrD(1) = 0; \
if (vxvc) { \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
} \
*xt = t; \
\
memset(xt, 0, sizeof(*xt)); \
memset(&xt->fld, -r, sizeof(xt->fld)); \
do_float_check_status(env, GETPC()); \
}
VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
ppc_vsr_t *xa, ppc_vsr_t *xb)
@ -2494,40 +2533,35 @@ VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
#define VSX_MAX_MINC(name, max) \
#define VSX_MAX_MINC(name, max, tp, fld) \
void helper_##name(CPUPPCState *env, \
ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
ppc_vsr_t t = { }; \
bool vxsnan_flag = false, vex_flag = false; \
bool first; \
\
if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \
float64_is_any_nan(xb->VsrD(0)))) { \
if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
t.VsrD(0) = xb->VsrD(0); \
} else if ((max && \
!float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
(!max && \
float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
t.VsrD(0) = xa->VsrD(0); \
if (max) { \
first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status); \
} else { \
t.VsrD(0) = xb->VsrD(0); \
first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status); \
} \
\
vex_flag = fpscr_ve & vxsnan_flag; \
if (vxsnan_flag) { \
float_invalid_op_vxsnan(env, GETPC()); \
if (first) { \
t.fld = xa->fld; \
} else { \
t.fld = xb->fld; \
if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
if (!vex_flag) { \
*xt = t; \
} \
} \
\
*xt = t; \
}
VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
VSX_MAX_MINC(XSMINCQP, false, float128, f128);
#define VSX_MAX_MINJ(name, max) \
void helper_##name(CPUPPCState *env, \
@ -2581,8 +2615,8 @@ void helper_##name(CPUPPCState *env, \
} \
} \
VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
VSX_MAX_MINJ(XSMAXJDP, 1);
VSX_MAX_MINJ(XSMINJDP, 0);
/*
* VSX_CMP - VSX floating point compare
@ -2751,6 +2785,24 @@ VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
ppc_vsr_t t = { };
int i, status;
for (i = 0; i < 4; i++) {
t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
}
status = get_float_exception_flags(&env->fp_status);
if (unlikely(status & float_flag_invalid_snan)) {
float_invalid_op_vxsnan(env, GETPC());
}
*xt = t;
do_float_check_status(env, GETPC());
}
void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
ppc_vsr_t *xb)
{
@ -3055,27 +3107,6 @@ uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
return xt;
}
#define VSX_XXPERM(op, indexed) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *pcv) \
{ \
ppc_vsr_t t = *xt; \
int i, idx; \
\
for (i = 0; i < 16; i++) { \
idx = pcv->VsrB(i) & 0x1F; \
if (indexed) { \
idx = 31 - idx; \
} \
t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx) \
: xt->VsrB(idx - 16); \
} \
*xt = t; \
}
VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
ppc_vsr_t t = { };


@ -142,46 +142,13 @@ DEF_HELPER_3(vabsduw, void, avr, avr, avr)
DEF_HELPER_3(vavgsb, void, avr, avr, avr)
DEF_HELPER_3(vavgsh, void, avr, avr, avr)
DEF_HELPER_3(vavgsw, void, avr, avr, avr)
DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr)
DEF_HELPER_FLAGS_4(VCMPNEZB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(VCMPNEZH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(VCMPNEZW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr)
@ -192,22 +159,18 @@ DEF_HELPER_3(vmrglw, void, avr, avr, avr)
DEF_HELPER_3(vmrghb, void, avr, avr, avr)
DEF_HELPER_3(vmrghh, void, avr, avr, avr)
DEF_HELPER_3(vmrghw, void, avr, avr, avr)
DEF_HELPER_3(vmulesb, void, avr, avr, avr)
DEF_HELPER_3(vmulesh, void, avr, avr, avr)
DEF_HELPER_3(vmulesw, void, avr, avr, avr)
DEF_HELPER_3(vmuleub, void, avr, avr, avr)
DEF_HELPER_3(vmuleuh, void, avr, avr, avr)
DEF_HELPER_3(vmuleuw, void, avr, avr, avr)
DEF_HELPER_3(vmulosb, void, avr, avr, avr)
DEF_HELPER_3(vmulosh, void, avr, avr, avr)
DEF_HELPER_3(vmulosw, void, avr, avr, avr)
DEF_HELPER_3(vmuloub, void, avr, avr, avr)
DEF_HELPER_3(vmulouh, void, avr, avr, avr)
DEF_HELPER_3(vmulouw, void, avr, avr, avr)
DEF_HELPER_3(vmulhsw, void, avr, avr, avr)
DEF_HELPER_3(vmulhuw, void, avr, avr, avr)
DEF_HELPER_3(vmulhsd, void, avr, avr, avr)
DEF_HELPER_3(vmulhud, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULESB, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULESH, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULESW, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULEUB, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULEUH, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULEUW, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOSB, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOSH, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOSW, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOUB, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOUH, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOUW, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_3(vslo, void, avr, avr, avr)
DEF_HELPER_3(vsro, void, avr, avr, avr)
DEF_HELPER_3(vsrv, void, avr, avr, avr)
@ -246,11 +209,10 @@ DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl)
DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl)
DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl)
DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl)
DEF_HELPER_2(vextsb2w, void, avr, avr)
DEF_HELPER_2(vextsh2w, void, avr, avr)
DEF_HELPER_2(vextsb2d, void, avr, avr)
DEF_HELPER_2(vextsh2d, void, avr, avr)
DEF_HELPER_2(vextsw2d, void, avr, avr)
DEF_HELPER_FLAGS_2(VSTRIBL, TCG_CALL_NO_RWG, i32, avr, avr)
DEF_HELPER_FLAGS_2(VSTRIBR, TCG_CALL_NO_RWG, i32, avr, avr)
DEF_HELPER_FLAGS_2(VSTRIHL, TCG_CALL_NO_RWG, i32, avr, avr)
DEF_HELPER_FLAGS_2(VSTRIHR, TCG_CALL_NO_RWG, i32, avr, avr)
DEF_HELPER_2(vnegw, void, avr, avr)
DEF_HELPER_2(vnegd, void, avr, avr)
DEF_HELPER_2(vupkhpx, void, avr, avr)
@ -263,9 +225,8 @@ DEF_HELPER_2(vupklsh, void, avr, avr)
DEF_HELPER_2(vupklsw, void, avr, avr)
DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
DEF_HELPER_FLAGS_4(VPERM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
DEF_HELPER_FLAGS_4(VPERMR, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
@ -311,10 +272,10 @@ DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr)
DEF_HELPER_4(vminfp, void, env, avr, avr, avr)
DEF_HELPER_3(vrefp, void, env, avr, avr)
DEF_HELPER_3(vrsqrtefp, void, env, avr, avr)
DEF_HELPER_3(vrlwmi, void, avr, avr, avr)
DEF_HELPER_3(vrldmi, void, avr, avr, avr)
DEF_HELPER_3(vrldnm, void, avr, avr, avr)
DEF_HELPER_3(vrlwnm, void, avr, avr, avr)
DEF_HELPER_FLAGS_4(VRLWMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(VRLDMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(VRLDNM, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_FLAGS_4(VRLWNM, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr)
DEF_HELPER_3(vexptefp, void, env, avr, avr)
@ -394,14 +355,16 @@ DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr)
DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr)
DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr)
DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr)
DEF_HELPER_5(XSMADDDP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMSUBDP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMADDDP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMSUBDP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_4(XSCMPEQDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSCMPGTDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSCMPGEDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSCMPEQQP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSCMPGTQP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSCMPGEQP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr)
@ -410,10 +373,12 @@ DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xsmaxcdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xsmincdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xsmaxjdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xsminjdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMAXCDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMINCDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMAXJDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMINJDP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMAXCQP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(XSMINCQP, void, env, vsr, vsr, vsr)
DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
@ -457,10 +422,19 @@ DEF_HELPER_3(xsresp, void, env, vsr, vsr)
DEF_HELPER_2(xsrsp, i64, env, i64)
DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr)
DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMADDSP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMSUBSP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMADDSP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMSUBSP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMADDQP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMADDQPO, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMSUBQP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSMSUBQPO, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMADDQP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMADDQPO, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMSUBQP, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_5(XSNMSUBQPO, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
@ -518,6 +492,7 @@ DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr)
DEF_HELPER_3(XVCVSPBF16, void, env, vsr, vsr)
DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr)
DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr)
DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr)
@ -533,11 +508,27 @@ DEF_HELPER_3(xvrspic, void, env, vsr, vsr)
DEF_HELPER_3(xvrspim, void, env, vsr, vsr)
DEF_HELPER_3(xvrspip, void, env, vsr, vsr)
DEF_HELPER_3(xvrspiz, void, env, vsr, vsr)
DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
DEF_HELPER_FLAGS_2(XXGENPCVBM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVBM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVBM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVBM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVHM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVHM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVHM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVHM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVWM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVWM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVWM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVWM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVDM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVDM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVDM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_FLAGS_2(XXGENPCVDM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
DEF_HELPER_FLAGS_5(XXPERMX, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, tl)
DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
DEF_HELPER_FLAGS_5(XXEVAL, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32)


@ -51,12 +51,27 @@
&VA vrt vra vrb rc
@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA
&VC vrt vra vrb rc:bool
@VC ...... vrt:5 vra:5 vrb:5 rc:1 .......... &VC
&VN vrt vra vrb sh
@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN
&VX vrt vra vrb
@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX
&VX_bf bf vra vrb
@VX_bf ...... bf:3 .. vra:5 vrb:5 ........... &VX_bf
&VX_mp rt mp:bool vrb
@VX_mp ...... rt:5 .... mp:1 vrb:5 ........... &VX_mp
&VX_n rt vrb n
@VX_n ...... rt:5 .. n:3 vrb:5 ........... &VX_n
&VX_tb_rc vrt vrb rc:bool
@VX_tb_rc ...... vrt:5 ..... vrb:5 rc:1 .......... &VX_tb_rc
&VX_uim4 vrt uim vrb
@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4
@ -104,6 +119,9 @@
@X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl
%x_xt 0:1 21:5
&X_imm5 xt imm:uint8_t vrb
@X_imm5 ...... ..... imm:5 vrb:5 .......... . &X_imm5 xt=%x_xt
&X_imm8 xt imm:uint8_t
@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt
@ -133,12 +151,25 @@
%xx_xt 0:1 21:5
%xx_xb 1:1 11:5
%xx_xa 2:1 16:5
&XX2 xt xb uim:uint8_t
@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx_xt xb=%xx_xb
%xx_xc 3:1 6:5
&XX2 xt xb
@XX2 ...... ..... ..... ..... ......... .. &XX2 xt=%xx_xt xb=%xx_xb
&XX2_uim2 xt xb uim:uint8_t
@XX2_uim2 ...... ..... ... uim:2 ..... ......... .. &XX2_uim2 xt=%xx_xt xb=%xx_xb
&XX2_bf_xb bf xb
@XX2_bf_xb ...... bf:3 .. ..... ..... ......... . . &XX2_bf_xb xb=%xx_xb
&XX3 xt xa xb
@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb
&XX3_dm xt xa xb dm
@XX3_dm ...... ..... ..... ..... . dm:2 ..... ... &XX3_dm xt=%xx_xt xa=%xx_xa xb=%xx_xb
&XX4 xt xa xb xc
@XX4 ...... ..... ..... ..... ..... .. .... &XX4 xt=%xx_xt xa=%xx_xa xb=%xx_xb xc=%xx_xc
&Z22_bf_fra bf fra dm
@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra
@ -373,8 +404,41 @@ DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc
DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc
DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc
## Vector Integer Instructions
VCMPEQUB 000100 ..... ..... ..... . 0000000110 @VC
VCMPEQUH 000100 ..... ..... ..... . 0001000110 @VC
VCMPEQUW 000100 ..... ..... ..... . 0010000110 @VC
VCMPEQUD 000100 ..... ..... ..... . 0011000111 @VC
VCMPEQUQ 000100 ..... ..... ..... . 0111000111 @VC
VCMPGTSB 000100 ..... ..... ..... . 1100000110 @VC
VCMPGTSH 000100 ..... ..... ..... . 1101000110 @VC
VCMPGTSW 000100 ..... ..... ..... . 1110000110 @VC
VCMPGTSD 000100 ..... ..... ..... . 1111000111 @VC
VCMPGTSQ 000100 ..... ..... ..... . 1110000111 @VC
VCMPGTUB 000100 ..... ..... ..... . 1000000110 @VC
VCMPGTUH 000100 ..... ..... ..... . 1001000110 @VC
VCMPGTUW 000100 ..... ..... ..... . 1010000110 @VC
VCMPGTUD 000100 ..... ..... ..... . 1011000111 @VC
VCMPGTUQ 000100 ..... ..... ..... . 1010000111 @VC
VCMPNEB 000100 ..... ..... ..... . 0000000111 @VC
VCMPNEH 000100 ..... ..... ..... . 0001000111 @VC
VCMPNEW 000100 ..... ..... ..... . 0010000111 @VC
VCMPNEZB 000100 ..... ..... ..... . 0100000111 @VC
VCMPNEZH 000100 ..... ..... ..... . 0101000111 @VC
VCMPNEZW 000100 ..... ..... ..... . 0110000111 @VC
VCMPSQ 000100 ... -- ..... ..... 00101000001 @VX_bf
VCMPUQ 000100 ... -- ..... ..... 00100000001 @VX_bf
## Vector Bit Manipulation Instruction
VGNB 000100 ..... -- ... ..... 10011001100 @VX_n
VCFUGED 000100 ..... ..... ..... 10101001101 @VX
VCLZDM 000100 ..... ..... ..... 11110000100 @VX
VCTZDM 000100 ..... ..... ..... 11111000100 @VX
@ -419,6 +483,54 @@ VINSWVRX 000100 ..... ..... ..... 00110001111 @VX
VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN
VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
VPERM 000100 ..... ..... ..... ..... 101011 @VA
VPERMR 000100 ..... ..... ..... ..... 111011 @VA
VSEL 000100 ..... ..... ..... ..... 101010 @VA
## Vector Integer Shift Instruction
VSLB 000100 ..... ..... ..... 00100000100 @VX
VSLH 000100 ..... ..... ..... 00101000100 @VX
VSLW 000100 ..... ..... ..... 00110000100 @VX
VSLD 000100 ..... ..... ..... 10111000100 @VX
VSLQ 000100 ..... ..... ..... 00100000101 @VX
VSRB 000100 ..... ..... ..... 01000000100 @VX
VSRH 000100 ..... ..... ..... 01001000100 @VX
VSRW 000100 ..... ..... ..... 01010000100 @VX
VSRD 000100 ..... ..... ..... 11011000100 @VX
VSRQ 000100 ..... ..... ..... 01000000101 @VX
VSRAB 000100 ..... ..... ..... 01100000100 @VX
VSRAH 000100 ..... ..... ..... 01101000100 @VX
VSRAW 000100 ..... ..... ..... 01110000100 @VX
VSRAD 000100 ..... ..... ..... 01111000100 @VX
VSRAQ 000100 ..... ..... ..... 01100000101 @VX
VRLB 000100 ..... ..... ..... 00000000100 @VX
VRLH 000100 ..... ..... ..... 00001000100 @VX
VRLW 000100 ..... ..... ..... 00010000100 @VX
VRLD 000100 ..... ..... ..... 00011000100 @VX
VRLQ 000100 ..... ..... ..... 00000000101 @VX
VRLWMI 000100 ..... ..... ..... 00010000101 @VX
VRLDMI 000100 ..... ..... ..... 00011000101 @VX
VRLQMI 000100 ..... ..... ..... 00001000101 @VX
VRLWNM 000100 ..... ..... ..... 00110000101 @VX
VRLDNM 000100 ..... ..... ..... 00111000101 @VX
VRLQNM 000100 ..... ..... ..... 00101000101 @VX
## Vector Integer Arithmetic Instructions
VEXTSB2W 000100 ..... 10000 ..... 11000000010 @VX_tb
VEXTSH2W 000100 ..... 10001 ..... 11000000010 @VX_tb
VEXTSB2D 000100 ..... 11000 ..... 11000000010 @VX_tb
VEXTSH2D 000100 ..... 11001 ..... 11000000010 @VX_tb
VEXTSW2D 000100 ..... 11010 ..... 11000000010 @VX_tb
VEXTSD2Q 000100 ..... 11011 ..... 11000000010 @VX_tb
## Vector Mask Manipulation Instructions
MTVSRBM 000100 ..... 10000 ..... 11001000010 @VX_tb
@ -440,8 +552,60 @@ VEXTRACTWM 000100 ..... 01010 ..... 11001000010 @VX_tb
VEXTRACTDM 000100 ..... 01011 ..... 11001000010 @VX_tb
VEXTRACTQM 000100 ..... 01100 ..... 11001000010 @VX_tb
VCNTMBB 000100 ..... 1100 . ..... 11001000010 @VX_mp
VCNTMBH 000100 ..... 1101 . ..... 11001000010 @VX_mp
VCNTMBW 000100 ..... 1110 . ..... 11001000010 @VX_mp
VCNTMBD 000100 ..... 1111 . ..... 11001000010 @VX_mp
## Vector Multiply Instruction
VMULESB 000100 ..... ..... ..... 01100001000 @VX
VMULOSB 000100 ..... ..... ..... 00100001000 @VX
VMULEUB 000100 ..... ..... ..... 01000001000 @VX
VMULOUB 000100 ..... ..... ..... 00000001000 @VX
VMULESH 000100 ..... ..... ..... 01101001000 @VX
VMULOSH 000100 ..... ..... ..... 00101001000 @VX
VMULEUH 000100 ..... ..... ..... 01001001000 @VX
VMULOUH 000100 ..... ..... ..... 00001001000 @VX
VMULESW 000100 ..... ..... ..... 01110001000 @VX
VMULOSW 000100 ..... ..... ..... 00110001000 @VX
VMULEUW 000100 ..... ..... ..... 01010001000 @VX
VMULOUW 000100 ..... ..... ..... 00010001000 @VX
VMULESD 000100 ..... ..... ..... 01111001000 @VX
VMULOSD 000100 ..... ..... ..... 00111001000 @VX
VMULEUD 000100 ..... ..... ..... 01011001000 @VX
VMULOUD 000100 ..... ..... ..... 00011001000 @VX
VMULHSW 000100 ..... ..... ..... 01110001001 @VX
VMULHUW 000100 ..... ..... ..... 01010001001 @VX
VMULHSD 000100 ..... ..... ..... 01111001001 @VX
VMULHUD 000100 ..... ..... ..... 01011001001 @VX
VMULLD 000100 ..... ..... ..... 00111001001 @VX
## Vector Multiply-Sum Instructions
VMSUMCUD 000100 ..... ..... ..... ..... 010111 @VA
VMSUMUDM 000100 ..... ..... ..... ..... 100011 @VA
## Vector String Instructions
VSTRIBL 000100 ..... 00000 ..... . 0000001101 @VX_tb_rc
VSTRIBR 000100 ..... 00001 ..... . 0000001101 @VX_tb_rc
VSTRIHL 000100 ..... 00010 ..... . 0000001101 @VX_tb_rc
VSTRIHR 000100 ..... 00011 ..... . 0000001101 @VX_tb_rc
VCLRLB 000100 ..... ..... ..... 00110001101 @VX
VCLRRB 000100 ..... ..... ..... 00111001101 @VX
# VSX Load/Store Instructions
LXSD 111001 ..... ..... .............. 10 @DS
STXSD 111101 ..... ..... .............. 10 @DS
LXSSP 111001 ..... ..... .............. 11 @DS
STXSSP 111101 ..... ..... .............. 11 @DS
LXV 111101 ..... ..... ............ . 001 @DQ_TSX
STXV 111101 ..... ..... ............ . 101 @DQ_TSX
LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP
@ -450,11 +614,60 @@ LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX
STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX
LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP
STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP
LXVRBX 011111 ..... ..... ..... 0000001101 . @X_TSX
LXVRHX 011111 ..... ..... ..... 0000101101 . @X_TSX
LXVRWX 011111 ..... ..... ..... 0001001101 . @X_TSX
LXVRDX 011111 ..... ..... ..... 0001101101 . @X_TSX
STXVRBX 011111 ..... ..... ..... 0010001101 . @X_TSX
STXVRHX 011111 ..... ..... ..... 0010101101 . @X_TSX
STXVRWX 011111 ..... ..... ..... 0011001101 . @X_TSX
STXVRDX 011111 ..... ..... ..... 0011101101 . @X_TSX
## VSX Scalar Multiply-Add Instructions
XSMADDADP 111100 ..... ..... ..... 00100001 . . . @XX3
XSMADDMDP 111100 ..... ..... ..... 00101001 . . . @XX3
XSMADDASP 111100 ..... ..... ..... 00000001 . . . @XX3
XSMADDMSP 111100 ..... ..... ..... 00001001 . . . @XX3
XSMADDQP 111111 ..... ..... ..... 0110000100 . @X_rc
XSMSUBADP 111100 ..... ..... ..... 00110001 . . . @XX3
XSMSUBMDP 111100 ..... ..... ..... 00111001 . . . @XX3
XSMSUBASP 111100 ..... ..... ..... 00010001 . . . @XX3
XSMSUBMSP 111100 ..... ..... ..... 00011001 . . . @XX3
XSMSUBQP 111111 ..... ..... ..... 0110100100 . @X_rc
XSNMADDASP 111100 ..... ..... ..... 10000001 . . . @XX3
XSNMADDMSP 111100 ..... ..... ..... 10001001 . . . @XX3
XSNMADDADP 111100 ..... ..... ..... 10100001 . . . @XX3
XSNMADDMDP 111100 ..... ..... ..... 10101001 . . . @XX3
XSNMADDQP 111111 ..... ..... ..... 0111000100 . @X_rc
XSNMSUBASP 111100 ..... ..... ..... 10010001 . . . @XX3
XSNMSUBMSP 111100 ..... ..... ..... 10011001 . . . @XX3
XSNMSUBADP 111100 ..... ..... ..... 10110001 . . . @XX3
XSNMSUBMDP 111100 ..... ..... ..... 10111001 . . . @XX3
XSNMSUBQP 111111 ..... ..... ..... 0111100100 . @X_rc
## VSX splat instruction
XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8
XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2
XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2_uim2
## VSX Permute Instructions
XXPERM 111100 ..... ..... ..... 00011010 ... @XX3
XXPERMR 111100 ..... ..... ..... 00111010 ... @XX3
XXPERMDI 111100 ..... ..... ..... 0 .. 01010 ... @XX3_dm
XXSEL 111100 ..... ..... ..... ..... 11 .... @XX4
## VSX Vector Generate PCV
XXGENPCVBM 111100 ..... ..... ..... 1110010100 . @X_imm5
XXGENPCVHM 111100 ..... ..... ..... 1110010101 . @X_imm5
XXGENPCVWM 111100 ..... ..... ..... 1110110100 . @X_imm5
XXGENPCVDM 111100 ..... ..... ..... 1110110101 . @X_imm5
## VSX Vector Load Special Value Instruction
@ -466,10 +679,25 @@ XSMAXCDP 111100 ..... ..... ..... 10000000 ... @XX3
XSMINCDP 111100 ..... ..... ..... 10001000 ... @XX3
XSMAXJDP 111100 ..... ..... ..... 10010000 ... @XX3
XSMINJDP 111100 ..... ..... ..... 10011000 ... @XX3
XSMAXCQP 111111 ..... ..... ..... 1010100100 - @X
XSMINCQP 111111 ..... ..... ..... 1011100100 - @X
XSCMPEQDP 111100 ..... ..... ..... 00000011 ... @XX3
XSCMPGEDP 111100 ..... ..... ..... 00010011 ... @XX3
XSCMPGTDP 111100 ..... ..... ..... 00001011 ... @XX3
XSCMPEQQP 111111 ..... ..... ..... 0001000100 - @X
XSCMPGEQP 111111 ..... ..... ..... 0011000100 - @X
XSCMPGTQP 111111 ..... ..... ..... 0011100100 - @X
## VSX Binary Floating-Point Convert Instructions
XSCVQPDP 111111 ..... 10100 ..... 1101000100 . @X_tb_rc
XVCVBF16SPN 111100 ..... 10000 ..... 111011011 .. @XX2
XVCVSPBF16 111100 ..... 10001 ..... 111011011 .. @XX2
## VSX Vector Test Least-Significant Bit by Byte Instruction
XVTLSBB 111100 ... -- 00010 ..... 111011011 . - @XX2_bf_xb
### rfebb
&XL_s s:uint8_t

View File

@ -32,6 +32,10 @@
...... ..... ra:5 ................ \
&PLS_D si=%pls_si rt=%rt_tsxp
@8LS_D ...... .. . .. r:1 .. .................. \
...... rt:5 ra:5 ................ \
&PLS_D si=%pls_si
# Format 8RR:D
%8rr_si 32:s16 0:16
%8rr_xt 16:1 21:5
@ -44,15 +48,25 @@
...... ..... .... . ................ \
&8RR_D si=%8rr_si xt=%8rr_xt
# Format XX4
&XX4 xt xa xb xc
%xx4_xt 0:1 21:5
%xx4_xa 2:1 16:5
%xx4_xb 1:1 11:5
%xx4_xc 3:1 6:5
@XX4 ........ ........ ........ ........ \
# Format 8RR:XX4
%8rr_xx_xt 0:1 21:5
%8rr_xx_xa 2:1 16:5
%8rr_xx_xb 1:1 11:5
%8rr_xx_xc 3:1 6:5
&8RR_XX4 xt xa xb xc
@8RR_XX4 ........ ........ ........ ........ \
...... ..... ..... ..... ..... .. .... \
&XX4 xt=%xx4_xt xa=%xx4_xa xb=%xx4_xb xc=%xx4_xc
&8RR_XX4 xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc
&8RR_XX4_imm xt xa xb xc imm
@8RR_XX4_imm ........ ........ ........ imm:8 \
...... ..... ..... ..... ..... .. .... \
&8RR_XX4_imm xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc
&8RR_XX4_uim3 xt xa xb xc uim3
@8RR_XX4_uim3 ...... .. .... .. ............... uim3:3 \
...... ..... ..... ..... ..... .. .... \
&8RR_XX4_uim3 xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc
### Fixed-Point Load Instructions
@ -170,6 +184,18 @@ PSTFD 000001 10 0--.-- .................. \
### VSX instructions
PLXSD 000001 00 0--.-- .................. \
101010 ..... ..... ................ @8LS_D
PSTXSD 000001 00 0--.-- .................. \
101110 ..... ..... ................ @8LS_D
PLXSSP 000001 00 0--.-- .................. \
101011 ..... ..... ................ @8LS_D
PSTXSSP 000001 00 0--.-- .................. \
101111 ..... ..... ................ @8LS_D
PLXV 000001 00 0--.-- .................. \
11001 ...... ..... ................ @8LS_D_TSX
PSTXV 000001 00 0--.-- .................. \
@ -179,6 +205,9 @@ PLXVP 000001 00 0--.-- .................. \
PSTXVP 000001 00 0--.-- .................. \
111110 ..... ..... ................ @8LS_D_TSXP
XXEVAL 000001 01 0000 -- ---------- ........ \
100010 ..... ..... ..... ..... 01 .... @8RR_XX4_imm
XXSPLTIDP 000001 01 0000 -- -- ................ \
100000 ..... 0010 . ................ @8RR_D
XXSPLTIW 000001 01 0000 -- -- ................ \
@ -187,10 +216,13 @@ XXSPLTI32DX 000001 01 0000 -- -- ................ \
100000 ..... 000 .. ................ @8RR_D_IX
XXBLENDVD 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 11 .... @XX4
100001 ..... ..... ..... ..... 11 .... @8RR_XX4
XXBLENDVW 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 10 .... @XX4
100001 ..... ..... ..... ..... 10 .... @8RR_XX4
XXBLENDVH 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 01 .... @XX4
100001 ..... ..... ..... ..... 01 .... @8RR_XX4
XXBLENDVB 000001 01 0000 -- ------------------ \
100001 ..... ..... ..... ..... 00 .... @XX4
100001 ..... ..... ..... ..... 00 .... @8RR_XX4
XXPERMX 000001 01 0000 -- --------------- ... \
100010 ..... ..... ..... ..... 00 .... @8RR_XX4_uim3

View File

@ -28,6 +28,7 @@
#include "fpu/softfloat.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "tcg/tcg-gvec-desc.h"
#include "helper_regs.h"
/*****************************************************************************/
@ -662,100 +663,18 @@ VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
#define VCMP_DO(suffix, compare, element, record) \
void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
ppc_avr_t *a, ppc_avr_t *b) \
{ \
uint64_t ones = (uint64_t)-1; \
uint64_t all = ones; \
uint64_t none = 0; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
uint64_t result = (a->element[i] compare b->element[i] ? \
ones : 0x0); \
switch (sizeof(a->element[0])) { \
case 8: \
r->u64[i] = result; \
break; \
case 4: \
r->u32[i] = result; \
break; \
case 2: \
r->u16[i] = result; \
break; \
case 1: \
r->u8[i] = result; \
break; \
} \
all &= result; \
none |= result; \
} \
if (record) { \
env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
} \
}
#define VCMP(suffix, compare, element) \
VCMP_DO(suffix, compare, element, 0) \
VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(equd, ==, u64)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtud, >, u64)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
VCMP(gtsd, >, s64)
#undef VCMP_DO
#undef VCMP
#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \
void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
ppc_avr_t *a, ppc_avr_t *b) \
{ \
etype ones = (etype)-1; \
etype all = ones; \
etype result, none = 0; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
if (cmpzero) { \
result = ((a->element[i] == 0) \
|| (b->element[i] == 0) \
|| (a->element[i] != b->element[i]) ? \
ones : 0x0); \
} else { \
result = (a->element[i] != b->element[i]) ? ones : 0x0; \
} \
r->element[i] = result; \
all &= result; \
none |= result; \
} \
if (record) { \
env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
} \
#define VCMPNEZ(NAME, ELEM) \
void helper_##NAME(ppc_vsr_t *t, ppc_vsr_t *a, ppc_vsr_t *b, uint32_t desc) \
{ \
for (int i = 0; i < ARRAY_SIZE(t->ELEM); i++) { \
t->ELEM[i] = ((a->ELEM[i] == 0) || (b->ELEM[i] == 0) || \
(a->ELEM[i] != b->ELEM[i])) ? -1 : 0; \
} \
}
/*
* VCMPNEZ - Vector compare not equal to zero
* suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
#define VCMPNE(suffix, element, etype, cmpzero) \
VCMPNE_DO(suffix, element, etype, cmpzero, 0) \
VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1)
VCMPNE(zb, u8, uint8_t, 1)
VCMPNE(zh, u16, uint16_t, 1)
VCMPNE(zw, u32, uint32_t, 1)
VCMPNE(b, u8, uint8_t, 0)
VCMPNE(h, u16, uint16_t, 0)
VCMPNE(w, u32, uint32_t, 0)
#undef VCMPNE_DO
#undef VCMPNE
VCMPNEZ(VCMPNEZB, u8)
VCMPNEZ(VCMPNEZH, u16)
VCMPNEZ(VCMPNEZW, u32)
#undef VCMPNEZ
#define VCMPFP_DO(suffix, compare, order, record) \
void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
@ -1063,7 +982,7 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
#define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \
void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i; \
\
@ -1074,7 +993,7 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
#define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \
void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i; \
\
@ -1085,55 +1004,39 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
#define VMUL(suffix, mul_element, mul_access, prod_access, cast) \
VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \
VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast)
VMUL(sb, s8, VsrSB, VsrSH, int16_t)
VMUL(sh, s16, VsrSH, VsrSW, int32_t)
VMUL(sw, s32, VsrSW, VsrSD, int64_t)
VMUL(ub, u8, VsrB, VsrH, uint16_t)
VMUL(uh, u16, VsrH, VsrW, uint32_t)
VMUL(uw, u32, VsrW, VsrD, uint64_t)
VMUL_DO_EVN(MULE##suffix, mul_element, mul_access, prod_access, cast) \
VMUL_DO_ODD(MULO##suffix, mul_element, mul_access, prod_access, cast)
VMUL(SB, s8, VsrSB, VsrSH, int16_t)
VMUL(SH, s16, VsrSH, VsrSW, int32_t)
VMUL(SW, s32, VsrSW, VsrSD, int64_t)
VMUL(UB, u8, VsrB, VsrH, uint16_t)
VMUL(UH, u16, VsrH, VsrW, uint32_t)
VMUL(UW, u32, VsrW, VsrD, uint64_t)
#undef VMUL_DO_EVN
#undef VMUL_DO_ODD
#undef VMUL
void helper_vmulhsw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
void helper_XXPERMX(ppc_vsr_t *t, ppc_vsr_t *s0, ppc_vsr_t *s1, ppc_vsr_t *pcv,
target_ulong uim)
{
int i;
int i, idx;
ppc_vsr_t tmp = { .u64 = {0, 0} };
for (i = 0; i < 4; i++) {
r->s32[i] = (int32_t)(((int64_t)a->s32[i] * (int64_t)b->s32[i]) >> 32);
for (i = 0; i < ARRAY_SIZE(t->u8); i++) {
if ((pcv->VsrB(i) >> 5) == uim) {
idx = pcv->VsrB(i) & 0x1f;
if (idx < ARRAY_SIZE(t->u8)) {
tmp.VsrB(i) = s0->VsrB(idx);
} else {
tmp.VsrB(i) = s1->VsrB(idx - ARRAY_SIZE(t->u8));
}
}
}
*t = tmp;
}
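
For reference, the byte-selection rule implemented by helper_XXPERMX above can be modelled in scalar C as below. This is an illustrative standalone sketch, not part of the patch; element indexing is kept abstract here, whereas the real helper goes through VsrB() to hide host byte order.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: result byte i comes from the 32-byte source
 * s0||s1 when the top three bits of the permute-control byte match
 * UIM, otherwise it is zero. */
static void xxpermx_model(uint8_t t[16], const uint8_t s0[16],
                          const uint8_t s1[16], const uint8_t pcv[16],
                          unsigned uim)
{
    for (int i = 0; i < 16; i++) {
        if ((pcv[i] >> 5) == uim) {
            unsigned idx = pcv[i] & 0x1f;
            t[i] = idx < 16 ? s0[idx] : s1[idx - 16];
        } else {
            t[i] = 0;
        }
    }
}

int main(void)
{
    uint8_t s0[16], s1[16], pcv[16] = { 0 }, t[16];
    for (int i = 0; i < 16; i++) {
        s0[i] = i;              /* bytes 0x00..0x0f */
        s1[i] = 0x10 + i;       /* bytes 0x10..0x1f */
    }
    pcv[0] = 0x03;              /* top bits 000 match UIM=0, selects s0[3] */
    pcv[1] = 0x13;              /* index 19 >= 16, selects s1[3] (= 0x13)  */
    pcv[2] = 0x23;              /* top bits 001 do not match UIM=0 -> 0x00 */
    xxpermx_model(t, s0, s1, pcv, 0);
    printf("%02x %02x %02x\n", t[0], t[1], t[2]);   /* 03 13 00 */
    return 0;
}
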
void helper_vmulhuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i;
for (i = 0; i < 4; i++) {
r->u32[i] = (uint32_t)(((uint64_t)a->u32[i] *
(uint64_t)b->u32[i]) >> 32);
}
}
void helper_vmulhsd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
uint64_t discard;
muls64(&discard, &r->u64[0], a->s64[0], b->s64[0]);
muls64(&discard, &r->u64[1], a->s64[1], b->s64[1]);
}
void helper_vmulhud(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
uint64_t discard;
mulu64(&discard, &r->u64[0], a->u64[0], b->u64[0]);
mulu64(&discard, &r->u64[1], a->u64[1], b->u64[1]);
}
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
void helper_VPERM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
int i;
@ -1151,8 +1054,7 @@ void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
void helper_VPERMR(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
int i;
@ -1170,6 +1072,97 @@ void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
#define XXGENPCV(NAME, SZ) \
void glue(helper_, glue(NAME, _be_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \
{ \
ppc_vsr_t tmp; \
\
/* Initialize tmp with the result of an all-zeros mask */ \
tmp.VsrD(0) = 0x1011121314151617; \
tmp.VsrD(1) = 0x18191A1B1C1D1E1F; \
\
/* Iterate over the most significant byte of each element */ \
for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
if (b->VsrB(i) & 0x80) { \
/* Update each byte of the element */ \
for (int k = 0; k < SZ; k++) { \
tmp.VsrB(i + k) = j + k; \
} \
j += SZ; \
} \
} \
\
*t = tmp; \
} \
\
void glue(helper_, glue(NAME, _be_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\
{ \
ppc_vsr_t tmp = { .u64 = { 0, 0 } }; \
\
/* Iterate over the most significant byte of each element */ \
for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
if (b->VsrB(i) & 0x80) { \
/* Update each byte of the element */ \
for (int k = 0; k < SZ; k++) { \
tmp.VsrB(j + k) = i + k; \
} \
j += SZ; \
} \
} \
\
*t = tmp; \
} \
\
void glue(helper_, glue(NAME, _le_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \
{ \
ppc_vsr_t tmp; \
\
/* Initialize tmp with the result of an all-zeros mask */ \
tmp.VsrD(0) = 0x1F1E1D1C1B1A1918; \
tmp.VsrD(1) = 0x1716151413121110; \
\
/* Iterate over the most significant byte of each element */ \
for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
/* Reverse indexing of "i" */ \
const int idx = ARRAY_SIZE(b->u8) - i - SZ; \
if (b->VsrB(idx) & 0x80) { \
/* Update each byte of the element */ \
for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) { \
tmp.VsrB(idx + rk) = j + k; \
} \
j += SZ; \
} \
} \
\
*t = tmp; \
} \
\
void glue(helper_, glue(NAME, _le_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\
{ \
ppc_vsr_t tmp = { .u64 = { 0, 0 } }; \
\
/* Iterate over the most significant byte of each element */ \
for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
if (b->VsrB(ARRAY_SIZE(b->u8) - i - SZ) & 0x80) { \
/* Update each byte of the element */ \
for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) { \
/* Reverse indexing of "j" */ \
const int idx = ARRAY_SIZE(b->u8) - j - SZ; \
tmp.VsrB(idx + rk) = i + k; \
} \
j += SZ; \
} \
} \
\
*t = tmp; \
}
XXGENPCV(XXGENPCVBM, 1)
XXGENPCV(XXGENPCVHM, 2)
XXGENPCV(XXGENPCVWM, 4)
XXGENPCV(XXGENPCVDM, 8)
#undef XXGENPCV
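
The translate code later in this pull request maps IMM=0 to the *_be_exp variant, so for byte elements (XXGENPCVBM) the expansion case behaves as in the standalone sketch below: selected mask elements receive ascending indices while unselected positions keep the 0x10..0x1F defaults of the all-zeros-mask result. Illustration only, not part of the patch; the VsrB() host-endianness handling is omitted.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: scalar model of the big-endian "expansion"
 * variant for byte elements (SZ = 1). */
static void xxgenpcvbm_be_exp_model(uint8_t pcv[16], const uint8_t mask[16])
{
    for (int i = 0; i < 16; i++) {
        pcv[i] = 0x10 + i;              /* result for an all-zeros mask */
    }
    for (int i = 0, j = 0; i < 16; i++) {
        if (mask[i] & 0x80) {
            pcv[i] = j++;               /* selected elements get 0,1,2,... */
        }
    }
}

int main(void)
{
    uint8_t mask[16] = { 0 }, pcv[16];
    mask[1] = mask[3] = 0x80;           /* select elements 1 and 3 */
    xxgenpcvbm_be_exp_model(pcv, mask);
    for (int i = 0; i < 16; i++) {
        printf("%02x ", pcv[i]);        /* 10 00 12 01 14 15 ... 1f */
    }
    printf("\n");
    return 0;
}
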
#if defined(HOST_WORDS_BIGENDIAN)
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
#define VBPERMD_INDEX(i) (i)
@ -1392,40 +1385,33 @@ void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
}
}
#define VRLMI(name, size, element, insert) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i; \
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
uint##size##_t src1 = a->element[i]; \
uint##size##_t src2 = b->element[i]; \
uint##size##_t src3 = r->element[i]; \
uint##size##_t begin, end, shift, mask, rot_val; \
\
shift = extract##size(src2, 0, 6); \
end = extract##size(src2, 8, 6); \
begin = extract##size(src2, 16, 6); \
rot_val = rol##size(src1, shift); \
mask = mask_u##size(begin, end); \
if (insert) { \
r->element[i] = (rot_val & mask) | (src3 & ~mask); \
} else { \
r->element[i] = (rot_val & mask); \
} \
} \
#define VRLMI(name, size, element, insert) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
{ \
int i; \
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
uint##size##_t src1 = a->element[i]; \
uint##size##_t src2 = b->element[i]; \
uint##size##_t src3 = r->element[i]; \
uint##size##_t begin, end, shift, mask, rot_val; \
\
shift = extract##size(src2, 0, 6); \
end = extract##size(src2, 8, 6); \
begin = extract##size(src2, 16, 6); \
rot_val = rol##size(src1, shift); \
mask = mask_u##size(begin, end); \
if (insert) { \
r->element[i] = (rot_val & mask) | (src3 & ~mask); \
} else { \
r->element[i] = (rot_val & mask); \
} \
} \
}
VRLMI(vrldmi, 64, u64, 1);
VRLMI(vrlwmi, 32, u32, 1);
VRLMI(vrldnm, 64, u64, 0);
VRLMI(vrlwnm, 32, u32, 0);
void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
{
r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}
VRLMI(VRLDMI, 64, u64, 1);
VRLMI(VRLWMI, 32, u32, 1);
VRLMI(VRLDNM, 64, u64, 0);
VRLMI(VRLWNM, 32, u32, 0);
void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
@ -1619,6 +1605,34 @@ VEXTRACT(uw, u32)
VEXTRACT(d, u64)
#undef VEXTRACT
#define VSTRI(NAME, ELEM, NUM_ELEMS, LEFT) \
uint32_t helper_##NAME(ppc_avr_t *t, ppc_avr_t *b) \
{ \
int i, idx, crf = 0; \
\
for (i = 0; i < NUM_ELEMS; i++) { \
idx = LEFT ? i : NUM_ELEMS - i - 1; \
if (b->Vsr##ELEM(idx)) { \
t->Vsr##ELEM(idx) = b->Vsr##ELEM(idx); \
} else { \
crf = 0b0010; \
break; \
} \
} \
\
for (; i < NUM_ELEMS; i++) { \
idx = LEFT ? i : NUM_ELEMS - i - 1; \
t->Vsr##ELEM(idx) = 0; \
} \
\
return crf; \
}
VSTRI(VSTRIBL, B, 16, true)
VSTRI(VSTRIBR, B, 16, false)
VSTRI(VSTRIHL, H, 8, true)
VSTRI(VSTRIHR, H, 8, false)
#undef VSTRI
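
As a rough scalar model of what VSTRIBL computes (illustration only, not part of the patch; VsrB() ordering omitted): bytes are copied from the left until the first zero element, that element and everything after it are cleared, and 0b0010 is returned for the CR field exactly when a zero element was found.

#include <stdint.h>
#include <stdio.h>

static unsigned vstribl_model(uint8_t t[16], const uint8_t b[16])
{
    unsigned crf = 0;
    int i;
    for (i = 0; i < 16; i++) {
        if (b[i] == 0) {
            crf = 0b0010;       /* a zero element was found */
            break;
        }
        t[i] = b[i];
    }
    for (; i < 16; i++) {
        t[i] = 0;               /* clear the rest of the result */
    }
    return crf;
}

int main(void)
{
    uint8_t b[16] = { 'v', 's', 'x', 0, 'j', 'u', 'n', 'k' };
    uint8_t t[16];
    unsigned crf = vstribl_model(t, b);
    printf("crf=%u t0..3=%c%c%c%02x\n", crf, t[0], t[1], t[2], t[3]);
    /* prints: crf=2 t0..3=vsx00 */
    return 0;
}
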
void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
ppc_vsr_t *xb, uint32_t index)
{
@ -1650,6 +1664,47 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
*xt = t;
}
void helper_XXEVAL(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c,
uint32_t desc)
{
/*
* Instead of processing imm bit-by-bit, we'll skip the computation of
* conjunctions whose corresponding bit is unset.
*/
int bit, imm = simd_data(desc);
Int128 conj, disj = int128_zero();
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
/*
* Get the next bit to be processed with ctz64. Invert the result of
* ctz64 to match the indexing used by PowerISA.
*/
bit = 7 - ctzl(imm);
if (bit & 0x4) {
conj = a->s128;
} else {
conj = int128_not(a->s128);
}
if (bit & 0x2) {
conj = int128_and(conj, b->s128);
} else {
conj = int128_and(conj, int128_not(b->s128));
}
if (bit & 0x1) {
conj = int128_and(conj, c->s128);
} else {
conj = int128_and(conj, int128_not(c->s128));
}
disj = int128_or(disj, conj);
/* Unset the least significant bit that is set */
imm &= imm - 1;
}
t->s128 = disj;
}
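
Each set bit of the 8-bit IMM selects one minterm of (A, B, C); the loop above builds that conjunction and ORs it into the result, with the 7 - ctz inversion matching PowerISA's bit numbering. A standalone 64-bit model of the same evaluation follows; it is illustration only, not part of the patch, and uses the GCC/Clang __builtin_ctz intrinsic for brevity.

#include <stdint.h>
#include <stdio.h>

static uint64_t xxeval_model(uint64_t a, uint64_t b, uint64_t c, unsigned imm)
{
    uint64_t disj = 0;
    while (imm) {
        unsigned bit = 7 - __builtin_ctz(imm);     /* PowerISA bit order */
        uint64_t conj = (bit & 4) ? a : ~a;
        conj &= (bit & 2) ? b : ~b;
        conj &= (bit & 1) ? c : ~c;
        disj |= conj;
        imm &= imm - 1;                            /* clear lowest set bit */
    }
    return disj;
}

int main(void)
{
    uint64_t a = 0xff00ff00ff00ff00ull, b = 0xffff0000ffff0000ull, c = -1ull;
    /* IMM 0b00000001 selects only the A&B&C minterm... */
    printf("%d\n", xxeval_model(a, b, c, 0x01) == (a & b & c));   /* 1 */
    /* ...and IMM 0b00111100 (a trans_XXEVAL fast path) is XOR(B,A). */
    printf("%d\n", xxeval_model(a, b, c, 0x3c) == (a ^ b));       /* 1 */
    return 0;
}
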
#define XXBLEND(name, sz) \
void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
ppc_avr_t *c, uint32_t desc) \
@ -1665,21 +1720,6 @@ XXBLEND(W, 32)
XXBLEND(D, 64)
#undef XXBLEND
#define VEXT_SIGNED(name, element, cast) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
{ \
int i; \
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
r->element[i] = (cast)b->element[i]; \
} \
}
VEXT_SIGNED(vextsb2w, s32, int8_t)
VEXT_SIGNED(vextsb2d, s64, int8_t)
VEXT_SIGNED(vextsh2w, s32, int16_t)
VEXT_SIGNED(vextsh2d, s64, int16_t)
VEXT_SIGNED(vextsw2d, s64, int32_t)
#undef VEXT_SIGNED
#define VNEG(name, element) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
{ \

View File

@ -2,6 +2,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
@ -20,7 +21,10 @@ static void post_load_update_msr(CPUPPCState *env)
*/
env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
ppc_store_msr(env, msr);
pmu_update_summaries(env);
if (tcg_enabled()) {
pmu_update_summaries(env);
}
}
static int get_avr(QEMUFile *f, void *pv, size_t size,

View File

@ -16,6 +16,7 @@ ppc_ss.add(when: 'CONFIG_TCG', if_true: files(
'misc_helper.c',
'timebase_helper.c',
'translate.c',
'power8-pmu.c',
))
ppc_ss.add(libdecnumber)
@ -51,7 +52,6 @@ ppc_softmmu_ss.add(when: 'TARGET_PPC64', if_true: files(
'mmu-book3s-v3.c',
'mmu-hash64.c',
'mmu-radix64.c',
'power8-pmu.c',
))
target_arch += {'ppc': ppc_ss}

View File

@ -222,6 +222,20 @@ static void pmu_update_overflow_timers(CPUPPCState *env)
}
}
static void pmu_delete_timers(CPUPPCState *env)
{
QEMUTimer *pmc_overflow_timer;
int sprn;
for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
if (pmc_overflow_timer) {
timer_del(pmc_overflow_timer);
}
}
}
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
@ -271,12 +285,29 @@ static void fire_PMC_interrupt(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_EBE)) {
return;
pmu_update_cycles(env);
if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
env->spr[SPR_POWER_MMCR0] &= ~MMCR0_FCECE;
env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;
/* Changing MMCR0_FC requires a new HFLAGS_INSN_CNT calc */
pmu_update_summaries(env);
/*
* Delete all pending timers if we need to freeze
* the PMC. We'll restart them when the PMC starts
* running again.
*/
pmu_delete_timers(env);
}
/* PMC interrupt not implemented yet */
return;
if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
}
raise_ebb_perfm_exception(env);
}
/* This helper assumes that the PMC is running. */

View File

@ -13,11 +13,11 @@
#ifndef POWER8_PMU
#define POWER8_PMU
void cpu_ppc_pmu_init(CPUPPCState *env);
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void cpu_ppc_pmu_init(CPUPPCState *env);
void pmu_update_summaries(CPUPPCState *env);
#else
static inline void cpu_ppc_pmu_init(CPUPPCState *env) { }
static inline void pmu_update_summaries(CPUPPCState *env) { }
#endif

View File

@ -6604,10 +6604,29 @@ static int times_16(DisasContext *ctx, int x)
#define TRANS(NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
REQUIRE_INSNS_FLAGS(ctx, FLAGS); \
return FUNC(ctx, a, __VA_ARGS__); \
}
#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
return FUNC(ctx, a, __VA_ARGS__); \
}
#define TRANS64(NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ \
REQUIRE_64BIT(ctx); \
REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
return FUNC(ctx, a, __VA_ARGS__); \
}
/* TODO: More TRANS* helpers for extra insn_flags checks. */
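
For example, with these wrappers an entry such as TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false), used further down in this pull request, expands after preprocessing to roughly the following, where REQUIRE_INSNS_FLAGS2 (defined elsewhere in translate.c) bails out when the CPU lacks the ISA v3.1 flag:

static bool trans_LXVRBX(DisasContext *ctx, arg_LXVRBX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);                /* reject pre-v3.1 CPUs */
    return do_lstrm(ctx, a, DEF_MEMOP(MO_UB), false); /* byte-sized load */
}
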
@ -6649,49 +6668,24 @@ static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
#include "translate/branch-impl.c.inc"
/* Handles lfdp, lxsd, lxssp */
/* Handles lfdp */
static void gen_dform39(DisasContext *ctx)
{
switch (ctx->opcode & 0x3) {
case 0: /* lfdp */
if ((ctx->opcode & 0x3) == 0) {
if (ctx->insns_flags2 & PPC2_ISA205) {
return gen_lfdp(ctx);
}
break;
case 2: /* lxsd */
if (ctx->insns_flags2 & PPC2_ISA300) {
return gen_lxsd(ctx);
}
break;
case 3: /* lxssp */
if (ctx->insns_flags2 & PPC2_ISA300) {
return gen_lxssp(ctx);
}
break;
}
return gen_invalid(ctx);
}
/* handles stfdp, lxv, stxsd, stxssp lxvx */
/* Handles stfdp */
static void gen_dform3D(DisasContext *ctx)
{
if ((ctx->opcode & 3) != 1) { /* DS-FORM */
switch (ctx->opcode & 0x3) {
case 0: /* stfdp */
if (ctx->insns_flags2 & PPC2_ISA205) {
return gen_stfdp(ctx);
}
break;
case 2: /* stxsd */
if (ctx->insns_flags2 & PPC2_ISA300) {
return gen_stxsd(ctx);
}
break;
case 3: /* stxssp */
if (ctx->insns_flags2 & PPC2_ISA300) {
return gen_stxssp(ctx);
}
break;
if ((ctx->opcode & 3) == 0) { /* DS-FORM */
/* stfdp */
if (ctx->insns_flags2 & PPC2_ISA205) {
return gen_stfdp(ctx);
}
}
return gen_invalid(ctx);

File diff suppressed because it is too large

View File

@ -101,33 +101,7 @@ GEN_VXFORM_DUAL(vmrgow, vextuwlx, 6, 26, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_300(vextubrx, 6, 28),
GEN_VXFORM_300(vextuhrx, 6, 29),
GEN_VXFORM_DUAL(vmrgew, vextuwrx, 6, 30, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM(vmuloub, 4, 0),
GEN_VXFORM(vmulouh, 4, 1),
GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM(vmulosb, 4, 4),
GEN_VXFORM(vmulosh, 4, 5),
GEN_VXFORM_207(vmulosw, 4, 6),
GEN_VXFORM_310(vmulld, 4, 7),
GEN_VXFORM(vmuleub, 4, 8),
GEN_VXFORM(vmuleuh, 4, 9),
GEN_VXFORM_DUAL(vmuleuw, vmulhuw, 4, 10, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM_310(vmulhud, 4, 11),
GEN_VXFORM(vmulesb, 4, 12),
GEN_VXFORM(vmulesh, 4, 13),
GEN_VXFORM_DUAL(vmulesw, vmulhsw, 4, 14, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM_310(vmulhsd, 4, 15),
GEN_VXFORM(vslb, 2, 4),
GEN_VXFORM(vslh, 2, 5),
GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM_207(vsld, 2, 23),
GEN_VXFORM(vsrb, 2, 8),
GEN_VXFORM(vsrh, 2, 9),
GEN_VXFORM(vsrw, 2, 10),
GEN_VXFORM_207(vsrd, 2, 27),
GEN_VXFORM(vsrab, 2, 12),
GEN_VXFORM(vsrah, 2, 13),
GEN_VXFORM(vsraw, 2, 14),
GEN_VXFORM_207(vsrad, 2, 15),
GEN_VXFORM_207(vmuluwm, 4, 2),
GEN_VXFORM_300(vsrv, 2, 28),
GEN_VXFORM_300(vslv, 2, 29),
GEN_VXFORM(vslo, 6, 16),
@ -158,11 +132,7 @@ GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300),
GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300),
GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM(vrlb, 2, 0),
GEN_VXFORM(vrlh, 2, 1),
GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM(vsl, 2, 7),
GEN_VXFORM(vsr, 2, 11),
GEN_VXFORM(vpkuhum, 7, 0),
GEN_VXFORM(vpkuwum, 7, 1),
@ -198,22 +168,10 @@ GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \
GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
GEN_VXRFORM_300(vcmpnezb, 3, 4)
GEN_VXRFORM_300(vcmpnezh, 3, 5)
GEN_VXRFORM_300(vcmpnezw, 3, 6)
GEN_VXRFORM(vcmpgtsb, 3, 12)
GEN_VXRFORM(vcmpgtsh, 3, 13)
GEN_VXRFORM(vcmpgtsw, 3, 14)
GEN_VXRFORM(vcmpgtub, 3, 8)
GEN_VXRFORM(vcmpgtuh, 3, 9)
GEN_VXRFORM(vcmpgtuw, 3, 10)
GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)
#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
@ -230,18 +188,12 @@ GEN_VXFORM(vspltish, 6, 13),
GEN_VXFORM(vspltisw, 6, 14),
GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06),
GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07),
GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10),
GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11),
GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18),
GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19),
GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A),
GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0),
GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1),
GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
#define GEN_VXFORM_NOA(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
@ -276,7 +228,6 @@ GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
GEN_VAFORM_PAIRED(vsel, vperm, 21),
GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207),

View File

@ -288,30 +288,6 @@ VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
#endif
#define VSX_LOAD_SCALAR_DS(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 xth; \
\
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
xth = tcg_temp_new_i64(); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_imm_index(ctx, EA, 0x03); \
gen_qemu_##operation(ctx, xth, EA); \
set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); \
/* NOTE: cpu_vsrl is undefined */ \
tcg_temp_free(EA); \
tcg_temp_free_i64(xth); \
}
VSX_LOAD_SCALAR_DS(lxsd, ld64_i64)
VSX_LOAD_SCALAR_DS(lxssp, ld32fs)
#define VSX_STORE_SCALAR(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
@ -461,30 +437,6 @@ static void gen_stxvb16x(DisasContext *ctx)
tcg_temp_free_i64(xsl);
}
#define VSX_STORE_SCALAR_DS(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 xth; \
\
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
xth = tcg_temp_new_i64(); \
get_cpu_vsr(xth, rD(ctx->opcode) + 32, true); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_imm_index(ctx, EA, 0x03); \
gen_qemu_##operation(ctx, xth, EA); \
/* NOTE: cpu_vsrl is undefined */ \
tcg_temp_free(EA); \
tcg_temp_free_i64(xth); \
}
VSX_STORE_SCALAR_DS(stxsd, st64_i64)
VSX_STORE_SCALAR_DS(stxssp, st32fs)
static void gen_mfvsrwz(DisasContext *ctx)
{
if (xS(ctx->opcode) < 32) {
@ -665,45 +617,6 @@ static void gen_mtvsrws(DisasContext *ctx)
#endif
static void gen_xxpermdi(DisasContext *ctx)
{
TCGv_i64 xh, xl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
xh = tcg_temp_new_i64();
xl = tcg_temp_new_i64();
if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
(xT(ctx->opcode) == xB(ctx->opcode)))) {
get_cpu_vsr(xh, xA(ctx->opcode), (DM(ctx->opcode) & 2) == 0);
get_cpu_vsr(xl, xB(ctx->opcode), (DM(ctx->opcode) & 1) == 0);
set_cpu_vsr(xT(ctx->opcode), xh, true);
set_cpu_vsr(xT(ctx->opcode), xl, false);
} else {
if ((DM(ctx->opcode) & 2) == 0) {
get_cpu_vsr(xh, xA(ctx->opcode), true);
set_cpu_vsr(xT(ctx->opcode), xh, true);
} else {
get_cpu_vsr(xh, xA(ctx->opcode), false);
set_cpu_vsr(xT(ctx->opcode), xh, true);
}
if ((DM(ctx->opcode) & 1) == 0) {
get_cpu_vsr(xl, xB(ctx->opcode), true);
set_cpu_vsr(xT(ctx->opcode), xl, false);
} else {
get_cpu_vsr(xl, xB(ctx->opcode), false);
set_cpu_vsr(xT(ctx->opcode), xl, false);
}
}
tcg_temp_free_i64(xh);
tcg_temp_free_i64(xl);
}
#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
@ -1091,10 +1004,6 @@ GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
@ -1200,8 +1109,216 @@ GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a)
{
TCGv_ptr xt, xa, xb;
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VSX(ctx);
xt = gen_vsr_ptr(a->xt);
xa = gen_vsr_ptr(a->xa);
xb = gen_vsr_ptr(a->xb);
gen_helper_VPERM(xt, xa, xt, xb);
tcg_temp_free_ptr(xt);
tcg_temp_free_ptr(xa);
tcg_temp_free_ptr(xb);
return true;
}
static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a)
{
TCGv_ptr xt, xa, xb;
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VSX(ctx);
xt = gen_vsr_ptr(a->xt);
xa = gen_vsr_ptr(a->xa);
xb = gen_vsr_ptr(a->xb);
gen_helper_VPERMR(xt, xa, xt, xb);
tcg_temp_free_ptr(xt);
tcg_temp_free_ptr(xa);
tcg_temp_free_ptr(xb);
return true;
}
static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a)
{
TCGv_i64 t0, t1;
REQUIRE_INSNS_FLAGS2(ctx, VSX);
REQUIRE_VSX(ctx);
t0 = tcg_temp_new_i64();
if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
t1 = tcg_temp_new_i64();
get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);
set_cpu_vsr(a->xt, t0, true);
set_cpu_vsr(a->xt, t1, false);
tcg_temp_free_i64(t1);
} else {
get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
set_cpu_vsr(a->xt, t0, true);
get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
set_cpu_vsr(a->xt, t0, false);
}
tcg_temp_free_i64(t0);
return true;
}
static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a)
{
TCGv_ptr xt, xa, xb, xc;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
xt = gen_vsr_ptr(a->xt);
xa = gen_vsr_ptr(a->xa);
xb = gen_vsr_ptr(a->xb);
xc = gen_vsr_ptr(a->xc);
gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
tcg_temp_free_ptr(xt);
tcg_temp_free_ptr(xa);
tcg_temp_free_ptr(xb);
tcg_temp_free_ptr(xc);
return true;
}
#define XXGENPCV(NAME) \
static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a) \
{ \
TCGv_ptr xt, vrb; \
\
REQUIRE_INSNS_FLAGS2(ctx, ISA310); \
REQUIRE_VSX(ctx); \
\
if (a->imm & ~0x3) { \
gen_invalid(ctx); \
return true; \
} \
\
xt = gen_vsr_ptr(a->xt); \
vrb = gen_avr_ptr(a->vrb); \
\
switch (a->imm) { \
case 0b00000: /* Big-Endian expansion */ \
glue(gen_helper_, glue(NAME, _be_exp))(xt, vrb); \
break; \
case 0b00001: /* Big-Endian compression */ \
glue(gen_helper_, glue(NAME, _be_comp))(xt, vrb); \
break; \
case 0b00010: /* Little-Endian expansion */ \
glue(gen_helper_, glue(NAME, _le_exp))(xt, vrb); \
break; \
case 0b00011: /* Little-Endian compression */ \
glue(gen_helper_, glue(NAME, _le_comp))(xt, vrb); \
break; \
} \
\
tcg_temp_free_ptr(xt); \
tcg_temp_free_ptr(vrb); \
\
return true; \
}
XXGENPCV(XXGENPCVBM)
XXGENPCV(XXGENPCVHM)
XXGENPCV(XXGENPCVWM)
XXGENPCV(XXGENPCVDM)
#undef XXGENPCV
static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
TCGv_ptr t, s1, s2, s3;
t = gen_vsr_ptr(tgt);
s1 = gen_vsr_ptr(src1);
s2 = gen_vsr_ptr(src2);
s3 = gen_vsr_ptr(src3);
gen_helper(cpu_env, t, s1, s2, s3);
tcg_temp_free_ptr(t);
tcg_temp_free_ptr(s1);
tcg_temp_free_ptr(s2);
tcg_temp_free_ptr(s3);
return true;
}
static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
REQUIRE_VSX(ctx);
if (type_a) {
return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
}
return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
}
TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
int vrt, vra, vrb;
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VSX(ctx);
vrt = a->rt + 32;
vra = a->ra + 32;
vrb = a->rb + 32;
if (a->rc) {
return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
}
return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
}
TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)
#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
static void gen_##name(DisasContext *ctx) \
@ -1233,14 +1350,6 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free_ptr(c); \
}
GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
@ -1422,22 +1531,18 @@ static void glue(gen_, name)(DisasContext *ctx) \
VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)
static void gen_xxsel(DisasContext *ctx)
static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a)
{
int rt = xT(ctx->opcode);
int ra = xA(ctx->opcode);
int rb = xB(ctx->opcode);
int rc = xC(ctx->opcode);
REQUIRE_INSNS_FLAGS2(ctx, VSX);
REQUIRE_VSX(ctx);
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(rt), vsr_full_offset(rc),
vsr_full_offset(rb), vsr_full_offset(ra), 16, 16);
tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc),
vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);
return true;
}
static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2 *a)
static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim2 *a)
{
int tofs, bofs;
@ -1547,6 +1652,46 @@ static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
return true;
}
static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a)
{
TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
xb = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
all_true = tcg_temp_new_i64();
all_false = tcg_temp_new_i64();
mask = tcg_constant_i64(dup_const(MO_8, 1));
zero = tcg_constant_i64(0);
get_cpu_vsr(xb, a->xb, true);
tcg_gen_and_i64(t0, mask, xb);
get_cpu_vsr(xb, a->xb, false);
tcg_gen_and_i64(t1, mask, xb);
tcg_gen_or_i64(all_false, t0, t1);
tcg_gen_and_i64(all_true, t0, t1);
tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero);
tcg_gen_shli_i64(all_false, all_false, 1);
tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask);
tcg_gen_shli_i64(all_true, all_true, 3);
tcg_gen_or_i64(t0, all_false, all_true);
tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0);
tcg_temp_free_i64(xb);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(all_true);
tcg_temp_free_i64(all_false);
return true;
}
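
The CR field computed above is 0b1000 when every byte of VSR[XB] has its least-significant bit set and 0b0010 when none does. A scalar model of that computation, as a standalone sketch that is not part of the patch:

#include <stdint.h>
#include <stdio.h>

static unsigned xvtlsbb_model(const uint8_t b[16])
{
    unsigned all_true = 1, all_false = 1;
    for (int i = 0; i < 16; i++) {
        all_true  &= (b[i] & 1);        /* every byte LSB set?  */
        all_false &= !(b[i] & 1);       /* no byte LSB set?     */
    }
    return (all_true << 3) | (all_false << 1);  /* 0b1000, 0b0010 or 0 */
}

int main(void)
{
    uint8_t odd[16], even[16], mixed[16];
    for (int i = 0; i < 16; i++) {
        odd[i] = 2 * i + 1;     /* every LSB set -> 0b1000 */
        even[i] = 2 * i;        /* no LSB set    -> 0b0010 */
        mixed[i] = i;           /* some LSBs set -> 0      */
    }
    printf("%u %u %u\n", xvtlsbb_model(odd), xvtlsbb_model(even),
           xvtlsbb_model(mixed));       /* prints: 8 2 0 */
    return 0;
}
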
static void gen_xxsldwi(DisasContext *ctx)
{
TCGv_i64 xth, xtl;
@ -2072,12 +2217,6 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
{
if (paired) {
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
} else {
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
}
if (paired || a->rt >= 32) {
REQUIRE_VSX(ctx);
} else {
@ -2091,7 +2230,6 @@ static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
bool store, bool paired)
{
arg_D d;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
if (!resolve_PLS_D(ctx, &d, a)) {
@ -2103,12 +2241,6 @@ static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
{
if (paired) {
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
} else {
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
}
if (paired || a->rt >= 32) {
REQUIRE_VSX(ctx);
} else {
@ -2118,18 +2250,373 @@ static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
}
TRANS(STXV, do_lstxv_D, true, false)
TRANS(LXV, do_lstxv_D, false, false)
TRANS(STXVP, do_lstxv_D, true, true)
TRANS(LXVP, do_lstxv_D, false, true)
TRANS(STXVX, do_lstxv_X, true, false)
TRANS(LXVX, do_lstxv_X, false, false)
TRANS(STXVPX, do_lstxv_X, true, true)
TRANS(LXVPX, do_lstxv_X, false, true)
TRANS64(PSTXV, do_lstxv_PLS_D, true, false)
TRANS64(PLXV, do_lstxv_PLS_D, false, false)
TRANS64(PSTXVP, do_lstxv_PLS_D, true, true)
TRANS64(PLXVP, do_lstxv_PLS_D, false, true)
static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
{
TCGv ea;
TCGv_i64 xt;
MemOp mop;
if (store) {
REQUIRE_VECTOR(ctx);
} else {
REQUIRE_VSX(ctx);
}
xt = tcg_temp_new_i64();
mop = DEF_MEMOP(MO_UQ);
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, ra, displ);
if (store) {
get_cpu_vsr(xt, rt + 32, true);
tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
} else {
tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
set_cpu_vsr(rt + 32, xt, true);
set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
}
tcg_temp_free(ea);
tcg_temp_free_i64(xt);
return true;
}
static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store)
{
return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
}
static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
arg_D d;
if (!resolve_PLS_D(ctx, &d, a)) {
return true;
}
return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
}
static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
{
TCGv ea;
TCGv_i64 xt;
REQUIRE_VECTOR(ctx);
xt = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, ra, displ);
if (store) {
get_cpu_vsr(xt, rt + 32, true);
gen_qemu_st32fs(ctx, xt, ea);
} else {
gen_qemu_ld32fs(ctx, xt, ea);
set_cpu_vsr(rt + 32, xt, true);
set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
}
tcg_temp_free(ea);
tcg_temp_free_i64(xt);
return true;
}
static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store)
{
return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
}
static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
arg_D d;
if (!resolve_PLS_D(ctx, &d, a)) {
return true;
}
return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
}
TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false)
TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true)
TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false)
TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true)
TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false)
TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true)
TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false)
TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true)
TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)
static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store)
{
TCGv ea;
TCGv_i64 xt;
REQUIRE_VSX(ctx);
xt = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, a->ra , cpu_gpr[a->rb]);
if (store) {
get_cpu_vsr(xt, a->rt, false);
tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
} else {
tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
set_cpu_vsr(a->rt, xt, false);
set_cpu_vsr(a->rt, tcg_constant_i64(0), true);
}
tcg_temp_free(ea);
tcg_temp_free_i64(xt);
return true;
}
TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false)
TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false)
TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), false)
TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false)
TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true)
TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true)
TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true)
TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true)
static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
int64_t imm)
{
/*
* Instead of processing imm bit-by-bit, we'll skip the computation of
* conjunctions whose corresponding bit is unset.
*/
int bit;
TCGv_i64 conj, disj;
conj = tcg_temp_new_i64();
disj = tcg_const_i64(0);
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
/*
* Get the next bit to be processed with ctz64. Invert the result of
* ctz64 to match the indexing used by PowerISA.
*/
bit = 7 - ctz64(imm);
if (bit & 0x4) {
tcg_gen_mov_i64(conj, a);
} else {
tcg_gen_not_i64(conj, a);
}
if (bit & 0x2) {
tcg_gen_and_i64(conj, conj, b);
} else {
tcg_gen_andc_i64(conj, conj, b);
}
if (bit & 0x1) {
tcg_gen_and_i64(conj, conj, c);
} else {
tcg_gen_andc_i64(conj, conj, c);
}
tcg_gen_or_i64(disj, disj, conj);
/* Unset the least significant bit that is set */
imm &= imm - 1;
}
tcg_gen_mov_i64(t, disj);
tcg_temp_free_i64(conj);
tcg_temp_free_i64(disj);
}
static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
TCGv_vec c, int64_t imm)
{
/*
* Instead of processing imm bit-by-bit, we'll skip the computation of
* conjunctions whose corresponding bit is unset.
*/
int bit;
TCGv_vec disj, conj;
disj = tcg_const_zeros_vec_matching(t);
conj = tcg_temp_new_vec_matching(t);
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
/*
* Get the next bit to be processed with ctz64. Invert the result of
* ctz64 to match the indexing used by PowerISA.
*/
bit = 7 - ctz64(imm);
if (bit & 0x4) {
tcg_gen_mov_vec(conj, a);
} else {
tcg_gen_not_vec(vece, conj, a);
}
if (bit & 0x2) {
tcg_gen_and_vec(vece, conj, conj, b);
} else {
tcg_gen_andc_vec(vece, conj, conj, b);
}
if (bit & 0x1) {
tcg_gen_and_vec(vece, conj, conj, c);
} else {
tcg_gen_andc_vec(vece, conj, conj, c);
}
tcg_gen_or_vec(vece, disj, disj, conj);
/* Unset the least significant bit that is set */
imm &= imm - 1;
}
tcg_gen_mov_vec(t, disj);
tcg_temp_free_vec(disj);
tcg_temp_free_vec(conj);
}
static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a)
{
static const TCGOpcode vecop_list[] = {
INDEX_op_andc_vec, 0
};
static const GVecGen4i op = {
.fniv = gen_xxeval_vec,
.fno = gen_helper_XXEVAL,
.fni8 = gen_xxeval_i64,
.opt_opc = vecop_list,
.vece = MO_64
};
int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa),
xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
/* Equivalent functions that can be implemented with a single gen_gvec */
switch (a->imm) {
case 0b00000000: /* true */
set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
break;
case 0b00000011: /* and(B,A) */
tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
break;
case 0b00000101: /* and(C,A) */
tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16);
break;
case 0b00001111: /* A */
tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16);
break;
case 0b00010001: /* and(C,B) */
tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
break;
case 0b00011011: /* C?B:A */
tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
break;
case 0b00011101: /* B?C:A */
tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
break;
case 0b00100111: /* C?A:B */
tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
break;
case 0b00110011: /* B */
tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
break;
case 0b00110101: /* A?C:B */
tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
break;
case 0b00111100: /* xor(B,A) */
tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
break;
case 0b00111111: /* or(B,A) */
tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
break;
case 0b01000111: /* B?A:C */
tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
break;
case 0b01010011: /* A?B:C */
tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
break;
case 0b01010101: /* C */
tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16);
break;
case 0b01011010: /* xor(C,A) */
tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16);
break;
case 0b01011111: /* or(C,A) */
tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16);
break;
case 0b01100110: /* xor(C,B) */
tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
break;
case 0b01110111: /* or(C,B) */
tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
break;
case 0b10001000: /* nor(C,B) */
tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
break;
case 0b10011001: /* eqv(C,B) */
tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
break;
case 0b10100000: /* nor(C,A) */
tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16);
break;
case 0b10100101: /* eqv(C,A) */
tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16);
break;
case 0b10101010: /* not(C) */
tcg_gen_gvec_not(MO_64, xt, xc, 16, 16);
break;
case 0b11000000: /* nor(B,A) */
tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
break;
case 0b11000011: /* eqv(B,A) */
tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
break;
case 0b11001100: /* not(B) */
tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
break;
case 0b11101110: /* nand(C,B) */
tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
break;
case 0b11110000: /* not(A) */
tcg_gen_gvec_not(MO_64, xt, xa, 16, 16);
break;
case 0b11111010: /* nand(C,A) */
tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16);
break;
case 0b11111100: /* nand(B,A) */
tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
break;
case 0b11111111: /* true */
set_cpu_vsr(a->xt, tcg_constant_i64(-1), true);
set_cpu_vsr(a->xt, tcg_constant_i64(-1), false);
break;
default:
/* Fallback to compute all conjunctions/disjunctions */
tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
}
return true;
}
static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
TCGv_vec c)
@ -2140,7 +2627,7 @@ static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
tcg_temp_free_vec(tmp);
}
static bool do_xxblendv(DisasContext *ctx, arg_XX4 *a, unsigned vece)
static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece)
{
static const TCGOpcode vecop_list[] = {
INDEX_op_sari_vec, 0
@ -2186,8 +2673,8 @@ TRANS(XXBLENDVH, do_xxblendv, MO_16)
TRANS(XXBLENDVW, do_xxblendv, MO_32)
TRANS(XXBLENDVD, do_xxblendv, MO_64)
static bool do_xsmaxmincjdp(DisasContext *ctx, arg_XX3 *a,
void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
TCGv_ptr xt, xa, xb;
@ -2207,10 +2694,75 @@ static bool do_xsmaxmincjdp(DisasContext *ctx, arg_XX3 *a,
return true;
}
TRANS(XSMAXCDP, do_xsmaxmincjdp, gen_helper_xsmaxcdp)
TRANS(XSMINCDP, do_xsmaxmincjdp, gen_helper_xsmincdp)
TRANS(XSMAXJDP, do_xsmaxmincjdp, gen_helper_xsmaxjdp)
TRANS(XSMINJDP, do_xsmaxmincjdp, gen_helper_xsminjdp)
TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)
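The TRANS() lines above bind the decodetree-generated trans_* entry points to the shared do_helper_XX3 wrapper and the matching out-of-line helper. As a rough illustration, assuming the conventional ppc TRANS macro shape (the authoritative definition lives elsewhere in the translator), the XSCMPEQDP line expands to something like:

/* Illustrative expansion only; see the TRANS macro definition for the
 * exact form. */
static bool trans_XSCMPEQDP(DisasContext *ctx, arg_XSCMPEQDP *a)
{
    return do_helper_XX3(ctx, a, gen_helper_XSCMPEQDP);
}
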
static bool do_helper_X(arg_X *a,
void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
TCGv_ptr rt, ra, rb;
rt = gen_avr_ptr(a->rt);
ra = gen_avr_ptr(a->ra);
rb = gen_avr_ptr(a->rb);
helper(cpu_env, rt, ra, rb);
tcg_temp_free_ptr(rt);
tcg_temp_free_ptr(ra);
tcg_temp_free_ptr(rb);
return true;
}
static bool do_xscmpqp(DisasContext *ctx, arg_X *a,
void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
return do_helper_X(a, helper);
}
TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP)
TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP)
TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP)
TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP)
TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP)
static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a)
{
TCGv_ptr xt, xb;
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
xt = gen_vsr_ptr(a->xt);
xb = gen_vsr_ptr(a->xb);
gen_helper_XVCVSPBF16(cpu_env, xt, xb);
tcg_temp_free_ptr(xt);
tcg_temp_free_ptr(xb);
return true;
}
static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a)
{
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
REQUIRE_VSX(ctx);
tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb),
16, 16, 16);
return true;
}
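trans_XVCVBF16SPN can expand the conversion as a plain 16-bit left shift of every 32-bit element because bfloat16 is exactly the upper half of an IEEE single-precision encoding. A scalar analogue, editorial and not part of the patch (bf16_to_f32 is a made-up name):

#include <stdint.h>
#include <string.h>

/* Editorial sketch: widening bfloat16 to float32 only needs to place the
 * 16 payload bits in the top half of a 32-bit word, which is what the
 * tcg_gen_gvec_shli above does for each element. */
static float bf16_to_f32(uint16_t h)
{
    uint32_t bits = (uint32_t)h << 16;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}
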
#undef GEN_XX2FORM
#undef GEN_XX3FORM


@ -186,18 +186,6 @@ GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX),
GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX),
GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX),
GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX),
GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX),
GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX),
GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX),
GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX),
GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300),
GEN_XX3FORM(xscmpexpdp, 0x0C, 0x07, PPC2_ISA300),
GEN_VSX_XFORM_300(xscmpexpqp, 0x04, 0x05, 0x00600001),
GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
@ -235,14 +223,6 @@ GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207),
GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207),
GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207),
GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207),
GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207),
GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207),
GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207),
GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
@ -341,53 +321,6 @@ VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300),
GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300),
GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300),
GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300),
#define GEN_XXSEL_ROW(opc3) \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
GEN_XXSEL_ROW(0x00)
GEN_XXSEL_ROW(0x01)
GEN_XXSEL_ROW(0x02)
GEN_XXSEL_ROW(0x03)
GEN_XXSEL_ROW(0x04)
GEN_XXSEL_ROW(0x05)
GEN_XXSEL_ROW(0x06)
GEN_XXSEL_ROW(0x07)
GEN_XXSEL_ROW(0x08)
GEN_XXSEL_ROW(0x09)
GEN_XXSEL_ROW(0x0A)
GEN_XXSEL_ROW(0x0B)
GEN_XXSEL_ROW(0x0C)
GEN_XXSEL_ROW(0x0D)
GEN_XXSEL_ROW(0x0E)
GEN_XXSEL_ROW(0x0F)
GEN_XXSEL_ROW(0x10)
GEN_XXSEL_ROW(0x11)
GEN_XXSEL_ROW(0x12)
GEN_XXSEL_ROW(0x13)
GEN_XXSEL_ROW(0x14)
GEN_XXSEL_ROW(0x15)
GEN_XXSEL_ROW(0x16)
GEN_XXSEL_ROW(0x17)
GEN_XXSEL_ROW(0x18)
GEN_XXSEL_ROW(0x19)
GEN_XXSEL_ROW(0x1A)
GEN_XXSEL_ROW(0x1B)
GEN_XXSEL_ROW(0x1C)
GEN_XXSEL_ROW(0x1D)
GEN_XXSEL_ROW(0x1E)
GEN_XXSEL_ROW(0x1F)
GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),


@ -3987,3 +3987,9 @@ void tcg_register_jit(const void *buf, size_t buf_size)
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */
#undef VMULEUB
#undef VMULEUH
#undef VMULEUW
#undef VMULOUB
#undef VMULOUH
#undef VMULOUW


@ -836,6 +836,30 @@ static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_temp_free_i32(t0);
}
static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t cofs, uint32_t oprsz, int32_t c,
void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32,
int32_t))
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t3 = tcg_temp_new_i32();
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
tcg_gen_ld_i32(t1, cpu_env, aofs + i);
tcg_gen_ld_i32(t2, cpu_env, bofs + i);
tcg_gen_ld_i32(t3, cpu_env, cofs + i);
fni(t0, t1, t2, t3, c);
tcg_gen_st_i32(t0, cpu_env, dofs + i);
}
tcg_temp_free_i32(t3);
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t0);
}
/* Expand OPSZ bytes worth of two-operand operations using i64 elements. */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
@ -971,6 +995,30 @@ static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
tcg_temp_free_i64(t0);
}
static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
uint32_t cofs, uint32_t oprsz, int64_t c,
void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64,
int64_t))
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
tcg_gen_ld_i64(t1, cpu_env, aofs + i);
tcg_gen_ld_i64(t2, cpu_env, bofs + i);
tcg_gen_ld_i64(t3, cpu_env, cofs + i);
fni(t0, t1, t2, t3, c);
tcg_gen_st_i64(t0, cpu_env, dofs + i);
}
tcg_temp_free_i64(t3);
tcg_temp_free_i64(t2);
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t0);
}
/* Expand OPSZ bytes worth of two-operand operations using host vectors. */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t tysz, TCGType type,
@ -1121,6 +1169,35 @@ static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_temp_free_vec(t0);
}
/*
* Expand OPSZ bytes worth of four-vector operands and an immediate operand
* using host vectors.
*/
static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t cofs, uint32_t oprsz,
uint32_t tysz, TCGType type, int64_t c,
void (*fni)(unsigned, TCGv_vec, TCGv_vec,
TCGv_vec, TCGv_vec, int64_t))
{
TCGv_vec t0 = tcg_temp_new_vec(type);
TCGv_vec t1 = tcg_temp_new_vec(type);
TCGv_vec t2 = tcg_temp_new_vec(type);
TCGv_vec t3 = tcg_temp_new_vec(type);
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
tcg_gen_ld_vec(t1, cpu_env, aofs + i);
tcg_gen_ld_vec(t2, cpu_env, bofs + i);
tcg_gen_ld_vec(t3, cpu_env, cofs + i);
fni(vece, t0, t1, t2, t3, c);
tcg_gen_st_vec(t0, cpu_env, dofs + i);
}
tcg_temp_free_vec(t3);
tcg_temp_free_vec(t2);
tcg_temp_free_vec(t1);
tcg_temp_free_vec(t0);
}
/* Expand a vector two-operand operation. */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
@ -1533,6 +1610,75 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
}
}
/* Expand a vector four-operand operation with an immediate operand. */
void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
uint32_t oprsz, uint32_t maxsz, int64_t c,
const GVecGen4i *g)
{
const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
TCGType type;
uint32_t some;
check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
type = 0;
if (g->fniv) {
type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
}
switch (type) {
case TCG_TYPE_V256:
/*
* Recall that ARM SVE allows vector sizes that are not a
* power of 2, but always a multiple of 16. The intent is
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, some,
32, TCG_TYPE_V256, c, g->fniv);
if (some == oprsz) {
break;
}
dofs += some;
aofs += some;
bofs += some;
cofs += some;
oprsz -= some;
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
16, TCG_TYPE_V128, c, g->fniv);
break;
case TCG_TYPE_V64:
expand_4i_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
8, TCG_TYPE_V64, c, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
expand_4i_i64(dofs, aofs, bofs, cofs, oprsz, c, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
expand_4i_i32(dofs, aofs, bofs, cofs, oprsz, c, g->fni4);
} else {
assert(g->fno != NULL);
tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
oprsz, maxsz, c, g->fno);
oprsz = maxsz;
}
break;
default:
g_assert_not_reached();
}
tcg_swap_vecop_list(hold_list);
if (oprsz < maxsz) {
expand_clr(dofs + oprsz, maxsz - oprsz);
}
}
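For callers, here is a minimal usage sketch of the new expander, in the spirit of the XXEVAL fallback earlier in this commit: fill in a GVecGen4i with per-element callbacks and let tcg_gen_gvec_4i pick the widest available path. The names gen_example_i64 and do_example_4i are made up for illustration; the field names match the struct consumed by the function above, and a real user would typically also supply .fniv and .fno so the host-vector and out-of-line paths are covered.

/* Illustrative only: d = (a & b) ^ c ^ imm, one 64-bit lane at a time.
 * This matches the .fni8 callback signature used by expand_4i_i64. */
static void gen_example_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b,
                            TCGv_i64 c, int64_t imm)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_xor_i64(d, d, c);
    tcg_gen_xori_i64(d, d, imm);
}

static void do_example_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                          int64_t imm)
{
    static const GVecGen4i op = {
        .fni8 = gen_example_i64,
        .vece = MO_64,
        /* .fniv / .fno omitted in this sketch; the switch above shows
         * which paths require each of them. */
    };
    tcg_gen_gvec_4i(dofs, aofs, bofs, cofs, oprsz, maxsz, imm, &op);
}
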
/*
* Expand specific vector operations.
*/