Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190426' into staging

ppc patch queue 2019-04-26

Here's the first ppc target pull request for qemu-4.1.  This has a
number of things that have accumulated while qemu-4.0 was frozen.

 * A number of emulated MMU improvements from Ben Herrenschmidt

 * Assorted cleanups from Greg Kurz

 * A large set of mostly mechanical cleanups from me to bring target/ppc
   much closer to compliance with the modern coding style

 * Support for passthrough of NVIDIA GPUs using NVLink2

As well as some other assorted fixes.

# gpg: Signature made Fri 26 Apr 2019 07:02:19 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190426: (36 commits)
  target/ppc: improve performance of large BAT invalidations
  ppc/hash32: Rework R and C bit updates
  ppc/hash64: Rework R and C bit updates
  ppc/spapr: Use proper HPTE accessors for H_READ
  target/ppc: Don't check UPRT in radix mode when in HV real mode
  target/ppc/kvm: Convert DPRINTF to traces
  target/ppc/trace-events: Fix trivial typo
  spapr: Drop duplicate PCI swizzle code
  spapr_pci: Get rid of duplicate code for node name creation
  target/ppc: Style fixes for translate/spe-impl.inc.c
  target/ppc: Style fixes for translate/vmx-impl.inc.c
  target/ppc: Style fixes for translate/vsx-impl.inc.c
  target/ppc: Style fixes for translate/fp-impl.inc.c
  target/ppc: Style fixes for translate.c
  target/ppc: Style fixes for translate_init.inc.c
  target/ppc: Style fixes for monitor.c
  target/ppc: Style fixes for mmu_helper.c
  target/ppc: Style fixes for mmu-hash64.[ch]
  target/ppc: Style fixes for mmu-hash32.[ch]
  target/ppc: Style fixes for misc_helper.c
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell committed 2019-04-27 21:34:46 +01:00
commit 9ec34ecc97
45 changed files with 2152 additions and 973 deletions

View File

@ -1556,7 +1556,7 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
*/
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS;
return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
}
/***********************************************************/
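The swizzle itself is now shared via pci_swizzle() in include/hw/pci/pci.h (see the header hunk below). As a worked example, standard swizzling folds (slot + pin) onto the four LSIs, so a device in slot 5 raising INTB lands on INTC; a standalone sketch, not QEMU code:

/* Standalone illustration of the swizzle; not QEMU code. */
#include <stdio.h>

#define PCI_NUM_PINS 4   /* INTA..INTD */

static int pci_swizzle(int slot, int pin)
{
    return (slot + pin) % PCI_NUM_PINS;
}

int main(void)
{
    /* slot 5, INTB (pin 1): (5 + 1) % 4 = 2, i.e. INTC */
    printf("%d\n", pci_swizzle(5, 1));
    return 0;
}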

View File

@ -9,7 +9,7 @@ obj-$(CONFIG_SPAPR_RNG) += spapr_rng.o
# IBM PowerNV
obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o
ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
obj-y += spapr_pci_vfio.o
obj-y += spapr_pci_vfio.o spapr_pci_nvlink2.o
endif
obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o
# PowerPC 4xx boards

View File

@ -40,7 +40,6 @@
#include "hw/ide.h"
#include "hw/loader.h"
#include "hw/timer/mc146818rtc.h"
#include "hw/input/i8042.h"
#include "hw/isa/pc87312.h"
#include "hw/net/ne2000-isa.h"
#include "sysemu/arch_init.h"

View File

@ -1034,12 +1034,13 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
cpu_to_be32(max_cpus / smp_threads),
};
uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0);
uint32_t maxdomains[] = {
cpu_to_be32(4),
cpu_to_be32(0),
cpu_to_be32(0),
cpu_to_be32(0),
cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1),
maxdomain,
maxdomain,
maxdomain,
cpu_to_be32(spapr->gpu_numa_id),
};
_FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
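To make the effect concrete, here is a worked example of what ibm,max-associativity-domains ends up holding (host-endian values before cpu_to_be32; configurations assumed for illustration, and assuming the vPHB reset handler has already collected any GPUs):

/*
 * Worked example, following the logic above:
 *
 *   no -numa nodes, no GPUs:
 *       gpu_numa_id = MAX(1, 0) = 1, maxdomain = 0
 *       ibm,max-associativity-domains = { 4, 0, 0, 0, 1 }
 *
 *   same machine with one NVLink2 GPU collected by the vPHB:
 *       gpu_numa_id = 2, maxdomain = 1
 *       ibm,max-associativity-domains = { 4, 1, 1, 1, 2 }
 */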
@ -1519,10 +1520,10 @@ static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
/* Nothing to do for qemu managed HPT */
}
static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
uint64_t pte0, uint64_t pte1)
void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1)
{
SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
hwaddr offset = ptex * HASH_PTE_SIZE_64;
if (!spapr->htab) {
@ -1550,6 +1551,38 @@ static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
}
}
static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
uint64_t pte1)
{
hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15;
SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
if (!spapr->htab) {
/* There should always be a hash table when this is called */
error_report("spapr_hpte_set_c called with no hash table !");
return;
}
/* The HW performs a non-atomic byte update */
stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
}
static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
uint64_t pte1)
{
hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14;
SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
if (!spapr->htab) {
/* There should always be a hash table when this is called */
error_report("spapr_hpte_set_r called with no hash table !");
return;
}
/* The HW performs a non-atomic byte update */
stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
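The byte offsets follow from the big-endian layout of a 16-byte HPTE: pte1 is the second doubleword, so its least significant byte, which holds the C bit (HPTE64_R_C = 0x80), sits at offset 15, and the next byte up, which holds the R bit (HPTE64_R_R = 0x100, hence 0x01 after the shift), sits at offset 14. A minimal sketch of that arithmetic, outside QEMU:

/* Sketch of the R/C byte math above; assumes a big-endian in-memory
 * hash table like spapr->htab. Not QEMU code. */
#include <stdint.h>
#include <stdio.h>

#define HASH_PTE_SIZE_64 16

static void hpte_set_c(uint8_t *htab, uint64_t ptex, uint64_t pte1)
{
    /* last byte of pte1, with the C bit (0x80) forced on */
    htab[ptex * HASH_PTE_SIZE_64 + 15] = (pte1 & 0xff) | 0x80;
}

static void hpte_set_r(uint8_t *htab, uint64_t ptex, uint64_t pte1)
{
    /* second-to-last byte of pte1, with the R bit (0x01) forced on */
    htab[ptex * HASH_PTE_SIZE_64 + 14] = ((pte1 >> 8) & 0xff) | 0x01;
}

int main(void)
{
    uint8_t htab[2 * HASH_PTE_SIZE_64] = { 0 };

    hpte_set_r(htab, 1, 0);
    hpte_set_c(htab, 1, 0);
    printf("%02x %02x\n", htab[30], htab[31]);   /* prints: 01 80 */
    return 0;
}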
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
int shift;
@ -1698,6 +1731,16 @@ static void spapr_machine_reset(void)
spapr_irq_msi_reset(spapr);
}
/*
* NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
* We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
* called from vPHB reset handler so we initialize the counter here.
* If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
* must be equally distant from any other node.
* The final value of spapr->gpu_numa_id is going to be written to
* max-associativity-domains in spapr_build_fdt().
*/
spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
qemu_devices_reset();
/*
@ -3907,7 +3950,9 @@ static void spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
smc->phb_placement(spapr, sphb->index,
&sphb->buid, &sphb->io_win_addr,
&sphb->mem_win_addr, &sphb->mem64_win_addr,
windows_supported, sphb->dma_liobn, errp);
windows_supported, sphb->dma_liobn,
&sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
errp);
}
static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@ -4108,7 +4153,8 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns, Error **errp)
unsigned n_dma, uint32_t *liobns,
hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
/*
* New-style PHB window placement.
@ -4153,6 +4199,9 @@ static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
*pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
*mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
*mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
*nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
*nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
}
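A sketch of the resulting placement for the first two vPHB indices, using the constants added to include/hw/pci-host/spapr.h later in this series (the 64 TiB base follows the comment there; illustrative only, not QEMU code):

/* Worked NVLink2 window placement per vPHB index; illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define KiB (1ULL << 10)
#define TiB (1ULL << 40)
#define NVGPU_MAX_NUM   6
#define NVGPU_MAX_LINKS 3

#define NV2RAM64_WIN_BASE (64 * TiB)
#define NV2RAM64_WIN_SIZE (2 * TiB)
#define NV2ATSD_WIN_BASE  (128 * TiB)
#define NV2ATSD_WIN_SIZE  (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * 64 * KiB)

int main(void)
{
    for (uint32_t index = 0; index < 2; index++) {
        uint64_t nv2gpa  = NV2RAM64_WIN_BASE + index * NV2RAM64_WIN_SIZE;
        uint64_t nv2atsd = NV2ATSD_WIN_BASE + index * NV2ATSD_WIN_SIZE;
        printf("phb %u: gpa 0x%" PRIx64 " atsd 0x%" PRIx64 "\n",
               index, nv2gpa, nv2atsd);
    }
    return 0;
}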
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
@ -4274,7 +4323,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
vhc->hpt_mask = spapr_hpt_mask;
vhc->map_hptes = spapr_map_hptes;
vhc->unmap_hptes = spapr_unmap_hptes;
vhc->store_hpte = spapr_store_hpte;
vhc->hpte_set_c = spapr_hpte_set_c;
vhc->hpte_set_r = spapr_hpte_set_r;
vhc->get_pate = spapr_get_pate;
vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
xic->ics_get = spapr_ics_get;
@ -4368,6 +4418,18 @@ DEFINE_SPAPR_MACHINE(4_0, "4.0", false);
/*
* pseries-3.1
*/
static void phb_placement_3_1(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns,
hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns,
nv2gpa, nv2atsd, errp);
*nv2gpa = 0;
*nv2atsd = 0;
}
static void spapr_machine_3_1_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
@ -4383,6 +4445,7 @@ static void spapr_machine_3_1_class_options(MachineClass *mc)
smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
smc->phb_placement = phb_placement_3_1;
}
DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
@ -4514,7 +4577,8 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns, Error **errp)
unsigned n_dma, uint32_t *liobns,
hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
/* Legacy PHB placement for pseries-2.7 and earlier machine types */
const uint64_t base_buid = 0x800000020000000ULL;
@ -4558,6 +4622,9 @@ static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
* fallback behaviour of automatically splitting a large "32-bit"
* window into contiguous 32-bit and 64-bit windows
*/
*nv2gpa = 0;
*nv2atsd = 0;
}
static void spapr_machine_2_7_class_options(MachineClass *mc)

View File

@ -118,7 +118,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
}
ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
args[0] = ptex + slot;
return H_SUCCESS;
@ -131,7 +131,8 @@ typedef enum {
REMOVE_HW = 3,
} RemoveResult;
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
static RemoveResult remove_hpte(PowerPCCPU *cpu
, target_ulong ptex,
target_ulong avpn,
target_ulong flags,
target_ulong *vp, target_ulong *rp)
@ -155,7 +156,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
}
*vp = v;
*rp = r;
ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
return REMOVE_SUCCESS;
}
@ -289,13 +290,13 @@ static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
r |= (flags << 55) & HPTE64_R_PP0;
r |= (flags << 48) & HPTE64_R_KEY_HI;
r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
ppc_hash64_store_hpte(cpu, ptex,
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
spapr_store_hpte(cpu, ptex,
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
/* Flush the tlb */
check_tlb_flush(env, true);
/* Don't need a memory barrier, due to qemu's global lock */
ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
return H_SUCCESS;
}
@ -304,8 +305,8 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
{
target_ulong flags = args[0];
target_ulong ptex = args[1];
uint8_t *hpte;
int i, ridx, n_entries = 1;
const ppc_hash_pte64_t *hptes;
if (!valid_ptex(cpu, ptex)) {
return H_PARAMETER;
@ -317,13 +318,12 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
n_entries = 4;
}
hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64);
hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
for (i = 0, ridx = 0; i < n_entries; i++) {
args[ridx++] = ldq_p(hpte);
args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
hpte += HASH_PTE_SIZE_64;
args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
}
ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);
return H_SUCCESS;
}

View File

@ -67,36 +67,11 @@ void spapr_irq_msi_reset(SpaprMachineState *spapr)
* XICS IRQ backend.
*/
static ICSState *spapr_ics_create(SpaprMachineState *spapr,
int nr_irqs, Error **errp)
{
Error *local_err = NULL;
Object *obj;
obj = object_new(TYPE_ICS_SIMPLE);
object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
&error_abort);
object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
if (local_err) {
goto error;
}
object_property_set_bool(obj, true, "realized", &local_err);
if (local_err) {
goto error;
}
return ICS_BASE(obj);
error:
error_propagate(errp, local_err);
return NULL;
}
static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
Error **errp)
{
MachineState *machine = MACHINE(spapr);
Object *obj;
Error *local_err = NULL;
bool xics_kvm = false;
@ -108,7 +83,8 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
if (machine_kernel_irqchip_required(machine) && !xics_kvm) {
error_prepend(&local_err,
"kernel_irqchip requested but unavailable: ");
goto error;
error_propagate(errp, local_err);
return;
}
error_free(local_err);
local_err = NULL;
@ -118,10 +94,18 @@ static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
xics_spapr_init(spapr);
}
spapr->ics = spapr_ics_create(spapr, nr_irqs, &local_err);
obj = object_new(TYPE_ICS_SIMPLE);
object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
&error_fatal);
object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal);
object_property_set_bool(obj, true, "realized", &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
error:
error_propagate(errp, local_err);
spapr->ics = ICS_BASE(obj);
}
#define ICS_IRQ_FREE(ics, srcno) \

View File

@ -719,26 +719,10 @@ param_error_exit:
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
static int pci_spapr_swizzle(int slot, int pin)
{
return (slot + pin) % PCI_NUM_PINS;
}
static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
{
/*
* Here we need to convert pci_dev + irq_num to some unique value
* which is less than number of IRQs on the specific bus (4). We
* use standard PCI swizzling, that is (slot number + pin number)
* % 4.
*/
return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
}
static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
{
/*
* Here we use the number returned by pci_spapr_map_irq to find a
* Here we use the number returned by pci_swizzle_map_irq_fn to find a
* corresponding qemu_irq.
*/
SpaprPhbState *phb = opaque;
@ -1355,6 +1339,8 @@ static void spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
if (sphb->pcie_ecs && pci_is_express(dev)) {
_FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
}
spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb);
}
/* create OF node for pci device and required OF DT properties */
@ -1587,6 +1573,8 @@ static void spapr_phb_unrealize(DeviceState *dev, Error **errp)
int i;
const unsigned windows_supported = spapr_phb_windows_supported(sphb);
spapr_phb_nvgpu_free(sphb);
if (sphb->msi) {
g_hash_table_unref(sphb->msi);
sphb->msi = NULL;
@ -1762,7 +1750,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
&sphb->iowindow);
bus = pci_register_root_bus(dev, NULL,
pci_spapr_set_irq, pci_spapr_map_irq, sphb,
pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb,
&sphb->memspace, &sphb->iospace,
PCI_DEVFN(0, 0), PCI_NUM_PINS,
TYPE_SPAPR_PHB_ROOT_BUS);
@ -1898,8 +1886,14 @@ void spapr_phb_dma_reset(SpaprPhbState *sphb)
static void spapr_phb_reset(DeviceState *qdev)
{
SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
Error *errp = NULL;
spapr_phb_dma_reset(sphb);
spapr_phb_nvgpu_free(sphb);
spapr_phb_nvgpu_setup(sphb, &errp);
if (errp) {
error_report_err(errp);
}
/* Reset the IOMMU state */
object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
@ -1932,6 +1926,8 @@ static Property spapr_phb_properties[] = {
pre_2_8_migration, false),
DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
pcie_ecs, true),
DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0),
DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0),
DEFINE_PROP_END_OF_LIST(),
};
@ -2164,7 +2160,6 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
uint32_t nr_msis, int *node_offset)
{
int bus_off, i, j, ret;
gchar *nodename;
uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
struct {
uint32_t hi;
@ -2212,11 +2207,10 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
SpaprFdt s_fdt;
SpaprDrc *drc;
Error *errp = NULL;
/* Start populating the FDT */
nodename = g_strdup_printf("pci@%" PRIx64, phb->buid);
_FDT(bus_off = fdt_add_subnode(fdt, 0, nodename));
g_free(nodename);
_FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname));
if (node_offset) {
*node_offset = bus_off;
}
@ -2249,14 +2243,14 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
}
/* Build the interrupt-map, this must matches what is done
* in pci_spapr_map_irq
* in pci_swizzle_map_irq_fn
*/
_FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
&interrupt_map_mask, sizeof(interrupt_map_mask)));
for (i = 0; i < PCI_SLOT_MAX; i++) {
for (j = 0; j < PCI_NUM_PINS; j++) {
uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
int lsi_num = pci_spapr_swizzle(i, j);
int lsi_num = pci_swizzle(i, j);
irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
irqmap[1] = 0;
@ -2304,6 +2298,12 @@ int spapr_populate_pci_dt(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
return ret;
}
spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &errp);
if (errp) {
error_report_err(errp);
}
spapr_phb_nvgpu_ram_populate_dt(phb, fdt);
return 0;
}

hw/ppc/spapr_pci_nvlink2.c (new file, 450 lines)
View File

@ -0,0 +1,450 @@
/*
* QEMU sPAPR PCI for NVLink2 pass through
*
* Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/spapr.h"
#include "qemu/error-report.h"
#include "hw/ppc/fdt.h"
#include "hw/pci/pci_bridge.h"
#define PHANDLE_PCIDEV(phb, pdev) (0x12000000 | \
(((phb)->index) << 16) | ((pdev)->devfn))
#define PHANDLE_GPURAM(phb, n) (0x110000FF | ((n) << 8) | \
(((phb)->index) << 16))
#define PHANDLE_NVLINK(phb, gn, nn) (0x00130000 | (((phb)->index) << 8) | \
((gn) << 4) | (nn))
#define SPAPR_GPU_NUMA_ID (cpu_to_be32(1))
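For reference, the phandle encodings above produce values like the following (macros simplified here to take the PHB index directly; the index and devfn values are made up for illustration):

/* Worked phandle values; simplified, illustrative only. */
#include <stdio.h>

#define PHANDLE_PCIDEV(idx, devfn)  (0x12000000 | ((idx) << 16) | (devfn))
#define PHANDLE_GPURAM(idx, n)      (0x110000FF | ((n) << 8) | ((idx) << 16))
#define PHANDLE_NVLINK(idx, gn, nn) (0x00130000 | ((idx) << 8) | \
                                     ((gn) << 4) | (nn))

int main(void)
{
    /* GPU at devfn 0x08 behind vPHB index 1, GPU slot 0, link 0 */
    printf("pcidev 0x%08x\n", PHANDLE_PCIDEV(1, 0x08)); /* 0x12010008 */
    printf("gpuram 0x%08x\n", PHANDLE_GPURAM(1, 0));    /* 0x110100ff */
    printf("nvlink 0x%08x\n", PHANDLE_NVLINK(1, 0, 0)); /* 0x00130100 */
    return 0;
}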
struct spapr_phb_pci_nvgpu_config {
uint64_t nv2_ram_current;
uint64_t nv2_atsd_current;
int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
struct spapr_phb_pci_nvgpu_slot {
uint64_t tgt;
uint64_t gpa;
unsigned numa_id;
PCIDevice *gpdev;
int linknum;
struct {
uint64_t atsd_gpa;
PCIDevice *npdev;
uint32_t link_speed;
} links[NVGPU_MAX_LINKS];
} slots[NVGPU_MAX_NUM];
Error *errp;
};
static struct spapr_phb_pci_nvgpu_slot *
spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt)
{
int i;
/* Search for partially collected "slot" */
for (i = 0; i < nvgpus->num; ++i) {
if (nvgpus->slots[i].tgt == tgt) {
return &nvgpus->slots[i];
}
}
if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) {
return NULL;
}
i = nvgpus->num;
nvgpus->slots[i].tgt = tgt;
++nvgpus->num;
return &nvgpus->slots[i];
}
static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
PCIDevice *pdev, uint64_t tgt,
MemoryRegion *mr, Error **errp)
{
MachineState *machine = MACHINE(qdev_get_machine());
SpaprMachineState *spapr = SPAPR_MACHINE(machine);
struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
if (!nvslot) {
error_setg(errp, "Found too many GPUs per vPHB");
return;
}
g_assert(!nvslot->gpdev);
nvslot->gpdev = pdev;
nvslot->gpa = nvgpus->nv2_ram_current;
nvgpus->nv2_ram_current += memory_region_size(mr);
nvslot->numa_id = spapr->gpu_numa_id;
++spapr->gpu_numa_id;
}
static void spapr_pci_collect_nvnpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
PCIDevice *pdev, uint64_t tgt,
MemoryRegion *mr, Error **errp)
{
struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
int j;
if (!nvslot) {
error_setg(errp, "Found too many NVLink bridges per vPHB");
return;
}
j = nvslot->linknum;
if (j == ARRAY_SIZE(nvslot->links)) {
error_setg(errp, "Found too many NVLink bridges per GPU");
return;
}
++nvslot->linknum;
g_assert(!nvslot->links[j].npdev);
nvslot->links[j].npdev = pdev;
nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current;
nvgpus->nv2_atsd_current += memory_region_size(mr);
nvslot->links[j].link_speed =
object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL);
}
static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
void *opaque)
{
PCIBus *sec_bus;
Object *po = OBJECT(pdev);
uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL);
if (tgt) {
Error *local_err = NULL;
struct spapr_phb_pci_nvgpu_config *nvgpus = opaque;
Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL);
Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]",
NULL);
g_assert(mr_gpu || mr_npu);
if (mr_gpu) {
spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu),
&local_err);
} else {
spapr_pci_collect_nvnpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_npu),
&local_err);
}
error_propagate(&nvgpus->errp, local_err);
}
if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
PCI_HEADER_TYPE_BRIDGE)) {
return;
}
sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
if (!sec_bus) {
return;
}
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
spapr_phb_pci_collect_nvgpu, opaque);
}
void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
{
int i, j, valid_gpu_num;
PCIBus *bus;
/* Search for GPUs and NPUs */
if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) {
return;
}
sphb->nvgpus = g_new0(struct spapr_phb_pci_nvgpu_config, 1);
sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr;
sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
bus = PCI_HOST_BRIDGE(sphb)->bus;
pci_for_each_device(bus, pci_bus_num(bus),
spapr_phb_pci_collect_nvgpu, sphb->nvgpus);
if (sphb->nvgpus->errp) {
error_propagate(errp, sphb->nvgpus->errp);
sphb->nvgpus->errp = NULL;
goto cleanup_exit;
}
/* Add found GPU RAM and ATSD MRs if found */
for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) {
Object *nvmrobj;
struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
if (!nvslot->gpdev) {
continue;
}
nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev),
"nvlink2-mr[0]", NULL);
/* ATSD is pointless without GPU RAM MR so skip those */
if (!nvmrobj) {
continue;
}
++valid_gpu_num;
memory_region_add_subregion(get_system_memory(), nvslot->gpa,
MEMORY_REGION(nvmrobj));
for (j = 0; j < nvslot->linknum; ++j) {
Object *atsdmrobj;
atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev),
"nvlink2-atsd-mr[0]", NULL);
if (!atsdmrobj) {
continue;
}
memory_region_add_subregion(get_system_memory(),
nvslot->links[j].atsd_gpa,
MEMORY_REGION(atsdmrobj));
}
}
if (valid_gpu_num) {
return;
}
/* We did not find any interesting GPU */
cleanup_exit:
g_free(sphb->nvgpus);
sphb->nvgpus = NULL;
}
void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
{
int i, j;
if (!sphb->nvgpus) {
return;
}
for (i = 0; i < sphb->nvgpus->num; ++i) {
struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
"nvlink2-mr[0]", NULL);
if (nv_mrobj) {
memory_region_del_subregion(get_system_memory(),
MEMORY_REGION(nv_mrobj));
}
for (j = 0; j < nvslot->linknum; ++j) {
PCIDevice *npdev = nvslot->links[j].npdev;
Object *atsd_mrobj;
atsd_mrobj = object_property_get_link(OBJECT(npdev),
"nvlink2-atsd-mr[0]", NULL);
if (atsd_mrobj) {
memory_region_del_subregion(get_system_memory(),
MEMORY_REGION(atsd_mrobj));
}
}
}
g_free(sphb->nvgpus);
sphb->nvgpus = NULL;
}
void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
Error **errp)
{
int i, j, atsdnum = 0;
uint64_t atsd[8]; /* The existing limitation of known guests */
if (!sphb->nvgpus) {
return;
}
for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) {
struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
if (!nvslot->gpdev) {
continue;
}
for (j = 0; j < nvslot->linknum; ++j) {
if (!nvslot->links[j].atsd_gpa) {
continue;
}
if (atsdnum == ARRAY_SIZE(atsd)) {
error_report("Only %"PRIuPTR" ATSD registers supported",
ARRAY_SIZE(atsd));
break;
}
atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa);
++atsdnum;
}
}
if (!atsdnum) {
error_setg(errp, "No ATSD registers found");
return;
}
if (!spapr_phb_eeh_available(sphb)) {
/*
* ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB
* which we do not emulate as a separate device. Instead we put
* ibm,mmio-atsd to the vPHB with GPU and make sure that we do not
* put GPUs from different IOMMU groups to the same vPHB to ensure
* that the guest will use ATSDs from the corresponding NPU.
*/
error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group");
return;
}
_FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd,
atsdnum * sizeof(atsd[0]))));
}
void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
{
int i, j, linkidx, npuoff;
char *npuname;
if (!sphb->nvgpus) {
return;
}
npuname = g_strdup_printf("npuphb%d", sphb->index);
npuoff = fdt_add_subnode(fdt, 0, npuname);
_FDT(npuoff);
_FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1));
_FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0));
/* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */
_FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu")));
g_free(npuname);
for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) {
for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) {
char *linkname = g_strdup_printf("link@%d", linkidx);
int off = fdt_add_subnode(fdt, npuoff, linkname);
_FDT(off);
/* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */
_FDT((fdt_setprop_string(fdt, off, "compatible",
"ibm,npu-link")));
_FDT((fdt_setprop_cell(fdt, off, "phandle",
PHANDLE_NVLINK(sphb, i, j))));
_FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx)));
g_free(linkname);
++linkidx;
}
}
/* Add memory nodes for GPU RAM and mark them unusable */
for (i = 0; i < sphb->nvgpus->num; ++i) {
struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
"nvlink2-mr[0]", NULL);
uint32_t associativity[] = {
cpu_to_be32(0x4),
SPAPR_GPU_NUMA_ID,
SPAPR_GPU_NUMA_ID,
SPAPR_GPU_NUMA_ID,
cpu_to_be32(nvslot->numa_id)
};
uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
int off = fdt_add_subnode(fdt, 0, mem_name);
_FDT(off);
_FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
_FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
_FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
sizeof(associativity))));
_FDT((fdt_setprop_string(fdt, off, "compatible",
"ibm,coherent-device-memory")));
mem_reg[1] = cpu_to_be64(0);
_FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg,
sizeof(mem_reg))));
_FDT((fdt_setprop_cell(fdt, off, "phandle",
PHANDLE_GPURAM(sphb, i))));
g_free(mem_name);
}
}
void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
SpaprPhbState *sphb)
{
int i, j;
if (!sphb->nvgpus) {
return;
}
for (i = 0; i < sphb->nvgpus->num; ++i) {
struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
/* Skip "slot" without attached GPU */
if (!nvslot->gpdev) {
continue;
}
if (dev == nvslot->gpdev) {
uint32_t npus[nvslot->linknum];
for (j = 0; j < nvslot->linknum; ++j) {
PCIDevice *npdev = nvslot->links[j].npdev;
npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev));
}
_FDT(fdt_setprop(fdt, offset, "ibm,npu", npus,
j * sizeof(npus[0])));
_FDT((fdt_setprop_cell(fdt, offset, "phandle",
PHANDLE_PCIDEV(sphb, dev))));
continue;
}
for (j = 0; j < nvslot->linknum; ++j) {
if (dev != nvslot->links[j].npdev) {
continue;
}
_FDT((fdt_setprop_cell(fdt, offset, "phandle",
PHANDLE_PCIDEV(sphb, dev))));
_FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu",
PHANDLE_PCIDEV(sphb, nvslot->gpdev)));
_FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink",
PHANDLE_NVLINK(sphb, i, j))));
/*
* If we ever want to emulate GPU RAM at the same location as on
* the host - here is the encoding GPA->TGT:
*
* gta = ((sphb->nv2_gpa >> 42) & 0x1) << 42;
* gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43;
* gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45;
* gta |= sphb->nv2_gpa & ((1UL << 43) - 1);
*/
_FDT(fdt_setprop_cell(fdt, offset, "memory-region",
PHANDLE_GPURAM(sphb, i)));
_FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr",
nvslot->tgt));
_FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed",
nvslot->links[j].link_speed));
}
}
}
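The commented-out GPA->TGT encoding above can be read as a standalone helper; a sketch that follows the comment verbatim (1ULL used here for portability):

/* The GPA->TGT encoding from the comment above, as a standalone sketch. */
#include <stdint.h>

static uint64_t nv2_gpa_to_gta(uint64_t nv2_gpa)
{
    uint64_t gta;

    gta  = ((nv2_gpa >> 42) & 0x1) << 42;
    gta |= ((nv2_gpa >> 45) & 0x3) << 43;
    gta |= ((nv2_gpa >> 49) & 0x3) << 45;
    gta |= nv2_gpa & ((1ULL << 43) - 1);

    return gta;
}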

View File

@ -404,7 +404,7 @@ void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
token -= RTAS_TOKEN_BASE;
assert(!rtas_table[token].name);
assert(!name || !rtas_table[token].name);
rtas_table[token].name = name;
rtas_table[token].fn = fn;

View File

@ -2180,3 +2180,134 @@ int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
return 0;
}
static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v,
const char *name,
void *opaque, Error **errp)
{
uint64_t tgt = (uintptr_t) opaque;
visit_type_uint64(v, name, &tgt, errp);
}
static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v,
const char *name,
void *opaque, Error **errp)
{
uint32_t link_speed = (uint32_t)(uintptr_t) opaque;
visit_type_uint32(v, name, &link_speed, errp);
}
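Both getters use the same trick: the property value is encoded into the opaque pointer itself via uintptr_t, so nothing has to be allocated or freed per device. A reduced model of the pattern with made-up signatures (and assuming a 64-bit host, as the code above effectively does):

/* Reduced model of the opaque-pointer value-passing above; not QEMU code. */
#include <stdint.h>
#include <stdio.h>

static void get_u64_from_opaque(void *opaque)
{
    uint64_t val = (uintptr_t)opaque;       /* decode, never dereferenced */
    printf("0x%llx\n", (unsigned long long)val);
}

int main(void)
{
    uint64_t tgt = 0x8000000000ULL;         /* fits a 64-bit pointer */
    get_u64_from_opaque((void *)(uintptr_t)tgt);   /* encode at setup time */
    return 0;
}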
int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp)
{
int ret;
void *p;
struct vfio_region_info *nv2reg = NULL;
struct vfio_info_cap_header *hdr;
struct vfio_region_info_cap_nvlink2_ssatgt *cap;
VFIOQuirk *quirk;
ret = vfio_get_dev_region_info(&vdev->vbasedev,
VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
PCI_VENDOR_ID_NVIDIA,
VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
&nv2reg);
if (ret) {
return ret;
}
hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
if (!hdr) {
ret = -ENODEV;
goto free_exit;
}
cap = (void *) hdr;
p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset);
if (p == MAP_FAILED) {
ret = -errno;
goto free_exit;
}
quirk = vfio_quirk_alloc(1);
memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr",
nv2reg->size, p);
QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
vfio_pci_nvlink2_get_tgt, NULL, NULL,
(void *) (uintptr_t) cap->tgt, NULL);
trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt,
nv2reg->size);
free_exit:
g_free(nv2reg);
return ret;
}
int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp)
{
int ret;
void *p;
struct vfio_region_info *atsdreg = NULL;
struct vfio_info_cap_header *hdr;
struct vfio_region_info_cap_nvlink2_ssatgt *captgt;
struct vfio_region_info_cap_nvlink2_lnkspd *capspeed;
VFIOQuirk *quirk;
ret = vfio_get_dev_region_info(&vdev->vbasedev,
VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
PCI_VENDOR_ID_IBM,
VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
&atsdreg);
if (ret) {
return ret;
}
hdr = vfio_get_region_info_cap(atsdreg,
VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
if (!hdr) {
ret = -ENODEV;
goto free_exit;
}
captgt = (void *) hdr;
hdr = vfio_get_region_info_cap(atsdreg,
VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD);
if (!hdr) {
ret = -ENODEV;
goto free_exit;
}
capspeed = (void *) hdr;
/* Some NVLink bridges may not have assigned ATSD */
if (atsdreg->size) {
p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset);
if (p == MAP_FAILED) {
ret = -errno;
goto free_exit;
}
quirk = vfio_quirk_alloc(1);
memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev),
"nvlink2-atsd-mr", atsdreg->size, p);
QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
}
object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
vfio_pci_nvlink2_get_tgt, NULL, NULL,
(void *) (uintptr_t) captgt->tgt, NULL);
trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt,
atsdreg->size);
object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32",
vfio_pci_nvlink2_get_link_speed, NULL, NULL,
(void *) (uintptr_t) capspeed->link_speed, NULL);
trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name,
capspeed->link_speed);
free_exit:
g_free(atsdreg);
return ret;
}

View File

@ -3086,6 +3086,20 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
}
}
if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
if (ret && ret != -ENODEV) {
error_report("Failed to setup NVIDIA V100 GPU RAM");
}
}
if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
ret = vfio_pci_nvlink2_init(vdev, errp);
if (ret && ret != -ENODEV) {
error_report("Failed to setup NVlink2 bridge");
}
}
vfio_register_err_notifier(vdev);
vfio_register_req_notifier(vdev);
vfio_setup_resetfn_quirk(vdev);

View File

@ -196,6 +196,8 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
struct vfio_region_info *info,
Error **errp);
int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);
void vfio_display_reset(VFIOPCIDevice *vdev);
int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);

View File

@ -86,6 +86,10 @@ vfio_pci_igd_opregion_enabled(const char *name) "%s"
vfio_pci_igd_host_bridge_enabled(const char *name) "%s"
vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s"
vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x"
# common.c
vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64

View File

@ -87,6 +87,9 @@ struct SpaprPhbState {
uint32_t mig_liobn;
hwaddr mig_mem_win_addr, mig_mem_win_size;
hwaddr mig_io_win_addr, mig_io_win_size;
hwaddr nv2_gpa_win_addr;
hwaddr nv2_atsd_win_addr;
struct spapr_phb_pci_nvgpu_config *nvgpus;
};
#define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
@ -105,6 +108,22 @@ struct SpaprPhbState {
#define SPAPR_PCI_MSI_WINDOW 0x40000000000ULL
#define SPAPR_PCI_NV2RAM64_WIN_BASE SPAPR_PCI_LIMIT
#define SPAPR_PCI_NV2RAM64_WIN_SIZE (2 * TiB) /* For up to 6 GPUs 256GB each */
/* Max number of these GPUs per a physical box */
#define NVGPU_MAX_NUM 6
/* Max number of NVLinks per GPU in any physical box */
#define NVGPU_MAX_LINKS 3
/*
* GPU RAM starts at 64TiB so huge DMA window to cover it all ends at 128TiB
* which is enough. We do not need DMA for ATSD so we put them at 128TiB.
*/
#define SPAPR_PCI_NV2ATSD_WIN_BASE (128 * TiB)
#define SPAPR_PCI_NV2ATSD_WIN_SIZE (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * \
64 * KiB)
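The capacity arithmetic implied by these constants, written out as compile-time checks (illustrative only; C11 static_assert):

/* Capacity implied by the constants above:
 *   GPU RAM: 6 GPUs x 256 GiB = 1.5 TiB, fits the 2 TiB per-PHB window
 *   ATSD:    6 GPUs x 3 links x 64 KiB = 1152 KiB per PHB
 */
#include <assert.h>

#define KiB (1ULL << 10)
#define GiB (1ULL << 30)
#define TiB (1ULL << 40)

static_assert(6 * 256 * GiB <= 2 * TiB, "GPU RAM fits the 64-bit window");
static_assert(6 * 3 * 64 * KiB == 1152 * KiB, "ATSD window size");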
static inline qemu_irq spapr_phb_lsi_qirq(struct SpaprPhbState *phb, int pin)
{
SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
@ -135,6 +154,13 @@ int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state);
int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option);
int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb);
void spapr_phb_vfio_reset(DeviceState *qdev);
void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp);
void spapr_phb_nvgpu_free(SpaprPhbState *sphb);
void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
Error **errp);
void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt);
void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
SpaprPhbState *sphb);
#else
static inline bool spapr_phb_eeh_available(SpaprPhbState *sphb)
{
@ -161,6 +187,25 @@ static inline int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb)
static inline void spapr_phb_vfio_reset(DeviceState *qdev)
{
}
static inline void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
{
}
static inline void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
{
}
static inline void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt,
int bus_off, Error **errp)
{
}
static inline void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb,
void *fdt)
{
}
static inline void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt,
int offset,
SpaprPhbState *sphb)
{
}
#endif
void spapr_phb_dma_reset(SpaprPhbState *sphb);

View File

@ -413,6 +413,10 @@ void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
void pci_bus_irqs_cleanup(PCIBus *bus);
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
static inline int pci_swizzle(int slot, int pin)
{
return (slot + pin) % PCI_NUM_PINS;
}
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,

View File

@ -123,7 +123,8 @@ struct SpaprMachineClass {
void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns, Error **errp);
unsigned n_dma, uint32_t *liobns, hwaddr *nv2gpa,
hwaddr *nv2atsd, Error **errp);
SpaprResizeHpt resize_hpt_default;
SpaprCapabilities default_caps;
SpaprIrq *irq;
@ -199,6 +200,8 @@ struct SpaprMachineState {
bool cmd_line_caps[SPAPR_CAP_NUM];
SpaprCapabilities def, eff, mig;
unsigned gpu_numa_id;
};
#define H_SUCCESS 0
@ -672,6 +675,10 @@ typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets);
void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn);
static inline void spapr_rtas_unregister(int token)
{
spapr_rtas_register(token, NULL, NULL);
}
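This inline relies on the relaxed assert in spapr_rtas_register() earlier in this diff: only a *named* double registration is a bug, so clearing an entry with NULL is now permitted. A self-contained model of the idiom, with a made-up table and token:

/* Self-contained model of the register/unregister idiom; names and the
 * token are made up for illustration. */
#include <assert.h>
#include <stddef.h>

typedef void (*rtas_fn)(void);
static struct { const char *name; rtas_fn fn; } rtas_table[8];

static void rtas_register(int token, const char *name, rtas_fn fn)
{
    /* same relaxation as the hunk above */
    assert(!name || !rtas_table[token].name);
    rtas_table[token].name = name;
    rtas_table[token].fn = fn;
}

static inline void rtas_unregister(int token)
{
    rtas_register(token, NULL, NULL);
}

static void my_call(void) {}

int main(void)
{
    rtas_register(3, "my-call", my_call);
    rtas_unregister(3);                    /* allowed by the new assert */
    rtas_register(3, "my-call", my_call);  /* and can register again */
    return 0;
}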
target_ulong spapr_rtas_call(PowerPCCPU *cpu, SpaprMachineState *sm,
uint32_t token, uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets);
@ -777,6 +784,8 @@ void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
Error **errp);
void spapr_clear_pending_events(SpaprMachineState *spapr);
int spapr_max_server_number(SpaprMachineState *spapr);
void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1);
/* DRC callbacks. */
void spapr_core_release(DeviceState *dev);

View File

@ -740,7 +740,7 @@
POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455,
"PowerPC 7457A v1.2 (G4)")
/* 64 bits PowerPC */
#if defined (TARGET_PPC64)
#if defined(TARGET_PPC64)
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,

View File

@ -393,7 +393,8 @@ enum {
CPU_POWERPC_RS64IV = 0x00370000,
#endif /* defined(TARGET_PPC64) */
/* Original POWER */
/* XXX: should be POWER (RIOS), RSC3308, RSC4608,
/*
* XXX: should be POWER (RIOS), RSC3308, RSC4608,
* POWER2 (RIOS2) & RSC2 (P2SC) here
*/
/* PA Semi core */

View File

@ -23,23 +23,28 @@
#include "qemu-common.h"
#include "qemu/int128.h"
//#define PPC_EMULATE_32BITS_HYPV
/* #define PPC_EMULATE_32BITS_HYPV */
#if defined (TARGET_PPC64)
#if defined(TARGET_PPC64)
/* PowerPC 64 definitions */
#define TARGET_LONG_BITS 64
#define TARGET_PAGE_BITS 12
#define TCG_GUEST_DEFAULT_MO 0
/* Note that the official physical address space bits is 62-M where M
is implementation dependent. I've not looked up M for the set of
cpus we emulate at the system level. */
/*
* Note that the official physical address space bits is 62-M where M
* is implementation dependent. I've not looked up M for the set of
* cpus we emulate at the system level.
*/
#define TARGET_PHYS_ADDR_SPACE_BITS 62
/* Note that the PPC environment architecture talks about 80 bit virtual
addresses, with segmentation. Obviously that's not all visible to a
single process, which is all we're concerned with here. */
/*
* Note that the PPC environment architecture talks about 80 bit
* virtual addresses, with segmentation. Obviously that's not all
* visible to a single process, which is all we're concerned with
* here.
*/
#ifdef TARGET_ABI32
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#else
@ -49,7 +54,7 @@
#define TARGET_PAGE_BITS_64K 16
#define TARGET_PAGE_BITS_16M 24
#else /* defined (TARGET_PPC64) */
#else /* defined(TARGET_PPC64) */
/* PowerPC 32 definitions */
#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 12
@ -57,14 +62,14 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif /* defined (TARGET_PPC64) */
#endif /* defined(TARGET_PPC64) */
#define CPUArchState struct CPUPPCState
#include "exec/cpu-defs.h"
#include "cpu-qom.h"
#if defined (TARGET_PPC64)
#if defined(TARGET_PPC64)
#define PPC_ELF_MACHINE EM_PPC64
#else
#define PPC_ELF_MACHINE EM_PPC
@ -237,9 +242,11 @@ struct ppc_spr_t {
const char *name;
target_ulong default_value;
#ifdef CONFIG_KVM
/* We (ab)use the fact that all the SPRs will have ids for the
/*
* We (ab)use the fact that all the SPRs will have ids for the
* ONE_REG interface will have KVM_REG_PPC to use 0 as meaning,
* don't sync this */
* don't sync this
*/
uint64_t one_reg_id;
#endif
};
@ -656,39 +663,39 @@ enum {
#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
0x1F)
#define FP_FX (1ull << FPSCR_FX)
#define FP_FEX (1ull << FPSCR_FEX)
#define FP_VX (1ull << FPSCR_VX)
#define FP_OX (1ull << FPSCR_OX)
#define FP_UX (1ull << FPSCR_UX)
#define FP_ZX (1ull << FPSCR_ZX)
#define FP_XX (1ull << FPSCR_XX)
#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
#define FP_VXISI (1ull << FPSCR_VXISI)
#define FP_VXIDI (1ull << FPSCR_VXIDI)
#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
#define FP_VXVC (1ull << FPSCR_VXVC)
#define FP_FR (1ull << FSPCR_FR)
#define FP_FI (1ull << FPSCR_FI)
#define FP_C (1ull << FPSCR_C)
#define FP_FL (1ull << FPSCR_FL)
#define FP_FG (1ull << FPSCR_FG)
#define FP_FE (1ull << FPSCR_FE)
#define FP_FU (1ull << FPSCR_FU)
#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
#define FP_VXCVI (1ull << FPSCR_VXCVI)
#define FP_VE (1ull << FPSCR_VE)
#define FP_OE (1ull << FPSCR_OE)
#define FP_UE (1ull << FPSCR_UE)
#define FP_ZE (1ull << FPSCR_ZE)
#define FP_XE (1ull << FPSCR_XE)
#define FP_NI (1ull << FPSCR_NI)
#define FP_RN1 (1ull << FPSCR_RN1)
#define FP_RN (1ull << FPSCR_RN)
#define FP_FX (1ull << FPSCR_FX)
#define FP_FEX (1ull << FPSCR_FEX)
#define FP_VX (1ull << FPSCR_VX)
#define FP_OX (1ull << FPSCR_OX)
#define FP_UX (1ull << FPSCR_UX)
#define FP_ZX (1ull << FPSCR_ZX)
#define FP_XX (1ull << FPSCR_XX)
#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
#define FP_VXISI (1ull << FPSCR_VXISI)
#define FP_VXIDI (1ull << FPSCR_VXIDI)
#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
#define FP_VXVC (1ull << FPSCR_VXVC)
#define FP_FR (1ull << FSPCR_FR)
#define FP_FI (1ull << FPSCR_FI)
#define FP_C (1ull << FPSCR_C)
#define FP_FL (1ull << FPSCR_FL)
#define FP_FG (1ull << FPSCR_FG)
#define FP_FE (1ull << FPSCR_FE)
#define FP_FU (1ull << FPSCR_FU)
#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
#define FP_VXCVI (1ull << FPSCR_VXCVI)
#define FP_VE (1ull << FPSCR_VE)
#define FP_OE (1ull << FPSCR_OE)
#define FP_UE (1ull << FPSCR_UE)
#define FP_ZE (1ull << FPSCR_ZE)
#define FP_XE (1ull << FPSCR_XE)
#define FP_NI (1ull << FPSCR_NI)
#define FP_RN1 (1ull << FPSCR_RN1)
#define FP_RN (1ull << FPSCR_RN)
/* the exception bits which can be cleared by mcrfs - includes FX */
#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
@ -698,8 +705,8 @@ enum {
/*****************************************************************************/
/* Vector status and control register */
#define VSCR_NJ 16 /* Vector non-java */
#define VSCR_SAT 0 /* Vector saturation */
#define VSCR_NJ 16 /* Vector non-java */
#define VSCR_SAT 0 /* Vector saturation */
/*****************************************************************************/
/* BookE e500 MMU registers */
@ -962,9 +969,10 @@ struct ppc_radix_page_info {
/*****************************************************************************/
/* The whole PowerPC CPU context */
/* PowerPC needs eight modes for different hypervisor/supervisor/guest +
* real/paged mode combinations. The other two modes are for external PID
* load/store.
/*
* PowerPC needs eight modes for different hypervisor/supervisor/guest
* + real/paged mode combinations. The other two modes are for
* external PID load/store.
*/
#define NB_MMU_MODES 10
#define MMU_MODE8_SUFFIX _epl
@ -976,8 +984,9 @@ struct ppc_radix_page_info {
#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
struct CPUPPCState {
/* First are the most commonly used resources
* during translated code execution
/*
* First are the most commonly used resources during translated
* code execution
*/
/* general purpose registers */
target_ulong gpr[32];
@ -1023,8 +1032,8 @@ struct CPUPPCState {
/* High part of 128-bit helper return. */
uint64_t retxh;
int access_type; /* when a memory exception occurs, the access
type is stored here */
/* when a memory exception occurs, the access type is stored here */
int access_type;
CPU_COMMON
@ -1072,8 +1081,10 @@ struct CPUPPCState {
/* SPE registers */
uint64_t spe_acc;
uint32_t spe_fscr;
/* SPE and Altivec can share a status since they will never be used
* simultaneously */
/*
* SPE and Altivec can share a status since they will never be
* used simultaneously
*/
float_status vec_status;
/* Internal devices resources */
@ -1103,7 +1114,8 @@ struct CPUPPCState {
int error_code;
uint32_t pending_interrupts;
#if !defined(CONFIG_USER_ONLY)
/* This is the IRQ controller, which is implementation dependent
/*
* This is the IRQ controller, which is implementation dependent
* and only relevant when emulating a complete machine.
*/
uint32_t irq_input_state;
@ -1117,7 +1129,8 @@ struct CPUPPCState {
hwaddr mpic_iack;
/* true when the external proxy facility mode is enabled */
bool mpic_proxy;
/* set when the processor has an HV mode, thus HV priv
/*
* set when the processor has an HV mode, thus HV priv
* instructions and SPRs are diallowed if MSR:HV is 0
*/
bool has_hv_mode;
@ -1149,8 +1162,10 @@ struct CPUPPCState {
/* booke timers */
/* Specifies bit locations of the Time Base used to signal a fixed timer
* exception on a transition from 0 to 1. (watchdog or fixed-interval timer)
/*
* Specifies bit locations of the Time Base used to signal a fixed
* timer exception on a transition from 0 to 1. (watchdog or
* fixed-interval timer)
*
* 0 selects the least significant bit.
* 63 selects the most significant bit.
@ -1250,8 +1265,8 @@ struct PPCVirtualHypervisorClass {
void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
const ppc_hash_pte64_t *hptes,
hwaddr ptex, int n);
void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex,
uint64_t pte0, uint64_t pte1);
void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
};
@ -1290,53 +1305,54 @@ extern const struct VMStateDescription vmstate_ppc_cpu;
/*****************************************************************************/
void ppc_translate_init(void);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
int cpu_ppc_signal_handler (int host_signum, void *pinfo,
void *puc);
/*
* you can call this signal handler from your SIGBUS and SIGSEGV
* signal handlers to inform the virtual CPU of exceptions. non zero
* is returned if the signal was handled by the virtual CPU.
*/
int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc);
#if defined(CONFIG_USER_ONLY)
int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
#endif
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1 (CPUPPCState *env, target_ulong value);
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
void ppc_store_ptcr(CPUPPCState *env, target_ulong value);
#endif /* !defined(CONFIG_USER_ONLY) */
void ppc_store_msr (CPUPPCState *env, target_ulong value);
void ppc_store_msr(CPUPPCState *env, target_ulong value);
void ppc_cpu_list(void);
/* Time-base and decrementer management */
#ifndef NO_CPU_IO_DEFS
uint64_t cpu_ppc_load_tbl (CPUPPCState *env);
uint32_t cpu_ppc_load_tbu (CPUPPCState *env);
void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value);
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
uint64_t cpu_ppc_load_atbl (CPUPPCState *env);
uint32_t cpu_ppc_load_atbu (CPUPPCState *env);
void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value);
void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value);
uint64_t cpu_ppc_load_tbl(CPUPPCState *env);
uint32_t cpu_ppc_load_tbu(CPUPPCState *env);
void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value);
void cpu_ppc_store_tbl(CPUPPCState *env, uint32_t value);
uint64_t cpu_ppc_load_atbl(CPUPPCState *env);
uint32_t cpu_ppc_load_atbu(CPUPPCState *env);
void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value);
void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value);
bool ppc_decr_clear_on_delivery(CPUPPCState *env);
target_ulong cpu_ppc_load_decr(CPUPPCState *env);
void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value);
target_ulong cpu_ppc_load_hdecr(CPUPPCState *env);
void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value);
uint64_t cpu_ppc_load_purr (CPUPPCState *env);
uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env);
uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env);
uint64_t cpu_ppc_load_purr(CPUPPCState *env);
uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env);
uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env);
#if !defined(CONFIG_USER_ONLY)
void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value);
void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value);
target_ulong load_40x_pit (CPUPPCState *env);
void store_40x_pit (CPUPPCState *env, target_ulong val);
void store_40x_dbcr0 (CPUPPCState *env, uint32_t val);
void store_40x_sler (CPUPPCState *env, uint32_t val);
void store_booke_tcr (CPUPPCState *env, target_ulong val);
void store_booke_tsr (CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all (CPUPPCState *env);
void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value);
void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value);
target_ulong load_40x_pit(CPUPPCState *env);
void store_40x_pit(CPUPPCState *env, target_ulong val);
void store_40x_dbcr0(CPUPPCState *env, uint32_t val);
void store_40x_sler(CPUPPCState *env, uint32_t val);
void store_booke_tcr(CPUPPCState *env, target_ulong val);
void store_booke_tsr(CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all(CPUPPCState *env);
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr);
void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
#endif
#endif
@ -1349,7 +1365,8 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
gprv = env->gpr[gprn];
if (env->flags & POWERPC_FLAG_SPE) {
/* If the CPU implements the SPE extension, we have to get the
/*
* If the CPU implements the SPE extension, we have to get the
* high bits of the GPR from the gprh storage area
*/
gprv &= 0xFFFFFFFFULL;
@ -1360,8 +1377,8 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
}
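For SPE the architectural 64-bit GPR is split across two 32-bit halves; presumably the elided remainder of the function merges env->gprh[gprn] into the top half, along these lines (a sketch with hypothetical parameters, not the actual QEMU code):

/* Sketch of the SPE split-GPR merge described above: the low 32 bits
 * live in gpr[], the high 32 bits in gprh[]. Assumption, not QEMU code. */
#include <stdint.h>

static uint64_t dump_gpr(const uint32_t *gprh, const uint64_t *gpr, int n)
{
    uint64_t gprv = gpr[n] & 0xFFFFFFFFULL;   /* keep the low half */
    gprv |= (uint64_t)gprh[n] << 32;          /* merge the high half */
    return gprv;
}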
/* Device control registers */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
#define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU
#define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX
@ -1372,7 +1389,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
/* MMU modes definitions */
#define MMU_USER_IDX 0
static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
{
return ifetch ? env->immu_idx : env->dmmu_idx;
}
@ -1990,17 +2007,17 @@ void ppc_compat_add_property(Object *obj, const char *name,
/* External Input Interrupt Directed to Guest State */
#define EPCR_EXTGS (1 << 31)
#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */
#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */
#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */
#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */
#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */
#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */
#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */
#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */
#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
/* HID0 bits */
#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
@ -2226,7 +2243,8 @@ enum {
};
/*****************************************************************************/
/* Memory access type :
/*
* Memory access type :
* may be needed for precise access rights control and precise exceptions.
*/
enum {
@ -2242,8 +2260,9 @@ enum {
ACCESS_CACHE = 0x60, /* Cache manipulation */
};
/* Hardware interruption sources:
* all those exception can be raised simulteaneously
/*
* Hardware interrupt sources:
* all those exception can be raised simulteaneously
*/
/* Input pins definitions */
enum {
@ -2325,9 +2344,11 @@ enum {
enum {
/* POWER7 input pins */
POWER7_INPUT_INT = 0,
/*
 * POWER7 probably has other inputs, but we don't care about them
 * for any existing machine. We can wire these up when we need
 * them
 */
POWER7_INPUT_NB,
};


@ -1104,19 +1104,19 @@ void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, uint32_t s) \
} \
} \
\
while (offset < (size) / 4) { \
n++; \
digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
if (digits[(size) / 4 - n] > 10) { \
dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
return; \
} else { \
nonzero |= (digits[(size) / 4 - n] > 0); \
} \
} \
\
if (nonzero) { \
decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \
} \
\
if (s && sgn) { \
@ -1170,13 +1170,13 @@ DFP_HELPER_XEX(dxexq, 128)
static void dfp_set_raw_exp_64(uint64_t *t, uint64_t raw)
{
*t &= 0x8003ffffffffffffULL;
*t |= (raw << (63 - 13));
}
static void dfp_set_raw_exp_128(uint64_t *t, uint64_t raw)
{
t[HI_IDX] &= 0x80003fffffffffffULL;
t[HI_IDX] |= (raw << (63 - 17));
}
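
Both setters above follow the same clear-then-deposit pattern; a generic sketch (hypothetical helper, not part of the DFP code), where dfp_set_raw_exp_64() corresponds to keep_mask 0x8003ffffffffffffULL and shift 63 - 13:

#include <stdint.h>

/* Keep the bits selected by keep_mask, then OR the raw field in at
 * its shift position. */
static uint64_t set_raw_field(uint64_t t, uint64_t raw,
                              uint64_t keep_mask, int shift)
{
    return (t & keep_mask) | (raw << shift);
}
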
#define DFP_HELPER_IEX(op, size) \


@ -25,9 +25,9 @@
#include "internal.h"
#include "helper_regs.h"
/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */
#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
@ -126,8 +126,9 @@ static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
return offset;
}
/*
* Note that this function should be greatly optimized when called
* with a constant excp, from ppc_hw_interrupt
*/
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
@ -147,7 +148,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
msr = env->msr & ~0x783f0000ULL;
}
/*
 * new interrupt handler msr preserves existing HV and ME unless
 * explicitly overridden
*/
new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
@ -166,7 +168,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
excp = powerpc_reset_wakeup(cs, env, excp, &msr);
}
/*
 * Exception targeting modifiers
*
* LPES0 is supported on POWER7/8/9
* LPES1 is not supported (old iSeries mode)
@ -194,7 +197,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
ail = 0;
}
/*
 * Hypervisor emulation assistance interrupt only exists on server
 * arch 2.05 or later. We also don't want to generate it if
* we don't have HVB in msr_mask (PAPR mode).
*/
@ -229,8 +233,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
if (msr_me == 0) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
*/
fprintf(stderr, "Machine check while not allowed. "
"Entering checkstop state\n");
@ -242,8 +247,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
cpu_interrupt_exittb(cs);
}
if (env->msr_mask & MSR_HVB) {
/*
* ISA specifies HV, but can be delivered to guest with HV
* clear (e.g., see FWNMI in PAPR).
*/
new_msr |= (target_ulong)MSR_HVB;
}
@ -294,9 +300,10 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
case POWERPC_EXCP_ALIGN: /* Alignment exception */
/* Get rS/rD and rA from faulting opcode */
/*
* Note: the opcode fields will not be set properly for a
* direct store load/store, but nobody cares as nobody
* actually uses direct store segments.
*/
env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
break;
@ -310,7 +317,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
return;
}
/*
* FP exceptions always have NIP pointing to the faulting
* instruction, so always use store_next and claim we are
* precise in the MSR.
*/
@ -341,7 +349,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
dump_syscall(env);
lev = env->error_code;
/*
* We need to correct the NIP which in this case is supposed
* to point to the next instruction
*/
env->nip += 4;
@ -425,8 +434,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
new_msr |= ((target_ulong)1 << MSR_ME);
}
if (env->msr_mask & MSR_HVB) {
/*
* ISA specifies HV, but can be delivered to guest with HV
* clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
*/
new_msr |= (target_ulong)MSR_HVB;
} else {
@ -675,7 +685,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
env->spr[asrr1] = env->spr[srr1];
}
/*
* Sort out endianness of interrupt, this differs depending on the
* CPU, the HV mode, etc...
*/
#ifdef TARGET_PPC64
@ -716,8 +727,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
vector |= env->excp_prefix;
/*
* AIL only works if there is no HV transition and we are running
* with translations enabled
*/
if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
@ -745,8 +757,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
}
#endif
/*
 * We don't use hreg_store_msr here as we have already treated any
 * special case that could occur. Just store MSR and update hflags
*
* Note: We *MUST* not use hreg_store_msr() as-is anyway because it
* will prevent setting of the HV bit which some exceptions might need
@ -762,8 +775,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Reset the reservation */
env->reserve_addr = -1;
/*
* Any interrupt is context synchronizing, check if TCG TLB needs
* a delayed flush on ppc64
*/
check_tlb_flush(env, false);
}
@ -1015,8 +1029,9 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
cs = CPU(ppc_env_get_cpu(env));
cs->halted = 1;
/*
* The architecture specifies that HDEC interrupts are discarded
* in PM states
*/
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
@ -1047,8 +1062,9 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
#if defined(DEBUG_OP)
cpu_dump_rfi(env->nip, env->msr);
#endif
/*
* No need to raise an exception here, as rfi is always the last
* insn of a TB
*/
cpu_interrupt_exittb(cs);
/* Reset the reservation */
@ -1067,8 +1083,9 @@ void helper_rfi(CPUPPCState *env)
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
/*
 * The architecture defines a number of rules for which bits can
* change but in practice, we handle this in hreg_store_msr()
* which will be called by do_rfi(), so there is no need to filter
* here
*/
@ -1206,9 +1223,11 @@ static int book3s_dbell2irq(target_ulong rb)
{
int msg = rb & DBELL_TYPE_MASK;
/*
 * A Directed Hypervisor Doorbell message is sent only if the
 * message type is 5. All other types are reserved and the
 * instruction is a no-op
 */
return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1;
}


@ -90,10 +90,12 @@ uint32_t helper_tosingle(uint64_t arg)
ret = extract64(arg, 62, 2) << 30;
ret |= extract64(arg, 29, 30);
} else {
/*
* Zero or Denormal result. If the exponent is in bounds for
* a single-precision denormal result, extract the proper
* bits. If the input is not zero, and the exponent is out of
* bounds, then the result is undefined; this underflows to
* zero.
*/
ret = extract64(arg, 63, 1) << 31;
if (unlikely(exp >= 874)) {
@ -1090,7 +1092,7 @@ uint32_t helper_ftsqrt(uint64_t frb)
fe_flag = 1;
} else if (unlikely(float64_is_neg(frb))) {
fe_flag = 1;
} else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
fe_flag = 1;
}
@ -1789,7 +1791,8 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
#define float64_to_float64(x, env) x
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
* name - instruction mnemonic
* op - operation (add or sub)
* nels - number of elements (1, 2 or 4)
@ -1872,7 +1875,8 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
/*
* VSX_MUL - VSX floating point multiply
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -1950,7 +1954,8 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
/*
* VSX_DIV - VSX floating point divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2034,7 +2039,8 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
/*
* VSX_RE - VSX floating point reciprocal estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2075,7 +2081,8 @@ VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/*
* VSX_SQRT - VSX floating point square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2124,7 +2131,8 @@ VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2174,7 +2182,8 @@ VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/*
* VSX_TDIV - VSX floating point test for divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2207,18 +2216,20 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if (unlikely(tp##_is_any_nan(xa.fld) || \
tp##_is_any_nan(xb.fld))) { \
fe_flag = 1; \
} else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
fe_flag = 1; \
} else if (!tp##_is_zero(xa.fld) && \
(((e_a - e_b) >= emax) || \
((e_a - e_b) <= (emin + 1)) || \
(e_a <= (emin + nbits)))) { \
fe_flag = 1; \
} \
\
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
/* \
* XB is not zero because of the above check and so \
* must be denormalized. \
*/ \
fg_flag = 1; \
} \
} \
@ -2231,7 +2242,8 @@ VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/*
* VSX_TSQRT - VSX floating point test for square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2266,13 +2278,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} else if (unlikely(tp##_is_neg(xb.fld))) { \
fe_flag = 1; \
} else if (!tp##_is_zero(xb.fld) && \
(e_b <= (emin + nbits))) { \
fe_flag = 1; \
} \
\
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
/* \
* XB is not zero because of the above check and \
* therefore must be denormalized. \
*/ \
fg_flag = 1; \
} \
} \
@ -2285,7 +2299,8 @@ VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/*
 * VSX_MADD - VSX floating point multiply/add variations
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2322,8 +2337,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
/* \
* Avoid double rounding errors by rounding the intermediate \
* result to odd. \
*/ \
set_float_rounding_mode(float_round_to_zero, &tstat); \
xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
maddflgs, &tstat); \
@ -2388,7 +2405,8 @@ VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
/*
* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
* op - instruction mnemonic
* cmp - comparison operation
* exp - expected result of comparison
@ -2604,7 +2622,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
/*
* VSX_MAX_MIN - VSX floating point maximum/minimum
* name - instruction mnemonic
* op - operation (max or min)
* nels - number of elements (1, 2 or 4)
@ -2733,7 +2752,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
/*
* VSX_CMP - VSX floating point compare
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -2778,7 +2798,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
if ((opcode >> (31 - 21)) & 1) { \
env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
} \
do_float_check_status(env, GETPC()); \
@ -2793,7 +2813,8 @@ VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
/*
* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (float32 or float64)
@ -2829,10 +2850,11 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
/*
* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (float32 or float64)
@ -2868,7 +2890,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/*
* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
* involving one half precision value
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
@ -2953,7 +2976,8 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
return float32_to_float64(xb >> 32, &tstat);
}
/*
* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (float32 or float64)
@ -2996,17 +3020,18 @@ VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
/*
* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
* op - instruction mnemonic
* stp - source type (float32 or float64)
* ttp - target type (int32, uint32, int64 or uint64)
@ -3040,7 +3065,8 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/*
* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (int32, uint32, int64 or uint64)
@ -3079,14 +3105,15 @@ VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/*
* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
* op - instruction mnemonic
* stp - source type (int32, uint32, int64 or uint64)
* ttp - target type (float32 or float64)
@ -3111,13 +3138,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
/*
* For "use current rounding mode", define a value that will not be
* one of the existing rounding model enums.
*/
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
float_round_up + float_round_to_zero)
/*
* VSX_ROUND - VSX floating point round
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@ -3150,9 +3179,11 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
} \
\
/* \
 * If this is not a "use current rounding mode" instruction, \
 * then inhibit setting of the XX bit and restore rounding \
 * mode from FPSCR \
 */ \
if (rmode != FLOAT_ROUND_CURRENT) { \
fpscr_set_rounding_mode(env); \
env->fp_status.float_exception_flags &= ~float_flag_inexact; \
@ -3234,7 +3265,8 @@ void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
putVSR(xT(opcode), &xt, env);
}
/*
* VSX_TEST_DC - VSX floating point test data class
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* xbn - VSR register number


@ -33,14 +33,14 @@ static int ppc_gdb_register_len_apple(int n)
return 8;
case 64 ... 95:
return 16;
case 64 + 32: /* nip */
case 65 + 32: /* msr */
case 67 + 32: /* lr */
case 68 + 32: /* ctr */
case 70 + 32: /* fpscr */
return 8;
case 66 + 32: /* cr */
case 69 + 32: /* xer */
return 4;
default:
return 0;
@ -84,11 +84,14 @@ static int ppc_gdb_register_len(int n)
}
}
/*
* We need to present the registers to gdb in the "current" memory
* ordering. For user-only mode we get this for free;
* TARGET_WORDS_BIGENDIAN is set to the proper ordering for the
* binary, and cannot be changed. For system mode,
* TARGET_WORDS_BIGENDIAN is always set, and we must check the current
* mode of the chip to see if we're running in little-endian.
*/
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
{
#ifndef CONFIG_USER_ONLY
@ -104,11 +107,12 @@ void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
#endif
}
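
A minimal sketch of the conditional swap this function performs, with a hypothetical guest_is_le flag standing in for the MSR little-endian check:

#include <stdint.h>

/* Reverse an n-byte register image in place when the guest is
 * currently little-endian; otherwise leave it alone. */
static void maybe_bswap(uint8_t *buf, int len, int guest_is_le)
{
    int i;

    if (!guest_is_le) {
        return;
    }
    for (i = 0; i < len / 2; i++) {
        uint8_t t = buf[i];
        buf[i] = buf[len - 1 - i];
        buf[len - 1 - i] = t;
    }
}
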
/*
* Old gdb always expects FP registers. Newer (xml-aware) gdb only
* expects whatever the target description contains. Due to a
* historical mishap the FP registers appear in between core integer
* regs and PC, MSR, CR, and so forth. We hack round this by giving the
* FP regs zero size when talking to a newer gdb.
* regs and PC, MSR, CR, and so forth. We hack round this by giving
* the FP regs zero size when talking to a newer gdb.
*/
int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)


@ -44,10 +44,11 @@ static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
static inline void hreg_compute_mem_idx(CPUPPCState *env)
{
/*
 * This is our encoding for server processors. The architecture
 * specifies that there is no such thing as userspace with
 * translation off, however it appears that MacOS does it and some
 * 32-bit CPUs support it. Weird...
*
* 0 = Guest User space virtual mode
* 1 = Guest Kernel space virtual mode
@ -143,7 +144,8 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
/* Change the exception prefix on PowerPC 601 */
env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
}
/*
* If PR=1 then EE, IR and DR must be 1
*
* Note: We only enforce this on 64-bit server processors.
* It appears that:


@ -137,7 +137,8 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
/* if x = 0xab, returns 0xababababababababa */
#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
/*
* subtract 1 from each byte, and with inverse, check if MSB is set at each
* byte.
* i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
* (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
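A self-contained demonstration of the trick (an illustration, not the target_ulong helpers used here):

#include <stdint.h>
#include <stdio.h>

#define PATTERN(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
#define HASZERO(v) (((v) - PATTERN(0x01)) & ~(v) & PATTERN(0x80))

int main(void)
{
    uint64_t v = 0x1122330044556677ULL;         /* contains a zero byte */

    printf("haszero:       %d\n", HASZERO(v) != 0);                 /* 1 */
    /* hasvalue(v, n) is haszero() of v XORed with n in every byte */
    printf("hasvalue 0x44: %d\n", HASZERO(v ^ PATTERN(0x44)) != 0); /* 1 */
    return 0;
}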
@ -156,7 +157,8 @@ uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
#undef haszero
#undef hasvalue
/*
* Return invalid random number.
*
* FIXME: Add rng backend or other mechanism to get cryptographically suitable
* random number
@ -181,7 +183,7 @@ uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
uint64_t ra = 0;
for (i = 0; i < 8; i++) {
int index = (rs >> (i * 8)) & 0xFF;
if (index < 64) {
if (rb & PPC_BIT(index)) {
ra |= 1 << i;
@ -370,7 +372,8 @@ target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/*
 * Extremely decomposed:
 *     return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
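A sketch of how such a ROM entry could be generated from the formula above (assuming the 602-entry table bound used by the helper; an illustration, not QEMU's actual table generator):

#include <math.h>
#include <stdio.h>

int main(void)
{
    int arg;

    for (arg = 0; arg < 602; arg++) {
        double d = 256.0 * log10(pow(10.0, -arg / 256.0) + 1.0) + 0.5;
        printf("%3d: %d\n", arg, (int)d);
    }
    return 0;
}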
@ -393,7 +396,7 @@ target_ulong helper_602_mfrom(target_ulong arg)
for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element) \
for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--)
#endif
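
The macro visits elements in guest (big-endian) order on either host; a toy byte-wise equivalent (hypothetical, simplified from the real ppc_avr_t accessors):

#include <stdio.h>

#if defined(HOST_WORDS_BIGENDIAN)
#define FOR_INORDER(i) for (i = 0; i < 16; i++)
#else
#define FOR_INORDER(i) for (i = 15; i >= 0; i--)
#endif

int main(void)
{
    unsigned char v[16];
    int i;

    for (i = 0; i < 16; i++) {
        v[i] = (unsigned char)i;
    }
    FOR_INORDER(i) {          /* guest element 0 is visited first */
        printf("%d ", v[i]);
    }
    printf("\n");
    return 0;
}
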
/* Saturating arithmetic helpers. */
@ -634,7 +637,8 @@ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
} \
}
/*
* VABSDU - Vector absolute difference unsigned
* name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
@ -739,7 +743,8 @@ void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
} \
}
/*
* VCMPNEZ - Vector compare not equal to zero
* suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
@ -1138,7 +1143,7 @@ void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
#else
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
#define EXTRACT_BIT(avr, i, index) \
@ -1169,7 +1174,7 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
int index = VBPERMQ_INDEX(b, i);
if (index < 128) {
uint64_t mask = (1ull << (63 - (index & 0x3F)));
if (a->u64[VBPERMQ_DW(index)] & mask) {
perm |= (0x8000 >> i);
}
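
The index decoding above selects one bit out of a 128-bit quantity split across two 64-bit words; a hedged sketch of the same selection for a big-endian word layout:

#include <stdint.h>

/* Return bit 'index' (0 = most significant) of a 128-bit value whose
 * upper 64 bits are in hi and lower 64 bits in lo. */
static int bit128(uint64_t hi, uint64_t lo, int index)
{
    uint64_t w = (index & 0x40) ? lo : hi;

    return (int)((w >> (63 - (index & 0x3F))) & 1);
}
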
@ -1449,9 +1454,9 @@ void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(i, u8) {
#if defined(HOST_WORDS_BIGENDIAN)
t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
#else
t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (7 - (i & 7));
#endif
}
@ -1463,19 +1468,19 @@ void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i, j; \
trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \
\
VECTOR_FOR_INORDER_I(i, srcfld) { \
prod[i] = 0; \
for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
if (a->srcfld[i] & (1ull << j)) { \
prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
} \
} \
} \
\
VECTOR_FOR_INORDER_I(i, trgfld) { \
r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \
} \
}
@ -1493,7 +1498,7 @@ void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(i, u64) {
prod[i] = 0;
for (j = 0; j < 64; j++) {
if (a->u64[i] & (1ull << j)) {
prod[i] ^= (((__uint128_t)b->u64[i]) << j);
}
}
@ -1508,7 +1513,7 @@ void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(i, u64) {
prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
for (j = 0; j < 64; j++) {
if (a->u64[i] & (1ull << j)) {
ppc_avr_t bshift;
if (j == 0) {
bshift.VsrD(0) = 0;
@ -1548,9 +1553,9 @@ void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(j, u32) {
uint32_t e = x[i]->u32[j];
result.u16[4 * i + j] = (((e >> 9) & 0xfc00) |
((e >> 6) & 0x3e0) |
((e >> 3) & 0x1f));
}
}
*r = result;
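
Each 32-bit source pixel is reduced to a 16-bit 1:5:5:5 value by keeping the top bits of each field; the shift/mask logic above as a standalone sketch:

#include <stdint.h>

/* Pack one 32-bit pixel into 16 bits, keeping the high bits of each
 * field, exactly as the expression above does. */
static uint16_t pack_pixel(uint32_t e)
{
    return (uint16_t)(((e >> 9) & 0xfc00) |
                      ((e >> 6) & 0x03e0) |
                      ((e >> 3) & 0x001f));
}
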
@ -1568,7 +1573,7 @@ void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
\
VECTOR_FOR_INORDER_I(i, from) { \
result.to[i] = cvt(a0->from[i], &sat); \
result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\
} \
*r = result; \
if (dosat && sat) { \
@ -1736,9 +1741,11 @@ VEXTU_X_DO(vextuhrx, 16, 0)
VEXTU_X_DO(vextuwrx, 32, 0)
#undef VEXTU_X_DO
/*
* The specification says that the results are undefined if all of the
* shift counts are not identical. We check to make sure that they
* are to conform to what real hardware appears to do.
*/
#define VSHIFT(suffix, leftp) \
void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
@ -1805,9 +1812,10 @@ void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
int i;
unsigned int shift, bytes;
/*
 * Use reverse order, as destination and source register can be
 * the same. It's modified in place, saving a temporary; reverse
 * order guarantees that the computed result is not fed back.
*/
for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
shift = b->u8[i] & 0x7; /* extract shift value */
@ -1840,7 +1848,7 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#if defined(HOST_WORDS_BIGENDIAN)
memmove(&r->u8[0], &a->u8[sh], 16 - sh);
memset(&r->u8[16 - sh], 0, sh);
#else
memmove(&r->u8[sh], &a->u8[0], 16 - sh);
memset(&r->u8[0], 0, sh);
@ -2112,7 +2120,7 @@ void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
ppc_avr_t result; \
\
for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
uint16_t e = b->u16[hi ? i : i + 4]; \
uint8_t a = (e >> 15) ? 0xff : 0; \
uint8_t r = (e >> 10) & 0x1f; \
uint8_t g = (e >> 5) & 0x1f; \
@ -2463,7 +2471,7 @@ static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
{
if (n & 1) {
bcd->u8[BCD_DIG_BYTE(n)] &= 0x0F;
bcd->u8[BCD_DIG_BYTE(n)] |= (digit << 4);
} else {
bcd->u8[BCD_DIG_BYTE(n)] &= 0xF0;
bcd->u8[BCD_DIG_BYTE(n)] |= digit;
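
Packed BCD stores two decimal digits per byte, odd positions in the high nibble; a self-contained sketch of the same packing (illustrative, not the helper above):

#include <stdint.h>
#include <stdio.h>

static uint8_t put_bcd_digit(uint8_t byte, uint8_t digit, int odd)
{
    return odd ? (uint8_t)((byte & 0x0F) | (digit << 4))
               : (uint8_t)((byte & 0xF0) | digit);
}

int main(void)
{
    uint8_t b = 0;

    b = put_bcd_digit(b, 3, 0);   /* low nibble  */
    b = put_bcd_digit(b, 5, 1);   /* high nibble */
    printf("0x%02X\n", b);        /* prints 0x53 */
    return 0;
}
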
@ -3220,7 +3228,7 @@ void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
if (st == 0) {
if ((six & (0x8 >> (2 * i))) == 0) {
r->VsrD(i) = ror64(a->VsrD(i), 1) ^
ror64(a->VsrD(i), 8) ^
(a->VsrD(i) >> 7);
@ -3230,7 +3238,7 @@ void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
(a->VsrD(i) >> 6);
}
} else { /* st == 1 */
if ((six & (0x8 >> (2 * i))) == 0) {
r->VsrD(i) = ror64(a->VsrD(i), 28) ^
ror64(a->VsrD(i), 34) ^
ror64(a->VsrD(i), 39);


@ -49,24 +49,14 @@
#include "elf.h"
#include "sysemu/kvm_int.h"
#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
static int cap_interrupt_unset;
static int cap_interrupt_level;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
@ -96,7 +86,8 @@ static int cap_large_decr;
static uint32_t debug_inst_opcode;
/*
* XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
* takes but ignores it, goes to sleep and never gets notified that there's
* still an interrupt pending.
@ -114,10 +105,12 @@ static void kvm_kick_cpu(void *opaque)
qemu_cpu_kick(CPU(cpu));
}
/*
 * Check whether we are running with KVM-PR (instead of KVM-HV). This
 * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
 * assuming what is/isn't available depending on the KVM variant.
 */
static bool kvmppc_is_pr(KVMState *ks)
{
/* Assume KVM-PR if the GET_PVINFO capability is available */
@ -143,8 +136,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
/*
* Note: we don't set cap_papr here, because this capability is
* only activated after this by kvmppc_set_papr()
*/
cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
@ -160,7 +155,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
* in KVM at this moment.
*
* TODO: call kvm_vm_check_extension() with the right capability
* after the kernel starts implementing it.
*/
cap_ppc_pvr_compat = false;
if (!cap_interrupt_level) {
@ -186,10 +182,13 @@ static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
int ret;
if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
/*
* What we're really trying to say is "if we're on BookE, we
* use the native PVR for now". This is the only sane way to
* check it though, so we potentially confuse users that they
* can run BookE guests on BookS. Let's hope nobody dares
* enough :)
*/
return 0;
} else {
if (!cap_segstate) {
@ -421,12 +420,14 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
}
if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
/*
 * Mostly what guest pagesizes we can use are related to the
 * host pages used to map guest RAM, which is handled in the
 * platform code. Cache-Inhibited largepages (64k) however are
 * used for I/O, so if they're mapped to the host at all it
 * will be a normal mapping, not a special hugepage one used
 * for RAM.
 */
if (getpagesize() < 0x10000) {
error_setg(errp,
"KVM can't supply 64kiB CI pages, which guest expects");
@ -440,9 +441,9 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return POWERPC_CPU(cpu)->vcpu_id;
}
/*
 * e500 supports 2 h/w breakpoints and 2 watchpoints. book3s supports
 * only 1 watchpoint, so an array size of 4 is sufficient for now.
*/
#define MAX_HW_BKPTS 4
@ -497,9 +498,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
break;
case POWERPC_MMU_2_07:
if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
/*
 * KVM-HV has transactional memory on POWER8 also without
 * the KVM_CAP_PPC_HTM extension, so enable it here
 * instead as long as it's available to userspace on the
 * host.
 */
if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
cap_htm = true;
}
@ -626,7 +630,7 @@ static int kvm_put_fp(CPUState *cs)
reg.addr = (uintptr_t)&fpscr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_fpscr_set(strerror(errno));
return ret;
}
@ -647,8 +651,8 @@ static int kvm_put_fp(CPUState *cs)
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
strerror(errno));
return ret;
}
}
@ -659,7 +663,7 @@ static int kvm_put_fp(CPUState *cs)
reg.addr = (uintptr_t)&env->vscr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_vscr_set(strerror(errno));
return ret;
}
@ -668,7 +672,7 @@ static int kvm_put_fp(CPUState *cs)
reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_vr_set(i, strerror(errno));
return ret;
}
}
@ -693,7 +697,7 @@ static int kvm_get_fp(CPUState *cs)
reg.addr = (uintptr_t)&fpscr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_fpscr_get(strerror(errno));
return ret;
} else {
env->fpscr = fpscr;
@ -709,8 +713,8 @@ static int kvm_get_fp(CPUState *cs)
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
strerror(errno));
return ret;
} else {
#ifdef HOST_WORDS_BIGENDIAN
@ -733,7 +737,7 @@ static int kvm_get_fp(CPUState *cs)
reg.addr = (uintptr_t)&env->vscr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_vscr_get(strerror(errno));
return ret;
}
@ -742,8 +746,7 @@ static int kvm_get_fp(CPUState *cs)
reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_vr_get(i, strerror(errno));
return ret;
}
}
@ -764,7 +767,7 @@ static int kvm_get_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_vpa_addr_get(strerror(errno));
return ret;
}
@ -774,8 +777,7 @@ static int kvm_get_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_slb_get(strerror(errno));
return ret;
}
@ -785,8 +787,7 @@ static int kvm_get_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_dtl_get(strerror(errno));
return ret;
}
@ -800,10 +801,12 @@ static int kvm_put_vpa(CPUState *cs)
struct kvm_one_reg reg;
int ret;
/*
 * SLB shadow or DTL can't be registered unless a master VPA is
 * registered. That means when restoring state, if a VPA *is*
 * registered, we need to set that up first. If not, we need to
 * deregister the others before deregistering the master VPA
 */
assert(spapr_cpu->vpa_addr
|| !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
@ -812,7 +815,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_vpa_addr_set(strerror(errno));
return ret;
}
}
@ -823,7 +826,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_slb_set(strerror(errno));
return ret;
}
@ -833,8 +836,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_dtl_set(strerror(errno));
return ret;
}
@ -843,7 +845,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
trace_kvm_failed_null_vpa_addr_set(strerror(errno));
return ret;
}
}
@ -929,8 +931,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
regs.pid = env->spr[SPR_BOOKE_PID];
for (i = 0; i < 32; i++) {
regs.gpr[i] = env->gpr[i];
}
regs.cr = 0;
for (i = 0; i < 8; i++) {
@ -938,8 +941,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
if (ret < 0) {
return ret;
}
kvm_put_fp(cs);
@ -962,10 +966,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
if (cap_one_reg) {
int i;
/*
 * We deliberately ignore errors here, for kernels which have
 * the ONE_REG calls, but don't support the specific
 * registers, there's a reasonable chance things will still
 * work, at least until we try to migrate.
 */
for (i = 0; i < 1024; i++) {
uint64_t id = env->spr_cb[i].one_reg_id;
@ -996,7 +1002,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
if (cap_papr) {
if (kvm_put_vpa(cs) < 0) {
trace_kvm_failed_put_vpa();
}
}
@ -1207,8 +1213,9 @@ int kvm_arch_get_registers(CPUState *cs)
int i, ret;
ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (ret < 0) {
return ret;
}
cr = regs.cr;
for (i = 7; i >= 0; i--) {
@ -1236,8 +1243,9 @@ int kvm_arch_get_registers(CPUState *cs)
env->spr[SPR_BOOKE_PID] = regs.pid;
for (i = 0; i < 32; i++) {
env->gpr[i] = regs.gpr[i];
}
kvm_get_fp(cs);
@ -1262,10 +1270,12 @@ int kvm_arch_get_registers(CPUState *cs)
if (cap_one_reg) {
int i;
/*
 * We deliberately ignore errors here, for kernels which have
 * the ONE_REG calls, but don't support the specific
 * registers, there's a reasonable chance things will still
 * work, at least until we try to migrate.
 */
for (i = 0; i < 1024; i++) {
uint64_t id = env->spr_cb[i].one_reg_id;
@ -1296,7 +1306,7 @@ int kvm_arch_get_registers(CPUState *cs)
if (cap_papr) {
if (kvm_get_vpa(cs) < 0) {
trace_kvm_failed_get_vpa();
}
}
@ -1339,20 +1349,24 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
qemu_mutex_lock_iothread();
/*
* PowerPC QEMU tracks the various core input pins (interrupt,
* critical interrupt, reset, etc) in PPC-specific
* env->irq_input_state.
*/
if (!cap_interrupt_level &&
run->ready_for_interrupt_injection &&
(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->irq_input_state & (1 << PPC_INPUT_INT)))
{
/*
* For now KVM disregards the 'irq' argument. However, in the
* future KVM could cache it in-kernel to avoid a heavyweight
* exit when reading the UIC.
*/
irq = KVM_INTERRUPT_SET;
trace_kvm_injected_interrupt(irq);
r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
if (r < 0) {
printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
@ -1363,9 +1377,12 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
(NANOSECONDS_PER_SECOND / 50));
}
/*
* We don't know if there are more interrupts pending after
* this. However, the guest will return to userspace in the course
* of handling this one anyways, so we will get a chance to
* deliver the rest.
*/
qemu_mutex_unlock_iothread();
}
@ -1394,18 +1411,22 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
}
/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUPPCState *env,
uint32_t dcrn, uint32_t *data)
{
if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
}
return 0;
}
static int kvmppc_handle_dcr_write(CPUPPCState *env,
uint32_t dcrn, uint32_t data)
{
if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
}
return 0;
}
@ -1697,20 +1718,20 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
switch (run->exit_reason) {
case KVM_EXIT_DCR:
if (run->dcr.is_write) {
trace_kvm_handle_dcr_write();
ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
} else {
trace_kvm_handle_drc_read();
ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
}
break;
case KVM_EXIT_HLT:
trace_kvm_handle_halt();
ret = kvmppc_handle_halt(cpu);
break;
#if defined(TARGET_PPC64)
case KVM_EXIT_PAPR_HCALL:
trace_kvm_handle_papr_hcall();
run->papr_hcall.ret = spapr_hypercall(cpu,
run->papr_hcall.nr,
run->papr_hcall.args);
@ -1718,18 +1739,18 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
#endif
case KVM_EXIT_EPR:
trace_kvm_handle_epr();
run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
ret = 0;
break;
case KVM_EXIT_WATCHDOG:
trace_kvm_handle_watchdog_expiry();
watchdog_perform_action();
ret = 0;
break;
case KVM_EXIT_DEBUG:
trace_kvm_handle_debug_exception();
if (kvm_handle_debug(cpu, run)) {
ret = EXCP_DEBUG;
break;
@ -1832,7 +1853,7 @@ static int read_cpuinfo(const char *field, char *value, int len)
ret = 0;
break;
}
} while (*line);
fclose(f);
@ -1849,7 +1870,8 @@ uint32_t kvmppc_get_tbfreq(void)
return retval;
}
ns = strchr(line, ':');
if (!ns) {
return retval;
}
@ -1875,7 +1897,8 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len)
struct dirent *dirp;
DIR *dp;
dp = opendir(PROC_DEVTREE_CPU);
if (!dp) {
printf("Can't open directory " PROC_DEVTREE_CPU "\n");
return -1;
}
@ -1929,10 +1952,11 @@ static uint64_t kvmppc_read_int_dt(const char *filename)
return 0;
}
/*
 * Read a CPU node property from the host device tree that's a single
 * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
 * (can't find or open the property, or doesn't understand the format)
 */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
char buf[PATH_MAX], *tmp;
@ -1991,7 +2015,7 @@ int kvmppc_get_hasidle(CPUPPCState *env)
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
uint32_t *hc = (uint32_t *)buf;
struct kvm_ppc_pvinfo pvinfo;
if (!kvmppc_get_pvinfo(env, &pvinfo)) {
@ -2064,8 +2088,10 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
exit(1);
}
/*
* Update the capability flag so we sync the right information
* with kvm
*/
cap_papr = 1;
}
@ -2133,8 +2159,10 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
long rampagesize, best_page_shift;
int i;
/*
* Find the largest hardware supported page size that's less than
* or equal to the (logical) backing page size of guest RAM
*/
kvm_get_smmu_info(&info, &error_fatal);
rampagesize = qemu_minrampagesize();
best_page_shift = 0;
@ -2184,7 +2212,8 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
int fd;
void *table;
/*
* Must set fd to -1 so we don't try to munmap when called for
* destroying the table, which the upper layers -will- do
*/
*pfd = -1;
@ -2229,7 +2258,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
len = nb_table * sizeof(uint64_t);
/* FIXME: round this up to page size */
table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (table == MAP_FAILED) {
fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
liobn);
@ -2272,10 +2301,12 @@ int kvmppc_reset_htab(int shift_hint)
int ret;
ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
if (ret == -ENOTTY) {
/*
 * At least some versions of PR KVM advertise the
 * capability, but don't implement the ioctl(). Oops.
 * Return 0 so that we allocate the htab in qemu, as is
 * correct for PR.
 */
return 0;
} else if (ret < 0) {
return ret;
@ -2283,9 +2314,12 @@ int kvmppc_reset_htab(int shift_hint)
return shift;
}
/*
 * We have a kernel that predates the htab reset calls. For PR
 * KVM, we need to allocate the htab ourselves, for an HV KVM of
 * this era, it has allocated a 16MB fixed size hash table
 * already.
 */
if (kvmppc_is_pr(kvm_state)) {
/* PR - tell caller to allocate htab */
return 0;
@ -2667,8 +2701,8 @@ int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
}
}
} while ((rc != 0)
&& ((max_ns < 0) ||
((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
return (rc == 0) ? 1 : 0;
}
@ -2677,7 +2711,7 @@ int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint16_t n_valid, uint16_t n_invalid)
{
struct kvm_get_htab_header *buf;
size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
ssize_t rc;
buf = alloca(chunksize);
@ -2685,7 +2719,7 @@ int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
buf->n_valid = n_valid;
buf->n_invalid = n_invalid;
qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
rc = write(fd, buf, chunksize);
if (rc < 0) {


@ -117,7 +117,8 @@ static inline int kvmppc_get_hasidle(CPUPPCState *env)
return 0;
}
static inline int kvmppc_get_hypercall(CPUPPCState *env,
uint8_t *buf, int buf_len)
{
return -1;
}


@ -24,22 +24,26 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
#endif
target_ulong xer;
for (i = 0; i < 32; i++) {
qemu_get_betls(f, &env->gpr[i]);
}
#if !defined(TARGET_PPC64)
for (i = 0; i < 32; i++) {
qemu_get_betls(f, &env->gprh[i]);
}
#endif
qemu_get_betls(f, &env->lr);
qemu_get_betls(f, &env->ctr);
for (i = 0; i < 8; i++) {
qemu_get_be32s(f, &env->crf[i]);
}
qemu_get_betls(f, &xer);
cpu_write_xer(env, xer);
qemu_get_betls(f, &env->reserve_addr);
qemu_get_betls(f, &env->msr);
for (i = 0; i < 4; i++) {
qemu_get_betls(f, &env->tgpr[i]);
}
for (i = 0; i < 32; i++) {
union {
float64 d;
@ -56,14 +60,19 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &slb_nr);
#endif
qemu_get_betls(f, &sdr1);
for (i = 0; i < 32; i++) {
    qemu_get_betls(f, &env->sr[i]);
}
for (i = 0; i < 2; i++) {
    for (j = 0; j < 8; j++) {
        qemu_get_betls(f, &env->DBAT[i][j]);
    }
}
for (i = 0; i < 2; i++) {
    for (j = 0; j < 8; j++) {
        qemu_get_betls(f, &env->IBAT[i][j]);
    }
}
qemu_get_sbe32s(f, &env->nb_tlb);
qemu_get_sbe32s(f, &env->tlb_per_way);
qemu_get_sbe32s(f, &env->nb_ways);
@ -71,17 +80,19 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &env->id_tlbs);
qemu_get_sbe32s(f, &env->nb_pids);
if (env->tlb.tlb6) {
/* XXX assumes 6xx */
for (i = 0; i < env->nb_tlb; i++) {
qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
}
}
for (i = 0; i < 4; i++) {
    qemu_get_betls(f, &env->pb[i]);
}
for (i = 0; i < 1024; i++) {
    qemu_get_betls(f, &env->spr[i]);
}
if (!cpu->vhyp) {
ppc_store_sdr1(env, sdr1);
}
@ -94,8 +105,9 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &env->error_code);
qemu_get_be32s(f, &env->pending_interrupts);
qemu_get_be32s(f, &env->irq_input_state);
for (i = 0; i < POWERPC_EXCP_NB; i++) {
qemu_get_betls(f, &env->excp_vectors[i]);
}
qemu_get_betls(f, &env->excp_prefix);
qemu_get_betls(f, &env->ivor_mask);
qemu_get_betls(f, &env->ivpr_mask);
@ -253,22 +265,24 @@ static int cpu_pre_save(void *opaque)
env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
}
for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
}
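The 2 * i arithmetic above relies on the interleaved SPR numbering of the BAT halves. As a reading aid, the mapping it assumes (i.e. that each lower-half SPR immediately follows its upper half, SPR_DBAT0L == SPR_DBAT0U + 1):
/*
 * SPR_DBAT0U + 0 -> DBAT0U == DBAT[0][0] (upper half)
 * SPR_DBAT0U + 1 -> DBAT0L == DBAT[1][0] (lower half)
 * SPR_DBAT0U + 2 -> DBAT1U == DBAT[0][1]
 * SPR_DBAT0U + 3 -> DBAT1L == DBAT[1][1]
 * ... and BATs 4..7 live in a separate SPR block starting at
 * SPR_DBAT4U / SPR_IBAT4U, hence the second loop with its i + 4 offset.
 */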
/* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
if (cpu->pre_2_8_migration) {
/* Mask out bits that got added to msr_mask since the versions
* which stupidly included it in the migration stream. */
/*
* Mask out bits that got added to msr_mask since the versions
* which stupidly included it in the migration stream.
*/
target_ulong metamask = 0
#if defined(TARGET_PPC64)
| (1ULL << MSR_TS0)
@ -277,9 +291,10 @@ static int cpu_pre_save(void *opaque)
;
cpu->mig_msr_mask = env->msr_mask & ~metamask;
cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
/* CPU models supported by old machines all have PPC_MEM_TLBIE,
* so we set it unconditionally to allow backward migration from
* a POWER9 host to a POWER8 host.
/*
* CPU models supported by old machines all have
* PPC_MEM_TLBIE, so we set it unconditionally to allow
* backward migration from a POWER9 host to a POWER8 host.
*/
cpu->mig_insns_flags |= PPC_MEM_TLBIE;
cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
@ -379,23 +394,26 @@ static int cpu_post_load(void *opaque, int version_id)
env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
}
for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
}
if (!cpu->vhyp) {
ppc_store_sdr1(env, env->spr[SPR_SDR1]);
}
/* Invalidate all supported msr bits except MSR_TGPR/MSR_HVB before restoring */
/*
* Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
* before restoring
*/
msr = env->msr;
env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
ppc_store_msr(env, msr);
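(As I read this idiom, the XOR primes env->msr so that every writable bit differs from the saved value, forcing the subsequent ppc_store_msr() to treat all of them as changed and re-run their side effects, instead of short-circuiting bits that happen to match.)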
@ -409,7 +427,7 @@ static bool fpu_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
return (cpu->env.insns_flags & PPC_FLOAT);
return cpu->env.insns_flags & PPC_FLOAT;
}
static const VMStateDescription vmstate_fpu = {
@ -428,7 +446,7 @@ static bool altivec_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
return (cpu->env.insns_flags & PPC_ALTIVEC);
return cpu->env.insns_flags & PPC_ALTIVEC;
}
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
@ -483,7 +501,7 @@ static bool vsx_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
return (cpu->env.insns_flags2 & PPC2_VSX);
return cpu->env.insns_flags2 & PPC2_VSX;
}
static const VMStateDescription vmstate_vsx = {
@ -591,7 +609,7 @@ static bool slb_needed(void *opaque)
PowerPCCPU *cpu = opaque;
/* We don't support any of the old segment table based 64-bit CPUs */
return (cpu->env.mmu_model & POWERPC_MMU_64);
return cpu->env.mmu_model & POWERPC_MMU_64;
}
static int slb_post_load(void *opaque, int version_id)
@ -600,8 +618,10 @@ static int slb_post_load(void *opaque, int version_id)
CPUPPCState *env = &cpu->env;
int i;
/* We've pulled in the raw esid and vsid values from the migration
* stream, but we need to recompute the page size pointers */
/*
* We've pulled in the raw esid and vsid values from the migration
* stream, but we need to recompute the page size pointers
*/
for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
/* Migration source had bad values in its SLB */


@ -27,7 +27,7 @@
#include "internal.h"
#include "qemu/atomic128.h"
//#define DEBUG_OP
/* #define DEBUG_OP */
static inline bool needs_byteswap(const CPUPPCState *env)
{
@ -103,10 +103,11 @@ void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
do_lsw(env, addr, nb, reg, GETPC());
}
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* In an other hand, IBM says this is valid, but rA won't be loaded.
* For now, I'll follow the spec...
/*
* PPC32 specification says we must generate an exception if rA is in
* the range of registers to be loaded. On the other hand, IBM says
* this is valid, but rA won't be loaded. For now, I'll follow the
* spec...
*/
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
uint32_t ra, uint32_t rb)
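The wrap-around at r31 makes the "rA in the range of registers" test above easy to get wrong; a minimal sketch of one way to express it (hypothetical helper, not necessarily the exact form used in the helper body):

#include <stdbool.h>

/* true if ra falls within the nr registers starting at reg,
 * taking the r31 -> r0 wrap into account */
static bool lsw_reg_in_range(int reg, int nr, int ra)
{
    return (ra >= reg && ra < reg + nr) ||
           (reg + nr > 32 && ra < reg + nr - 32);
}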
@ -199,7 +200,8 @@ void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
addr &= ~(env->dcache_line_size - 1);
/* Invalidate one cache line :
/*
* Invalidate one cache line:
* PowerPC specification says this is to be treated like a load
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
@ -346,17 +348,19 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
#define LO_IDX 0
#endif
/* We use msr_le to determine index ordering in a vector. However,
byteswapping is not simply controlled by msr_le. We also need to take
into account endianness of the target. This is done for the little-endian
PPC64 user-mode target. */
/*
* We use msr_le to determine index ordering in a vector. However,
* byteswapping is not simply controlled by msr_le. We also need to
* take into account endianness of the target. This is done for the
* little-endian PPC64 user-mode target.
*/
#define LVE(name, access, swap, element) \
void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
target_ulong addr) \
{ \
size_t n_elems = ARRAY_SIZE(r->element); \
int adjust = HI_IDX*(n_elems - 1); \
int adjust = HI_IDX * (n_elems - 1); \
int sh = sizeof(r->element[0]) >> 1; \
int index = (addr & 0xf) >> sh; \
if (msr_le) { \
@ -476,12 +480,13 @@ VSX_STXVL(stxvll, 1)
void helper_tbegin(CPUPPCState *env)
{
/* As a degenerate implementation, always fail tbegin. The reason
/*
* As a degenerate implementation, always fail tbegin. The reason
* given is "Nesting overflow". The "persistent" bit is set,
* providing a hint to the error handler to not retry. The TFIAR
* captures the address of the failure, which is this tbegin
* instruction. Instruction execution will continue with the
* next instruction in memory, which is precisely what we want.
* instruction. Instruction execution will continue with the next
* instruction in memory, which is precisely what we want.
*/
env->spr[SPR_TEXASR] =


@ -1,5 +1,4 @@
static const uint8_t mfrom_ROM_table[602] =
{
static const uint8_t mfrom_ROM_table[602] = {
77, 77, 76, 76, 75, 75, 74, 74,
73, 73, 72, 72, 71, 71, 70, 70,
69, 69, 68, 68, 68, 67, 67, 66,


@ -2,7 +2,7 @@
#include "qemu/osdep.h"
#include <math.h>
int main (void)
int main(void)
{
double d;
uint8_t n;
@ -10,7 +10,8 @@ int main (void)
printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
for (i = 0; i < 602; i++) {
/* Extremely decomposed:
/*
* Extremely decomposed:
* T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
*/
@ -23,8 +24,9 @@ int main (void)
d += 0.5;
n = d;
printf("%3d, ", n);
if ((i & 7) == 7)
if ((i & 7) == 7) {
printf("\n ");
}
}
printf("\n};\n");


@ -210,10 +210,11 @@ void ppc_store_msr(CPUPPCState *env, target_ulong value)
hreg_store_msr(env, value, 0);
}
/* This code is lifted from MacOnLinux. It is called whenever
* THRM1,2 or 3 is read an fixes up the values in such a way
* that will make MacOS not hang. These registers exist on some
* 75x and 74xx processors.
/*
* This code is lifted from MacOnLinux. It is called whenever THRM1, 2
* or 3 is read and fixes up the values in such a way that will make
* MacOS not hang. These registers exist on some 75x and 74xx
* processors.
*/
void helper_fixup_thrm(CPUPPCState *env)
{


@ -27,7 +27,7 @@
#include "mmu-hash32.h"
#include "exec/log.h"
//#define DEBUG_BAT
/* #define DEBUG_BAT */
#ifdef DEBUG_BATS
# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
@ -228,8 +228,10 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
if ((sr & 0x1FF00000) >> 20 == 0x07f) {
/* Memory-forced I/O controller interface access */
/* If T=1 and BUID=x'07F', the 601 performs a memory access
/*
* Memory-forced I/O controller interface access
*
* If T=1 and BUID=x'07F', the 601 performs a memory access
* to SR[28-31] LA[4-31], bypassing all protection mechanisms.
*/
*raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
@ -265,9 +267,11 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
}
return 1;
case ACCESS_CACHE:
/* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
/* Should make the instruction do no-op.
* As it already do no-op, it's quite easy :-)
/*
* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
*
* Should make the instruction a no-op. As it already is a
* no-op, it's quite easy :-)
*/
*raddr = eaddr;
return 0;
@ -341,6 +345,24 @@ static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
return -1;
}
static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
{
target_ulong base = ppc_hash32_hpt_base(cpu);
hwaddr offset = pte_offset + 6;
/* The HW performs a non-atomic byte update */
stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
{
target_ulong base = ppc_hash32_hpt_base(cpu);
hwaddr offset = pte_offset + 7;
/* The HW performs a non-atomic byte update */
stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
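A note on the byte offsets in these helpers, following from the big-endian layout of the 8-byte hash PTE (pte0 in bytes 0-3, pte1 in bytes 4-7) and the R/C masks implied by the stores above:
/*
 * HPTE32_R_R = 0x00000100 -> bits 15:8 of pte1 -> PTE byte 6
 * HPTE32_R_C = 0x00000080 -> bits  7:0 of pte1 -> PTE byte 7
 *
 * A one-byte stb_phys() can therefore set R or C without a
 * read-modify-write of the whole PTE, mirroring the hardware's
 * non-atomic byte update.
 */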
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
target_ulong sr, target_ulong eaddr,
ppc_hash_pte32_t *pte)
@ -399,7 +421,6 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
hwaddr pte_offset;
ppc_hash_pte32_t pte;
int prot;
uint32_t new_pte1;
const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
hwaddr raddr;
@ -515,18 +536,20 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* 8. Update PTE referenced and changed bits if necessary */
new_pte1 = pte.pte1 | HPTE32_R_R; /* set referenced bit */
if (rwx == 1) {
new_pte1 |= HPTE32_R_C; /* set changed (dirty) bit */
} else {
/* Treat the page as read-only for now, so that a later write
* will pass through this function again to set the C bit */
prot &= ~PAGE_WRITE;
}
if (new_pte1 != pte.pte1) {
ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
if (!(pte.pte1 & HPTE32_R_R)) {
ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
}
if (!(pte.pte1 & HPTE32_R_C)) {
if (rwx == 1) {
ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
} else {
/*
* Treat the page as read-only for now, so that a later write
* will pass through this function again to set the C bit
*/
prot &= ~PAGE_WRITE;
}
}
/* 9. Determine the real address from the PTE */


@ -30,7 +30,7 @@
#include "hw/hw.h"
#include "mmu-book3s-v3.h"
//#define DEBUG_SLB
/* #define DEBUG_SLB */
#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
@ -58,9 +58,11 @@ static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
LOG_SLB("%s: slot %d %016" PRIx64 " %016"
PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
/* We check for 1T matches on all MMUs here - if the MMU
/*
* We check for 1T matches on all MMUs here - if the MMU
* doesn't have 1T segment support, we will have prevented 1T
* entries from being inserted in the slbmte code. */
* entries from being inserted in the slbmte code.
*/
if (((slb->esid == esid_256M) &&
((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
|| ((slb->esid == esid_1T) &&
@ -103,7 +105,8 @@ void helper_slbia(CPUPPCState *env)
if (slb->esid & SLB_ESID_V) {
slb->esid &= ~SLB_ESID_V;
/* XXX: given the fact that segment size is 256 MB or 1TB,
/*
* XXX: given the fact that segment size is 256 MB or 1TB,
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
@ -126,7 +129,8 @@ static void __helper_slbie(CPUPPCState *env, target_ulong addr,
if (slb->esid & SLB_ESID_V) {
slb->esid &= ~SLB_ESID_V;
/* XXX: given the fact that segment size is 256 MB or 1TB,
/*
* XXX: given the fact that segment size is 256 MB or 1TB,
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
@ -306,8 +310,10 @@ static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
{
CPUPPCState *env = &cpu->env;
unsigned pp, key;
/* Some pp bit combinations have undefined behaviour, so default
* to no access in those cases */
/*
* Some pp bit combinations have undefined behaviour, so default
* to no access in those cases
*/
int prot = 0;
key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
@ -376,7 +382,7 @@ static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
}
key = HPTE64_R_KEY(pte.pte1);
amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;
amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
/* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
/* env->spr[SPR_AMR]); */
@ -547,8 +553,9 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
if (*pshift == 0) {
continue;
}
/* We don't do anything with pshift yet as qemu TLB only deals
* with 4K pages anyway
/*
* We don't do anything with pshift yet as qemu TLB only
* deals with 4K pages anyway
*/
pte->pte0 = pte0;
pte->pte1 = pte1;
@ -572,8 +579,10 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
uint64_t vsid, epnmask, epn, ptem;
const PPCHash64SegmentPageSizes *sps = slb->sps;
/* The SLB store path should prevent any bad page size encodings
* getting in there, so: */
/*
* The SLB store path should prevent any bad page size encodings
* getting in there, so:
*/
assert(sps);
/* If ISL is set in LPCR we need to clamp the page size to 4K */
@ -716,6 +725,39 @@ static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
}
static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 14;
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
return;
}
base = ppc_hash64_hpt_base(cpu);
/* The HW performs a non-atomic byte update */
stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
return;
}
base = ppc_hash64_hpt_base(cpu);
/* The HW performs a non-atomic byte update */
stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
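The same layout reasoning scaled to the 16-byte PTE (pte0 in bytes 0-7, pte1 in bytes 8-15, big-endian) puts the R byte (bits 15:8 of pte1) at offset 14 and the C byte (bits 7:0) at offset 15. When a virtual hypervisor owns the hash table, the update is delegated to its hooks instead; a sketch of what such a hook might look like for an HPT kept as a plain big-endian byte array, assuming QEMU's internal types and an accessor name invented here for illustration:

static void my_vhyp_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                               uint64_t pte1)
{
    uint8_t *hpt = my_vhyp_get_hpt(vhyp);   /* assumed accessor */

    /* same single-byte update as above, on the external table */
    hpt[ptex * HASH_PTE_SIZE_64 + 14] = ((pte1 >> 8) & 0xff) | 0x01;
}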
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
int rwx, int mmu_idx)
{
@ -726,23 +768,25 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
hwaddr ptex;
ppc_hash_pte64_t pte;
int exec_prot, pp_prot, amr_prot, prot;
uint64_t new_pte1;
const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
hwaddr raddr;
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
/* Note on LPCR usage: 970 uses HID4, but our special variant
* of store_spr copies relevant fields into env->spr[SPR_LPCR].
* Similarily we filter unimplemented bits when storing into
* LPCR depending on the MMU version. This code can thus just
* use the LPCR "as-is".
/*
* Note on LPCR usage: 970 uses HID4, but our special variant of
* store_spr copies relevant fields into env->spr[SPR_LPCR].
* Similarly we filter unimplemented bits when storing into LPCR
* depending on the MMU version. This code can thus just use the
* LPCR "as-is".
*/
/* 1. Handle real mode accesses */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
/* Translation is supposedly "off" */
/* In real mode the top 4 effective address bits are (mostly) ignored */
/*
* Translation is supposedly "off", but in real mode the top 4
* effective address bits are (mostly) ignored
*/
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
/* In HV mode, add HRMOR if top EA bit is clear */
@ -871,17 +915,19 @@ skip_slb_search:
/* 6. Update PTE referenced and changed bits if necessary */
new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
if (rwx == 1) {
new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
} else {
/* Treat the page as read-only for now, so that a later write
* will pass through this function again to set the C bit */
prot &= ~PAGE_WRITE;
if (!(pte.pte1 & HPTE64_R_R)) {
ppc_hash64_set_r(cpu, ptex, pte.pte1);
}
if (new_pte1 != pte.pte1) {
ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
if (!(pte.pte1 & HPTE64_R_C)) {
if (rwx == 1) {
ppc_hash64_set_c(cpu, ptex, pte.pte1);
} else {
/*
* Treat the page as read-only for now, so that a later write
* will pass through this function again to set the C bit
*/
prot &= ~PAGE_WRITE;
}
}
/* 7. Determine the real address from the PTE */
@ -940,24 +986,6 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
& TARGET_PAGE_MASK;
}
void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1)
{
hwaddr base;
hwaddr offset = ptex * HASH_PTE_SIZE_64;
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
return;
}
base = ppc_hash64_hpt_base(cpu);
stq_phys(CPU(cpu)->as, base + offset, pte0);
stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
}
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
target_ulong pte0, target_ulong pte1)
{
@ -1023,8 +1051,9 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
return;
}
/* Make one up. Mostly ignore the ESID which will not be
* needed for translation
/*
* Make one up. Mostly ignore the ESID which will not be needed
* for translation
*/
vsid = SLB_VSID_VRMA;
vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
@ -1080,11 +1109,12 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
}
env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
/* XXX We could also write LPID from HID4 here
/*
* XXX We could also write LPID from HID4 here
* but since we don't tag any translation on it
* it doesn't actually matter
*/
/* XXX For proper emulation of 970 we also need
*
* XXX For proper emulation of 970 we also need
* to dig HRMOR out of HID5
*/
break;


@ -10,8 +10,6 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
int mmu_idx);
void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1);
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1);


@ -228,10 +228,10 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
ppc_v3_pate_t pate;
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
assert(ppc64_use_proc_tbl(cpu));
/* Real Mode Access */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
/* HV or virtual hypervisor Real Mode Access */
if ((msr_hv || cpu->vhyp) &&
(((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) {
/* In real mode top 4 effective addr bits (mostly) ignored */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
@ -241,6 +241,16 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
return 0;
}
/*
* Check UPRT (we avoid the check in real mode to deal with
* transitional states during kexec).
*/
if (!ppc64_use_proc_tbl(cpu)) {
qemu_log_mask(LOG_GUEST_ERROR,
"LPCR:UPRT not set in radix mode ! LPCR="
TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
}
/* Virtual Mode Access - get the fully qualified address */
if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
ppc_radix64_raise_segi(cpu, rwx, eaddr);


@ -33,11 +33,11 @@
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
//#define DEBUG_MMU
//#define DEBUG_BATS
//#define DEBUG_SOFTWARE_TLB
//#define DUMP_PAGE_TABLES
//#define FLUSH_ALL_TLBS
/* #define DEBUG_MMU */
/* #define DEBUG_BATS */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DUMP_PAGE_TABLES */
/* #define FLUSH_ALL_TLBS */
#ifdef DEBUG_MMU
# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
@ -152,7 +152,8 @@ static int check_prot(int prot, int rw, int access_type)
}
static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
target_ulong pte1, int h, int rw, int type)
target_ulong pte1, int h,
int rw, int type)
{
target_ulong ptem, mmask;
int access, ret, pteh, ptev, pp;
@ -332,7 +333,8 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, eaddr, tlb->pte1,
rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
0, rw, access_type)) {
case -3:
/* TLB inconsistency */
return -1;
@ -347,9 +349,11 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
break;
case 0:
/* access granted */
/* XXX: we should go on looping to check all TLBs consistency
* but we can speed-up the whole thing as the
* result would be undefined if TLBs are not consistent.
/*
* XXX: we should go on looping to check all TLBs
* consistency but we can speed-up the whole thing as
* the result would be undefined if TLBs are not
* consistent.
*/
ret = 0;
best = nr;
@ -550,14 +554,18 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
/* Direct-store segment : absolutely *BUGGY* for now */
/* Direct-store implies a 32-bit MMU.
/*
* Direct-store implies a 32-bit MMU.
* Check the Segment Register's bus unit ID (BUID).
*/
sr = env->sr[eaddr >> 28];
if ((sr & 0x1FF00000) >> 20 == 0x07f) {
/* Memory-forced I/O controller interface access */
/* If T=1 and BUID=x'07F', the 601 performs a memory access
* to SR[28-31] LA[4-31], bypassing all protection mechanisms.
/*
* Memory-forced I/O controller interface access
*
* If T=1 and BUID=x'07F', the 601 performs a memory
* access to SR[28-31] LA[4-31], bypassing all protection
* mechanisms.
*/
ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@ -578,9 +586,11 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
/* lwarx, ldarx or srwcx. */
return -4;
case ACCESS_CACHE:
/* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
/* Should make the instruction do no-op.
* As it already do no-op, it's quite easy :-)
/*
* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
*
* Should make the instruction a no-op. As it already is a
* no-op, it's quite easy :-)
*/
ctx->raddr = eaddr;
return 0;
@ -942,12 +952,14 @@ static uint32_t mmubooke206_esr(int mmu_idx, bool rw)
return esr;
}
/* Get EPID register given the mmu_idx. If this is regular load,
* construct the EPID access bits from current processor state */
/* Get the effective AS and PR bits and the PID. The PID is returned only if
* EPID load is requested, otherwise the caller must detect the correct EPID.
* Return true if valid EPID is returned. */
/*
* Get EPID register given the mmu_idx. If this is a regular load,
* construct the EPID access bits from current processor state
*
* Get the effective AS and PR bits and the PID. The PID is returned
* only if EPID load is requested, otherwise the caller must detect
* the correct EPID. Return true if valid EPID is returned.
*/
static bool mmubooke206_get_as(CPUPPCState *env,
int mmu_idx, uint32_t *epid_out,
bool *as_out, bool *pr_out)
@ -1369,8 +1381,9 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
case POWERPC_MMU_SOFT_4xx_Z:
if (unlikely(msr_pe != 0)) {
/* 403 family add some particular protections,
* using PBL/PBU registers for accesses with no translation.
/*
* The 403 family adds some particular protections, using
* PBL/PBU registers for accesses with no translation.
*/
in_plb =
/* Check PLB validity */
@ -1453,7 +1466,8 @@ static int get_physical_address_wtlb(
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
} else {
cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n");
cpu_abort(CPU(cpu),
"PowerPC in real mode do not do any translation\n");
}
return -1;
default:
@ -1498,9 +1512,10 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
/* Some MMUs have separate TLBs for code and data. If we only try an
* ACCESS_INT, we may not be able to read instructions mapped by code
* TLBs, so we also try a ACCESS_CODE.
/*
* Some MMUs have separate TLBs for code and data. If we only
* try an ACCESS_INT, we may not be able to read instructions
* mapped by code TLBs, so we also try an ACCESS_CODE.
*/
if (unlikely(get_physical_address(env, &ctx, addr, 0,
ACCESS_CODE) != 0)) {
@ -1805,6 +1820,13 @@ static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
base = BATu & ~0x0001FFFF;
end = base + mask + 0x00020000;
if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
/* Flushing 1024 4K pages is slower than a complete flush */
LOG_BATS("Flush all BATs\n");
tlb_flush(CPU(cs));
LOG_BATS("Flush done\n");
return;
}
LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
TARGET_FMT_lx ")\n", base, end, mask);
for (page = base; page != end; page += TARGET_PAGE_SIZE) {
@ -1834,8 +1856,9 @@ void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
/* When storing valid upper BAT, mask BEPI and BRPN
* and invalidate all TLBs covered by this BAT
/*
* When storing valid upper BAT, mask BEPI and BRPN and
* invalidate all TLBs covered by this BAT
*/
mask = (value << 15) & 0x0FFE0000UL;
env->IBAT[0][nr] = (value & 0x00001FFFUL) |
@ -1865,8 +1888,9 @@ void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
dump_store_bat(env, 'D', 0, nr, value);
if (env->DBAT[0][nr] != value) {
/* When storing valid upper BAT, mask BEPI and BRPN
* and invalidate all TLBs covered by this BAT
/*
* When storing valid upper BAT, mask BEPI and BRPN and
* invalidate all TLBs covered by this BAT
*/
mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
@ -1913,8 +1937,9 @@ void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
do_inval = 1;
#endif
}
/* When storing valid upper BAT, mask BEPI and BRPN
* and invalidate all TLBs covered by this BAT
/*
* When storing valid upper BAT, mask BEPI and BRPN and
* invalidate all TLBs covered by this BAT
*/
env->IBAT[0][nr] = (value & 0x00001FFFUL) |
(value & ~0x0001FFFFUL & ~mask);
@ -2027,7 +2052,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
/* tlbie invalidate TLBs for all segments */
/* XXX: given the fact that there are too many segments to invalidate,
/*
* XXX: given the fact that there are too many segments to invalidate,
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
@ -2044,10 +2070,11 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
/* Actual CPUs invalidate entire congruence classes based on the
* geometry of their TLBs and some OSes take that into account,
* we just mark the TLB to be flushed later (context synchronizing
* event or sync instruction on 32-bit).
/*
* Actual CPUs invalidate entire congruence classes based on
* the geometry of their TLBs and some OSes take that into
* account, we just mark the TLB to be flushed later (context
* synchronizing event or sync instruction on 32-bit).
*/
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
@ -2152,8 +2179,10 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
#endif
if (env->sr[srnum] != value) {
env->sr[srnum] = value;
/* Invalidating 256MB of virtual memory in 4kB pages is way longer than
flusing the whole TLB. */
/*
* Invalidating 256MB of virtual memory in 4kB pages takes way
* longer than flushing the whole TLB.
*/
#if !defined(FLUSH_ALL_TLBS) && 0
{
target_ulong page, end;
@ -2264,10 +2293,12 @@ target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
int nb_BATs;
target_ulong ret = 0;
/* We don't have to generate many instances of this instruction,
/*
* We don't have to generate many instances of this instruction,
* as rac is supervisor only.
*
* XXX: FIX THIS: Pretend we have no BAT
*/
/* XXX: FIX THIS: Pretend we have no BAT */
nb_BATs = env->nb_BATs;
env->nb_BATs = 0;
if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
@ -2422,7 +2453,8 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
}
tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
& PPC4XX_TLBHI_SIZE_MASK);
/* We cannot handle TLB size < TARGET_PAGE_SIZE.
/*
* We cannot handle TLB size < TARGET_PAGE_SIZE.
* If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
*/
if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
@ -2742,7 +2774,8 @@ void helper_booke206_tlbwe(CPUPPCState *env)
}
if (tlb->mas1 & MAS1_VALID) {
/* Invalidate the page in QEMU TLB if it was a valid entry.
/*
* Invalidate the page in QEMU TLB if it was a valid entry.
*
* In "PowerPC e500 Core Family Reference Manual, Rev. 1",
* Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
@ -2751,7 +2784,8 @@ void helper_booke206_tlbwe(CPUPPCState *env)
* "Note that when an L2 TLB entry is written, it may be displacing an
* already valid entry in the same L2 TLB location (a victim). If a
* valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
* TLB entry is automatically invalidated." */
* TLB entry is automatically invalidated."
*/
flush_page(env, tlb);
}
@ -2777,8 +2811,9 @@ void helper_booke206_tlbwe(CPUPPCState *env)
mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
if (!msr_cm) {
/* Executing a tlbwe instruction in 32-bit mode will set
* bits 0:31 of the TLB EPN field to zero.
/*
* Executing a tlbwe instruction in 32-bit mode will set bits
* 0:31 of the TLB EPN field to zero.
*/
mask &= 0xffffffff;
}
@ -3022,10 +3057,13 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
/*****************************************************************************/
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
/*
* Try to fill the TLB and return an exception on error. If retaddr is
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*
* XXX: fix it to restore all registers
*/
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{


@ -27,32 +27,33 @@
#include "monitor/hmp-target.h"
#include "hmp.h"
static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
static target_long monitor_get_ccr(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
unsigned int u;
int i;
u = 0;
for (i = 0; i < 8; i++)
for (i = 0; i < 8; i++) {
u |= env->crf[i] << (32 - (4 * (i + 1)));
}
return u;
}
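The shift places CR field 0 in the most-significant nibble, as architected: for i = 0 the field lands in bits 31:28 (shift 28), and for i = 7 in bits 3:0 (shift 0).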
static target_long monitor_get_decr (const struct MonitorDef *md, int val)
static target_long monitor_get_decr(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
return cpu_ppc_load_decr(env);
}
static target_long monitor_get_tbu (const struct MonitorDef *md, int val)
static target_long monitor_get_tbu(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
return cpu_ppc_load_tbu(env);
}
static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
static target_long monitor_get_tbl(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
return cpu_ppc_load_tbl(env);


@ -1,5 +1,30 @@
# See docs/devel/tracing.txt for syntax documentation.
# kvm.c
kvm_failed_spr_set(int str, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
kvm_failed_spr_set(int spr, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
kvm_failed_spr_get(int spr, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
kvm_failed_fpscr_set(const char *msg) "Unable to set FPSCR to KVM: %s"
kvm_failed_fp_set(const char *fpname, int fpnum, const char *msg) "Unable to set %s%d to KVM: %s"
kvm_failed_vscr_set(const char *msg) "Unable to set VSCR to KVM: %s"
kvm_failed_vr_set(int vr, const char *msg) "Unable to set VR%d to KVM: %s"
kvm_failed_fpscr_get(const char *msg) "Unable to get FPSCR from KVM: %s"
kvm_failed_fp_get(const char *fpname, int fpnum, const char *msg) "Unable to get %s%d from KVM: %s"
kvm_failed_vscr_get(const char *msg) "Unable to get VSCR from KVM: %s"
kvm_failed_vr_get(int vr, const char *msg) "Unable to get VR%d from KVM: %s"
kvm_failed_vpa_addr_get(const char *msg) "Unable to get VPA address from KVM: %s"
kvm_failed_slb_get(const char *msg) "Unable to get SLB shadow state from KVM: %s"
kvm_failed_dtl_get(const char *msg) "Unable to get dispatch trace log state from KVM: %s"
kvm_failed_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
kvm_failed_slb_set(const char *msg) "Unable to set SLB shadow state to KVM: %s"
kvm_failed_dtl_set(const char *msg) "Unable to set dispatch trace log state to KVM: %s"
kvm_failed_null_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
kvm_failed_put_vpa(void) "Warning: Unable to set VPA information to KVM"
kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM"
kvm_injected_interrupt(int irq) "injected interrupt %d"
kvm_handle_dcr_write(void) "handle dcr write"
kvm_handle_dcr_read(void) "handle dcr read"
kvm_handle_halt(void) "handle halt"
kvm_handle_papr_hcall(void) "handle PAPR hypercall"
kvm_handle_epr(void) "handle epr"
kvm_handle_watchdog_expiry(void) "handle watchdog expiry"
kvm_handle_debug_exception(void) "handle debug exception"
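Unlike the DPRINTFs they replace, these trace points can be toggled at runtime rather than at build time, for instance with something like -trace 'kvm_*' on the QEMU command line or the trace-event HMP command (assuming a trace backend with runtime control, such as the default "log" backend).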

File diff suppressed because it is too large


@ -585,11 +585,13 @@ static void gen_mcrfs(DisasContext *ctx)
shift = 4 * nibble;
tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
0xf);
tcg_temp_free(tmp);
tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
/* Only the exception bits (including FX) should be cleared if read */
tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
~((0xF << shift) & FP_EX_CLEAR_BITS));
/* FEX and VX need to be updated, so don't set fpscr directly */
tmask = tcg_const_i32(1 << nibble);
gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
@ -735,7 +737,7 @@ static void gen_mtfsfi(DisasContext *ctx)
/*** Floating-point load ***/
#define GEN_LDF(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -754,7 +756,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_LDUF(name, ldop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -778,7 +780,7 @@ static void glue(gen_, name##u)(DisasContext *ctx)
}
#define GEN_LDUXF(name, ldop, opc, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -802,7 +804,7 @@ static void glue(gen_, name##ux)(DisasContext *ctx)
}
#define GEN_LDXF(name, ldop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -872,8 +874,10 @@ static void gen_lfdp(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
t0 = tcg_temp_new_i64();
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does
necessary 64-bit byteswap already. */
/*
* We only need to swap high and low halves. gen_qemu_ld64_i64
* does the necessary 64-bit byteswap already.
*/
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode) + 1, t0);
@ -904,8 +908,10 @@ static void gen_lfdpx(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
t0 = tcg_temp_new_i64();
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does
necessary 64-bit byteswap already. */
/*
* We only need to swap high and low halves. gen_qemu_ld64_i64
* does the necessary 64-bit byteswap already.
*/
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode) + 1, t0);
@ -966,7 +972,7 @@ static void gen_lfiwzx(DisasContext *ctx)
}
/*** Floating-point store ***/
#define GEN_STF(name, stop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -985,7 +991,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_STUF(name, stop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -1009,7 +1015,7 @@ static void glue(gen_, name##u)(DisasContext *ctx)
}
#define GEN_STUXF(name, stop, opc, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -1033,7 +1039,7 @@ static void glue(gen_, name##ux)(DisasContext *ctx)
}
#define GEN_STXF(name, stop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@ -1103,8 +1109,10 @@ static void gen_stfdp(DisasContext *ctx)
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_imm_index(ctx, EA, 0);
/* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
/*
* We only need to swap high and low halves. gen_qemu_st64_i64
* does the necessary 64-bit byteswap already.
*/
if (unlikely(ctx->le_mode)) {
get_fpr(t0, rD(ctx->opcode) + 1);
gen_qemu_st64_i64(ctx, t0, EA);
@ -1135,8 +1143,10 @@ static void gen_stfdpx(DisasContext *ctx)
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
/* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
/*
* We only need to swap high and low halves. gen_qemu_st64_i64
* does the necessary 64-bit byteswap already.
*/
if (unlikely(ctx->le_mode)) {
get_fpr(t0, rD(ctx->opcode) + 1);
gen_qemu_st64_i64(ctx, t0, EA);
@ -1204,8 +1214,9 @@ static void gen_lfqu(DisasContext *ctx)
gen_addr_add(ctx, t1, t0, 8);
gen_qemu_ld64_i64(ctx, t2, t1);
set_fpr((rd + 1) % 32, t2);
if (ra != 0)
if (ra != 0) {
tcg_gen_mov_tl(cpu_gpr[ra], t0);
}
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free_i64(t2);
@ -1229,8 +1240,9 @@ static void gen_lfqux(DisasContext *ctx)
gen_qemu_ld64_i64(ctx, t2, t1);
set_fpr((rd + 1) % 32, t2);
tcg_temp_free(t1);
if (ra != 0)
if (ra != 0) {
tcg_gen_mov_tl(cpu_gpr[ra], t0);
}
tcg_temp_free(t0);
tcg_temp_free_i64(t2);
}


@ -18,7 +18,8 @@ static inline void gen_evmra(DisasContext *ctx)
TCGv_i64 tmp = tcg_temp_new_i64();
/* tmp := rA_lo + rA_hi << 32 */
tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)],
cpu_gprh[rA(ctx->opcode)]);
/* spe_acc := tmp */
tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
@ -780,7 +781,7 @@ static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
}
#define GEN_SPEOP_LDST(name, opc2, sh) \
static void glue(gen_, name)(DisasContext *ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv t0; \
if (unlikely(!ctx->spe_enabled)) { \
@ -1089,7 +1090,8 @@ static inline void gen_efsabs(DisasContext *ctx)
gen_exception(ctx, POWERPC_EXCP_SPEU);
return;
}
tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
(target_long)~0x80000000LL);
}
static inline void gen_efsnabs(DisasContext *ctx)
{
@ -1097,7 +1099,8 @@ static inline void gen_efsnabs(DisasContext *ctx)
gen_exception(ctx, POWERPC_EXCP_SPEU);
return;
}
tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
0x80000000);
}
static inline void gen_efsneg(DisasContext *ctx)
{
@ -1105,7 +1108,8 @@ static inline void gen_efsneg(DisasContext *ctx)
gen_exception(ctx, POWERPC_EXCP_SPEU);
return;
}
tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
0x80000000);
}
/* Conversion */


@ -15,7 +15,7 @@ static inline TCGv_ptr gen_avr_ptr(int reg)
}
#define GEN_VR_LDX(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 avr; \
@ -28,8 +28,10 @@ static void glue(gen_, name)(DisasContext *ctx)
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
necessary 64-bit byteswap already. */ \
/* \
* We only need to swap high and low halves. gen_qemu_ld64_i64 \
* does the necessary 64-bit byteswap already. \
*/ \
if (ctx->le_mode) { \
gen_qemu_ld64_i64(ctx, avr, EA); \
set_avr64(rD(ctx->opcode), avr, false); \
@ -61,8 +63,10 @@ static void gen_st##name(DisasContext *ctx) \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
/* We only need to swap high and low halves. gen_qemu_st64_i64 does \
necessary 64-bit byteswap already. */ \
/* \
* We only need to swap high and low halves. gen_qemu_st64_i64 \
* does the necessary 64-bit byteswap already. \
*/ \
if (ctx->le_mode) { \
get_avr64(avr, rD(ctx->opcode), false); \
gen_qemu_st64_i64(ctx, avr, EA); \
@ -296,7 +300,7 @@ GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);
#define GEN_VXFORM(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_ptr ra, rb, rd; \
if (unlikely(!ctx->altivec_enabled)) { \
@ -306,7 +310,7 @@ static void glue(gen_, name)(DisasContext *ctx)
ra = gen_avr_ptr(rA(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name (rd, ra, rb); \
gen_helper_##name(rd, ra, rb); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_ptr(rd); \
@ -758,7 +762,7 @@ GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13);
GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14);
#define GEN_VXFORM_NOA(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_ptr rb, rd; \
if (unlikely(!ctx->altivec_enabled)) { \
@ -767,9 +771,9 @@ static void glue(gen_, name)(DisasContext *ctx)
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name (rd, rb); \
gen_helper_##name(rd, rb); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_ptr(rd); \
tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
@ -943,7 +947,7 @@ static void gen_vsldoi(DisasContext *ctx)
rb = gen_avr_ptr(rB(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
sh = tcg_const_i32(VSH(ctx->opcode));
gen_helper_vsldoi (rd, ra, rb, sh);
gen_helper_vsldoi(rd, ra, rb, sh);
tcg_temp_free_ptr(ra);
tcg_temp_free_ptr(rb);
tcg_temp_free_ptr(rd);


@ -751,7 +751,7 @@ static void gen_xxpermdi(DisasContext *ctx)
#define SGN_MASK_SP 0x8000000080000000ull
#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
static void glue(gen_, name)(DisasContext * ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_i64 xb, sgm; \
if (unlikely(!ctx->vsx_enabled)) { \
@ -848,7 +848,7 @@ VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)
#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
static void glue(gen_, name)(DisasContext * ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_i64 xbh, xbl, sgm; \
if (unlikely(!ctx->vsx_enabled)) { \
@ -910,7 +910,7 @@ VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
static void gen_##name(DisasContext * ctx) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_i32 opc; \
if (unlikely(!ctx->vsx_enabled)) { \
@ -923,7 +923,7 @@ static void gen_##name(DisasContext * ctx) \
}
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext * ctx) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_i64 t0; \
TCGv_i64 t1; \
@ -1230,7 +1230,7 @@ static void gen_xxbrw(DisasContext *ctx)
}
#define VSX_LOGICAL(name, vece, tcg_op) \
static void glue(gen_, name)(DisasContext * ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
@ -1251,7 +1251,7 @@ VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
#define VSX_XXMRG(name, high) \
static void glue(gen_, name)(DisasContext * ctx) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_i64 a0, a1, b0, b1, tmp; \
if (unlikely(!ctx->vsx_enabled)) { \
@ -1444,7 +1444,8 @@ static void gen_##name(DisasContext *ctx) \
xb = tcg_const_tl(xB(ctx->opcode)); \
t0 = tcg_temp_new_i32(); \
t1 = tcg_temp_new_i64(); \
/* uimm > 15 out of bound and for \
/* \
* uimm > 15 out of bound and for \
* uimm > 12 handle as per hardware in helper \
*/ \
if (uimm > 15) { \


@ -41,12 +41,13 @@
#include "fpu/softfloat.h"
#include "qapi/qapi-commands-target.h"
//#define PPC_DUMP_CPU
//#define PPC_DEBUG_SPR
//#define PPC_DUMP_SPR_ACCESSES
/* #define PPC_DUMP_CPU */
/* #define PPC_DEBUG_SPR */
/* #define PPC_DUMP_SPR_ACCESSES */
/* #define USE_APPLE_GDB */
/* Generic callbacks:
/*
* Generic callbacks:
* do nothing but store/retrieve spr value
*/
static void spr_load_dump_spr(int sprn)
@ -58,7 +59,7 @@ static void spr_load_dump_spr(int sprn)
#endif
}
static void spr_read_generic (DisasContext *ctx, int gprn, int sprn)
static void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
gen_load_spr(cpu_gpr[gprn], sprn);
spr_load_dump_spr(sprn);
@ -230,13 +231,13 @@ static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
}
}
__attribute__ (( unused ))
ATTRIBUTE_UNUSED
static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}
__attribute__ (( unused ))
ATTRIBUTE_UNUSED
static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
@ -267,20 +268,20 @@ static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
}
}
__attribute__ (( unused ))
ATTRIBUTE_UNUSED
static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}
__attribute__ (( unused ))
ATTRIBUTE_UNUSED
static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}
#if defined(TARGET_PPC64)
__attribute__ (( unused ))
ATTRIBUTE_UNUSED
static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
@ -319,12 +320,16 @@ static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
/* IBAT0L...IBAT7L */
static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}
static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}
static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
@ -359,12 +364,16 @@ static void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
/* DBAT0L...DBAT7L */
static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
offsetof(CPUPPCState,
DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}
static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
offsetof(CPUPPCState,
DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}
static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
@ -473,7 +482,9 @@ static void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}
static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn)
@ -532,7 +543,8 @@ static void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
}
static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn)
@ -661,14 +673,20 @@ static inline void vscr_init(CPUPPCState *env, uint32_t val)
static inline void _spr_register(CPUPPCState *env, int num,
const char *name,
void (*uea_read)(DisasContext *ctx, int gprn, int sprn),
void (*uea_write)(DisasContext *ctx, int sprn, int gprn),
void (*uea_read)(DisasContext *ctx,
int gprn, int sprn),
void (*uea_write)(DisasContext *ctx,
int sprn, int gprn),
#if !defined(CONFIG_USER_ONLY)
void (*oea_read)(DisasContext *ctx, int gprn, int sprn),
void (*oea_write)(DisasContext *ctx, int sprn, int gprn),
void (*hea_read)(DisasContext *opaque, int gprn, int sprn),
void (*hea_write)(DisasContext *opaque, int sprn, int gprn),
void (*oea_read)(DisasContext *ctx,
int gprn, int sprn),
void (*oea_write)(DisasContext *ctx,
int sprn, int gprn),
void (*hea_read)(DisasContext *opaque,
int gprn, int sprn),
void (*hea_write)(DisasContext *opaque,
int sprn, int gprn),
#endif
#if defined(CONFIG_KVM)
uint64_t one_reg_id,
@ -678,7 +696,7 @@ static inline void _spr_register(CPUPPCState *env, int num,
ppc_spr_t *spr;
spr = &env->spr_cb[num];
if (spr->name != NULL ||env-> spr[num] != 0x00000000 ||
if (spr->name != NULL || env->spr[num] != 0x00000000 ||
#if !defined(CONFIG_USER_ONLY)
spr->oea_read != NULL || spr->oea_write != NULL ||
#endif
@ -774,8 +792,10 @@ static void gen_spr_sdr1(CPUPPCState *env)
{
#ifndef CONFIG_USER_ONLY
if (env->has_hv_mode) {
/* SDR1 is a hypervisor resource on CPUs which have a
* hypervisor mode */
/*
* SDR1 is a hypervisor resource on CPUs which have a
* hypervisor mode
*/
spr_register_hv(env, SPR_SDR1, "SDR1",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
@ -1123,7 +1143,8 @@ static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
/* Note, the HV=1 PR=0 case is handled earlier by simply using
/*
* Note, the HV=1 PR=0 case is handled earlier by simply using
* spr_write_generic for HV mode in the SPR table
*/
@ -1157,7 +1178,8 @@ static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
/* Note, the HV=1 case is handled earlier by simply using
/*
* Note, the HV=1 case is handled earlier by simply using
* spr_write_generic for HV mode in the SPR table
*/
@ -1187,7 +1209,8 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
/* Note, the HV=1 case is handled earlier by simply using
/*
* Note, the HV=1 case is handled earlier by simply using
* spr_write_generic for HV mode in the SPR table
*/
@ -1215,10 +1238,13 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
static void gen_spr_amr(CPUPPCState *env)
{
#ifndef CONFIG_USER_ONLY
/* Virtual Page Class Key protection */
/* The AMR is accessible either via SPR 13 or SPR 29. 13 is
/*
* Virtual Page Class Key protection
*
* The AMR is accessible either via SPR 13 or SPR 29. 13 is
* userspace accessible, 29 is privileged. So we only need to set
* the kvm ONE_REG id on one of them; we use 29
* the kvm ONE_REG id on one of them, we use 29
*/
spr_register(env, SPR_UAMR, "UAMR",
&spr_read_generic, &spr_write_amr,
&spr_read_generic, &spr_write_amr,
@ -1902,7 +1928,8 @@ static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask,
/* TLB assist registers */
/* XXX : not implemented */
for (i = 0; i < 8; i++) {
void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = &spr_write_generic32;
void (*uea_write)(DisasContext *ctx, int sprn, int gprn) =
&spr_write_generic32;
if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) {
uea_write = &spr_write_generic;
}
@ -2798,7 +2825,6 @@ static void gen_spr_8xx(CPUPPCState *env)
0x00000000);
}
// XXX: TODO
/*
* AMR => SPR 29 (Power 2.04)
* CTRL => SPR 136 (Power 2.04)
@ -3344,16 +3370,18 @@ static int check_pow_nocheck(CPUPPCState *env)
static int check_pow_hid0(CPUPPCState *env)
{
if (env->spr[SPR_HID0] & 0x00E00000)
if (env->spr[SPR_HID0] & 0x00E00000) {
return 1;
}
return 0;
}
static int check_pow_hid0_74xx(CPUPPCState *env)
{
if (env->spr[SPR_HID0] & 0x00600000)
if (env->spr[SPR_HID0] & 0x00600000) {
return 1;
}
return 0;
}
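As a reading aid: on 750-family parts, HID0 bits 0x00800000, 0x00400000 and 0x00200000 are the DOZE, NAP and SLEEP enables, so the 0x00E00000 mask asks whether some power-saving mode has been selected, while the 74xx variant's 0x00600000 accepts only NAP or SLEEP.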
@ -4602,7 +4630,8 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
dc->desc = "e200 core";
pcc->init_proc = init_proc_e200;
pcc->check_pow = check_pow_hid0;
/* XXX: unimplemented instructions:
/*
* XXX: unimplemented instructions:
* dcblc
* dcbtlst
* dcbtstls
@ -4797,18 +4826,18 @@ static void init_proc_e500(CPUPPCState *env, int version)
* gen_spr_BookE(env, 0x0000000F0000FD7FULL);
*/
switch (version) {
case fsl_e500v1:
case fsl_e500v2:
default:
ivor_mask = 0x0000000F0000FFFFULL;
break;
case fsl_e500mc:
case fsl_e5500:
ivor_mask = 0x000003FE0000FFFFULL;
break;
case fsl_e6500:
ivor_mask = 0x000003FF0000FFFFULL;
break;
case fsl_e500v1:
case fsl_e500v2:
default:
ivor_mask = 0x0000000F0000FFFFULL;
break;
case fsl_e500mc:
case fsl_e5500:
ivor_mask = 0x000003FE0000FFFFULL;
break;
case fsl_e6500:
ivor_mask = 0x000003FF0000FFFFULL;
break;
}
gen_spr_BookE(env, ivor_mask);
gen_spr_usprg3(env);
@ -4848,7 +4877,8 @@ static void init_proc_e500(CPUPPCState *env, int version)
tlbncfg[1] = 0x40028040;
break;
default:
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n",
env->spr[SPR_PVR]);
}
#endif
/* Cache sizes */
@@ -4872,7 +4902,8 @@ static void init_proc_e500(CPUPPCState *env, int version)
         l1cfg1 |= 0x0B83820;
         break;
     default:
-        cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+        cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n",
+                  env->spr[SPR_PVR]);
     }
     gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg);
     /* XXX : not implemented */
@@ -5252,7 +5283,8 @@ static void init_proc_601(CPUPPCState *env)
                  0x00000000);
     /* Memory management */
     init_excp_601(env);
-    /* XXX: beware that dcache line size is 64
+    /*
+     * XXX: beware that dcache line size is 64
      * but dcbz uses 32 bytes "sectors"
      * XXX: this breaks clcs instruction !
      */
@@ -5789,7 +5821,8 @@ static void init_proc_750(CPUPPCState *env)
                  0x00000000);
     /* Memory management */
     gen_low_BATs(env);
-    /* XXX: high BATs are also present but are known to be bugged on
+    /*
+     * XXX: high BATs are also present but are known to be bugged on
      * die version 1.x
      */
     init_excp_7x0(env);
@@ -5971,7 +6004,8 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
     dc->desc = "PowerPC 750 CL";
     pcc->init_proc = init_proc_750cl;
     pcc->check_pow = check_pow_hid0;
-    /* XXX: not implemented:
+    /*
+     * XXX: not implemented:
      * cache lock instructions:
      * dcbz_l
      * floating point paired instructions
@@ -7569,8 +7603,10 @@ static void gen_spr_book3s_altivec(CPUPPCState *env)
                  &spr_read_generic, &spr_write_generic,
                  KVM_REG_PPC_VRSAVE, 0x00000000);

-    /* Can't find information on what this should be on reset. This
-     * value is the one used by 74xx processors. */
+    /*
+     * Can't find information on what this should be on reset. This
+     * value is the one used by 74xx processors.
+     */
     vscr_init(env, 0x00010000);
 }

@@ -8975,8 +9011,9 @@ static void init_ppc_proc(PowerPCCPU *cpu)

     env->irq_inputs = NULL;
     /* Set all exception vectors to an invalid address */
-    for (i = 0; i < POWERPC_EXCP_NB; i++)
+    for (i = 0; i < POWERPC_EXCP_NB; i++) {
         env->excp_vectors[i] = (target_ulong)(-1ULL);
+    }
     env->ivor_mask = 0x00000000;
     env->ivpr_mask = 0x00000000;
     /* Default MMU definitions */
@@ -9108,8 +9145,9 @@ static void init_ppc_proc(PowerPCCPU *cpu)
 #if !defined(CONFIG_USER_ONLY)
     if (env->nb_tlb != 0) {
         int nb_tlb = env->nb_tlb;
-        if (env->id_tlbs != 0)
+        if (env->id_tlbs != 0) {
             nb_tlb *= 2;
+        }
         switch (env->tlb_type) {
         case TLB_6XX:
             env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb);
@@ -9201,8 +9239,9 @@ static void fill_new_table(opc_handler_t **table, int len)
 {
     int i;

-    for (i = 0; i < len; i++)
+    for (i = 0; i < len; i++) {
         table[i] = &invalid_handler;
+    }
 }

 static int create_new_table(opc_handler_t **table, unsigned char idx)
@@ -9219,8 +9258,9 @@ static int create_new_table(opc_handler_t **table, unsigned char idx)
 static int insert_in_table(opc_handler_t **table, unsigned char idx,
                            opc_handler_t *handler)
 {
-    if (table[idx] != &invalid_handler)
+    if (table[idx] != &invalid_handler) {
         return -1;
+    }
     table[idx] = handler;

     return 0;
@@ -9341,17 +9381,20 @@ static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
                 }
             } else {
                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
-                                         insn->opc3, &insn->handler) < 0)
+                                         insn->opc3, &insn->handler) < 0) {
                     return -1;
+                }
             }
         } else {
             if (register_ind_insn(ppc_opcodes, insn->opc1,
-                                  insn->opc2, &insn->handler) < 0)
+                                  insn->opc2, &insn->handler) < 0) {
                 return -1;
+            }
         }
     } else {
-        if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0)
+        if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
             return -1;
+        }
     }

     return 0;
@@ -9363,8 +9406,9 @@ static int test_opcode_table(opc_handler_t **table, int len)

     for (i = 0, count = 0; i < len; i++) {
         /* Consistency fixup */
-        if (table[i] == NULL)
+        if (table[i] == NULL) {
             table[i] = &invalid_handler;
+        }
         if (table[i] != &invalid_handler) {
             if (is_indirect_opcode(table[i])) {
                 tmp = test_opcode_table(ind_table(table[i]),
@@ -9386,8 +9430,9 @@ static int test_opcode_table(opc_handler_t **table, int len)

 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
 {
-    if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0)
+    if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
         printf("*** WARNING: no opcode defined !\n");
+    }
 }

 /*****************************************************************************/
@@ -9726,14 +9771,15 @@ static int ppc_fixup_cpu(PowerPCCPU *cpu)
 {
     CPUPPCState *env = &cpu->env;

-    /* TCG doesn't (yet) emulate some groups of instructions that
-     * are implemented on some otherwise supported CPUs (e.g. VSX
-     * and decimal floating point instructions on POWER7). We
-     * remove unsupported instruction groups from the cpu state's
-     * instruction masks and hope the guest can cope. For at
-     * least the pseries machine, the unavailability of these
-     * instructions can be advertised to the guest via the device
-     * tree. */
+    /*
+     * TCG doesn't (yet) emulate some groups of instructions that are
+     * implemented on some otherwise supported CPUs (e.g. VSX and
+     * decimal floating point instructions on POWER7). We remove
+     * unsupported instruction groups from the cpu state's instruction
+     * masks and hope the guest can cope. For at least the pseries
+     * machine, the unavailability of these instructions can be
+     * advertised to the guest via the device tree.
+     */
     if ((env->insns_flags & ~PPC_TCG_INSNS)
         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
         warn_report("Disabling some instructions which are not "
@@ -9928,31 +9974,37 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp)
            " Bus model : %s\n",
            excp_model, bus_model);
     printf(" MSR features :\n");
-    if (env->flags & POWERPC_FLAG_SPE)
+    if (env->flags & POWERPC_FLAG_SPE) {
         printf(" signal processing engine enable"
                "\n");
-    else if (env->flags & POWERPC_FLAG_VRE)
+    } else if (env->flags & POWERPC_FLAG_VRE) {
         printf(" vector processor enable\n");
-    if (env->flags & POWERPC_FLAG_TGPR)
+    }
+    if (env->flags & POWERPC_FLAG_TGPR) {
         printf(" temporary GPRs\n");
-    else if (env->flags & POWERPC_FLAG_CE)
+    } else if (env->flags & POWERPC_FLAG_CE) {
         printf(" critical input enable\n");
-    if (env->flags & POWERPC_FLAG_SE)
+    }
+    if (env->flags & POWERPC_FLAG_SE) {
         printf(" single-step trace mode\n");
-    else if (env->flags & POWERPC_FLAG_DWE)
+    } else if (env->flags & POWERPC_FLAG_DWE) {
         printf(" debug wait enable\n");
-    else if (env->flags & POWERPC_FLAG_UBLE)
+    } else if (env->flags & POWERPC_FLAG_UBLE) {
         printf(" user BTB lock enable\n");
-    if (env->flags & POWERPC_FLAG_BE)
+    }
+    if (env->flags & POWERPC_FLAG_BE) {
         printf(" branch-step trace mode\n");
-    else if (env->flags & POWERPC_FLAG_DE)
+    } else if (env->flags & POWERPC_FLAG_DE) {
         printf(" debug interrupt enable\n");
-    if (env->flags & POWERPC_FLAG_PX)
+    }
+    if (env->flags & POWERPC_FLAG_PX) {
         printf(" inclusive protection\n");
-    else if (env->flags & POWERPC_FLAG_PMM)
+    } else if (env->flags & POWERPC_FLAG_PMM) {
         printf(" performance monitor mark\n");
-    if (env->flags == POWERPC_FLAG_NONE)
+    }
+    if (env->flags == POWERPC_FLAG_NONE) {
         printf(" none\n");
+    }
     printf(" Time-base/decrementer clock source: %s\n",
            env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
     dump_ppc_insns(env);
@@ -10094,8 +10146,9 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
     const char *p;
     unsigned long pvr;

-    /* Lookup by PVR if cpu_model is valid 8 digit hex number
-     * (excl: 0x prefix if present)
+    /*
+     * Lookup by PVR if cpu_model is valid 8 digit hex number (excl:
+     * 0x prefix if present)
      */
     if (!qemu_strtoul(name, &p, 16, &pvr)) {
         int len = p - name;
@@ -10439,14 +10492,14 @@ static void ppc_cpu_instance_init(Object *obj)
     env->bfd_mach = pcc->bfd_mach;
     env->check_pow = pcc->check_pow;

-    /* Mark HV mode as supported if the CPU has an MSR_HV bit
-     * in the msr_mask. The mask can later be cleared by PAPR
-     * mode but the hv mode support will remain, thus enforcing
-     * that we cannot use priv. instructions in guest in PAPR
-     * mode. For 970 we currently simply don't set HV in msr_mask
-     * thus simulating an "Apple mode" 970. If we ever want to
-     * support 970 HV mode, we'll have to add a processor attribute
-     * of some sort.
+    /*
+     * Mark HV mode as supported if the CPU has an MSR_HV bit in the
+     * msr_mask. The mask can later be cleared by PAPR mode but the hv
+     * mode support will remain, thus enforcing that we cannot use
+     * priv. instructions in guest in PAPR mode. For 970 we currently
+     * simply don't set HV in msr_mask thus simulating an "Apple mode"
+     * 970. If we ever want to support 970 HV mode, we'll have to add
+     * a processor attribute of some sort.
      */
 #if !defined(CONFIG_USER_ONLY)
     env->has_hv_mode = !!(env->msr_mask & MSR_HVB);
@@ -10573,7 +10626,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
     cc->tcg_initialize = ppc_translate_init;
 #endif
     cc->disas_set_info = ppc_disas_set_info;

     dc->fw_name = "PowerPC,UNKNOWN";
 }