ppc patch queue 2019-10-24

Last pull request before soft freeze.

* Lots of fixes and cleanups for spapr interrupt controllers
* More SLOF updates to fix problems with full FDT rendering at CAS
  time (alas, more yet are to come)
* A few other assorted changes

This isn't quite as well tested as I usually try to do before a pull
request.  But I've been sick and running into some other difficulties,
and wanted to get this sent out before heading towards KVM forum.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAl2xXWcACgkQbDjKyiDZ
s5Jy/BAAsSo514vGCjdszXcRH3nFeODKJadlSsUX+32JFP1yJS9ooxkcmIN7o9Wp
3VCkMHQPVV9jjIvvShWOSGfDDO3o8fTEucOIX/Nn9wfq+NiY+EJst0v+8OT48CSX
LEXiy9Wghs9pZMLCUZ3rlLPBiU/Lhzf+KTCoUdc40tfoZMMz1lp/Uy8IdIYTYwLl
/z++r4X8FOsXsDDsFopWffVdVBLJz6Var6NgBa8ISk2gGnUOAKsrTE3bD9L6n4PR
YYbMSkv+SbvXg4gm53jUb9cQgpBqQpWHUYBIbKia/16EzbIkkZjFE2jGQMP5c72h
ZOml7ZQtQVWIEEZwKPN67S8bKiVbEfayxHYViejn/uUqv3AwW0wi7FlBVv37YNJ4
TxPvLBu+0DaFbk5y6/XHyL6XomG1/oH6qXOM2JhIWON7HI3rRWoMQbZ6QVJ1Gwk2
uwrvOOL5kVZySotOw5bDkTXYp/Nm1JE4QwOXFPkXzaekcZhRlEqqrkBddhKtF80p
1e5hGp5RgoILIe8uHJQ7decUMk889J7Qdtakv6BWvOci4dbIiZEp/smFlzgTcPnW
DQJONP/awnoAOS3v0bItf59DROvkZ5xyv8yQZFP3qThSOfZl4e95WNRbtR3vtjU4
Bl4Pdte15URKy5nM0XnnLg9mzl2xufdwEsu76lQMuYpe6nCI2h0=
=FRHF
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.2-20191024' into staging

# gpg: Signature made Thu 24 Oct 2019 09:14:31 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.2-20191024: (28 commits)
  spapr/xive: Set the OS CAM line at reset
  ppc/pnv: Fix naming of routines realizing the CPUs
  ppc: Reset the interrupt presenter from the CPU reset handler
  ppc/pnv: Add a PnvChip pointer to PnvCore
  ppc/pnv: Introduce a PnvCore reset handler
  spapr_cpu_core: Implement DeviceClass::reset
  spapr: move CPU reset after presenter creation
  spapr: Don't request to unplug the same core twice
  pseries: Update SLOF firmware image
  spapr: Move SpaprIrq::nr_xirqs to SpaprMachineClass
  spapr: Remove SpaprIrq::nr_msis
  spapr, xics, xive: Move SpaprIrq::post_load hook to backends
  spapr, xics, xive: Move SpaprIrq::reset hook logic into activate/deactivate
  spapr: Remove SpaprIrq::init_kvm hook
  spapr, xics, xive: Match signatures for XICS and XIVE KVM connect routines
  spapr, xics, xive: Move dt_populate from SpaprIrq to SpaprInterruptController
  spapr, xics, xive: Move print_info from SpaprIrq to SpaprInterruptController
  spapr, xics, xive: Move set_irq from SpaprIrq to SpaprInterruptController
  spapr: Formalize notion of active interrupt controller
  spapr, xics, xive: Move irq claim and free from SpaprIrq to SpaprInterruptController
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 58560ad254
@@ -385,7 +385,7 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
PnvXive *xive = PNV_XIVE(xrtr);

if (pnv_xive_get_ic(blk) != xive) {
xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx));
xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
return -1;
}

@@ -431,7 +431,7 @@ static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
PnvXive *xive = PNV_XIVE(xn);
uint8_t blk = xive->chip->chip_id;

xive_router_notify(xn, XIVE_SRCNO(blk, srcno));
xive_router_notify(xn, XIVE_EAS(blk, srcno));
}

/*
@@ -1225,12 +1225,24 @@ static const MemoryRegionOps pnv_xive_ic_reg_ops = {

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
uint8_t blk;
uint32_t idx;

if (val & XIVE_TRIGGER_END) {
xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
addr, val);
return;
}

/*
* Forward the source event notification directly to the Router.
* The source interrupt number should already be correctly encoded
* with the chip block id by the sending device (PHB, PSI).
*/
xive_router_notify(XIVE_NOTIFIER(xive), val);
blk = XIVE_EAS_BLOCK(val);
idx = XIVE_EAS_INDEX(val);

xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
@@ -1566,7 +1578,7 @@ void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
XiveRouter *xrtr = XIVE_ROUTER(xive);
uint8_t blk = xive->chip->chip_id;
uint32_t srcno0 = XIVE_SRCNO(blk, 0);
uint32_t srcno0 = XIVE_EAS(blk, 0);
uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
uint32_t nr_ends = pnv_xive_nr_ends(xive);
XiveEAS eas;
@ -205,23 +205,6 @@ void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
|
||||
memory_region_set_enabled(&xive->end_source.esb_mmio, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* When a Virtual Processor is scheduled to run on a HW thread, the
|
||||
* hypervisor pushes its identifier in the OS CAM line. Emulate the
|
||||
* same behavior under QEMU.
|
||||
*/
|
||||
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
|
||||
{
|
||||
uint8_t nvt_blk;
|
||||
uint32_t nvt_idx;
|
||||
uint32_t nvt_cam;
|
||||
|
||||
spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);
|
||||
|
||||
nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
|
||||
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
|
||||
}
|
||||
|
||||
static void spapr_xive_end_reset(XiveEND *end)
|
||||
{
|
||||
memset(end, 0, sizeof(*end));
|
||||
@ -462,10 +445,10 @@ static int vmstate_spapr_xive_pre_save(void *opaque)
|
||||
* Called by the sPAPR IRQ backend 'post_load' method at the machine
|
||||
* level.
|
||||
*/
|
||||
int spapr_xive_post_load(SpaprXive *xive, int version_id)
|
||||
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
|
||||
{
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
return kvmppc_xive_post_load(xive, version_id);
|
||||
return kvmppc_xive_post_load(SPAPR_XIVE(intc), version_id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -487,49 +470,10 @@ static const VMStateDescription vmstate_spapr_xive = {
|
||||
},
|
||||
};
|
||||
|
||||
static Property spapr_xive_properties[] = {
|
||||
DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
|
||||
DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
|
||||
DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
|
||||
DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static void spapr_xive_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
|
||||
|
||||
dc->desc = "sPAPR XIVE Interrupt Controller";
|
||||
dc->props = spapr_xive_properties;
|
||||
dc->realize = spapr_xive_realize;
|
||||
dc->vmsd = &vmstate_spapr_xive;
|
||||
|
||||
xrc->get_eas = spapr_xive_get_eas;
|
||||
xrc->get_end = spapr_xive_get_end;
|
||||
xrc->write_end = spapr_xive_write_end;
|
||||
xrc->get_nvt = spapr_xive_get_nvt;
|
||||
xrc->write_nvt = spapr_xive_write_nvt;
|
||||
xrc->get_tctx = spapr_xive_get_tctx;
|
||||
}
|
||||
|
||||
static const TypeInfo spapr_xive_info = {
|
||||
.name = TYPE_SPAPR_XIVE,
|
||||
.parent = TYPE_XIVE_ROUTER,
|
||||
.instance_init = spapr_xive_instance_init,
|
||||
.instance_size = sizeof(SpaprXive),
|
||||
.class_init = spapr_xive_class_init,
|
||||
};
|
||||
|
||||
static void spapr_xive_register_types(void)
|
||||
{
|
||||
type_register_static(&spapr_xive_info);
|
||||
}
|
||||
|
||||
type_init(spapr_xive_register_types)
|
||||
|
||||
int spapr_xive_irq_claim(SpaprXive *xive, int lisn, bool lsi, Error **errp)
|
||||
static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
|
||||
bool lsi, Error **errp)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
XiveSource *xsrc = &xive->source;
|
||||
|
||||
assert(lisn < xive->nr_irqs);
|
||||
@ -554,13 +498,227 @@ int spapr_xive_irq_claim(SpaprXive *xive, int lisn, bool lsi, Error **errp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void spapr_xive_irq_free(SpaprXive *xive, int lisn)
|
||||
static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
assert(lisn < xive->nr_irqs);
|
||||
|
||||
xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
|
||||
}
|
||||
|
||||
static Property spapr_xive_properties[] = {
|
||||
DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
|
||||
DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
|
||||
DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
|
||||
DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
Object *obj;
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
|
||||
obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(xive), errp);
|
||||
if (!obj) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
spapr_cpu->tctx = XIVE_TCTX(obj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
|
||||
{
|
||||
uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
|
||||
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
|
||||
}
|
||||
|
||||
static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
|
||||
PowerPCCPU *cpu)
|
||||
{
|
||||
XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
|
||||
uint8_t nvt_blk;
|
||||
uint32_t nvt_idx;
|
||||
|
||||
xive_tctx_reset(tctx);
|
||||
|
||||
/*
|
||||
* When a Virtual Processor is scheduled to run on a HW thread,
|
||||
* the hypervisor pushes its identifier in the OS CAM line.
|
||||
* Emulate the same behavior under QEMU.
|
||||
*/
|
||||
spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);
|
||||
|
||||
xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
|
||||
}
|
||||
|
||||
static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
kvmppc_xive_source_set_irq(&xive->source, irq, val);
|
||||
} else {
|
||||
xive_source_set_irq(&xive->source, irq, val);
|
||||
}
|
||||
}
|
||||
|
||||
static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
CPUState *cs;
|
||||
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
|
||||
}
|
||||
|
||||
spapr_xive_pic_print_info(xive, mon);
|
||||
}
|
||||
|
||||
static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
|
||||
void *fdt, uint32_t phandle)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
int node;
|
||||
uint64_t timas[2 * 2];
|
||||
/* Interrupt number ranges for the IPIs */
|
||||
uint32_t lisn_ranges[] = {
|
||||
cpu_to_be32(0),
|
||||
cpu_to_be32(nr_servers),
|
||||
};
|
||||
/*
|
||||
* EQ size - the sizes of pages supported by the system 4K, 64K,
|
||||
* 2M, 16M. We only advertise 64K for the moment.
|
||||
*/
|
||||
uint32_t eq_sizes[] = {
|
||||
cpu_to_be32(16), /* 64K */
|
||||
};
|
||||
/*
|
||||
* The following array is in sync with the reserved priorities
|
||||
* defined by the 'spapr_xive_priority_is_reserved' routine.
|
||||
*/
|
||||
uint32_t plat_res_int_priorities[] = {
|
||||
cpu_to_be32(7), /* start */
|
||||
cpu_to_be32(0xf8), /* count */
|
||||
};
|
||||
|
||||
/* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
|
||||
timas[0] = cpu_to_be64(xive->tm_base +
|
||||
XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
|
||||
timas[1] = cpu_to_be64(1ull << TM_SHIFT);
|
||||
timas[2] = cpu_to_be64(xive->tm_base +
|
||||
XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
|
||||
timas[3] = cpu_to_be64(1ull << TM_SHIFT);
|
||||
|
||||
_FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));
|
||||
|
||||
_FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
|
||||
_FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
|
||||
|
||||
_FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
|
||||
_FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
|
||||
sizeof(eq_sizes)));
|
||||
_FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
|
||||
sizeof(lisn_ranges)));
|
||||
|
||||
/* For Linux to link the LSIs to the interrupt controller. */
|
||||
_FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
|
||||
_FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
|
||||
|
||||
/* For SLOF */
|
||||
_FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
|
||||
_FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
|
||||
|
||||
/*
|
||||
* The "ibm,plat-res-int-priorities" property defines the priority
|
||||
* ranges reserved by the hypervisor
|
||||
*/
|
||||
_FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
|
||||
plat_res_int_priorities, sizeof(plat_res_int_priorities)));
|
||||
}
|
||||
|
||||
static int spapr_xive_activate(SpaprInterruptController *intc, Error **errp)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
|
||||
if (kvm_enabled()) {
|
||||
int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, errp);
|
||||
if (rc < 0) {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
/* Activate the XIVE MMIOs */
|
||||
spapr_xive_mmio_set_enabled(xive, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void spapr_xive_deactivate(SpaprInterruptController *intc)
|
||||
{
|
||||
SpaprXive *xive = SPAPR_XIVE(intc);
|
||||
|
||||
spapr_xive_mmio_set_enabled(xive, false);
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
kvmppc_xive_disconnect(intc);
|
||||
}
|
||||
}
|
||||
|
||||
static void spapr_xive_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
|
||||
SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
|
||||
|
||||
dc->desc = "sPAPR XIVE Interrupt Controller";
|
||||
dc->props = spapr_xive_properties;
|
||||
dc->realize = spapr_xive_realize;
|
||||
dc->vmsd = &vmstate_spapr_xive;
|
||||
|
||||
xrc->get_eas = spapr_xive_get_eas;
|
||||
xrc->get_end = spapr_xive_get_end;
|
||||
xrc->write_end = spapr_xive_write_end;
|
||||
xrc->get_nvt = spapr_xive_get_nvt;
|
||||
xrc->write_nvt = spapr_xive_write_nvt;
|
||||
xrc->get_tctx = spapr_xive_get_tctx;
|
||||
|
||||
sicc->activate = spapr_xive_activate;
|
||||
sicc->deactivate = spapr_xive_deactivate;
|
||||
sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
|
||||
sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
|
||||
sicc->claim_irq = spapr_xive_claim_irq;
|
||||
sicc->free_irq = spapr_xive_free_irq;
|
||||
sicc->set_irq = spapr_xive_set_irq;
|
||||
sicc->print_info = spapr_xive_print_info;
|
||||
sicc->dt = spapr_xive_dt;
|
||||
sicc->post_load = spapr_xive_post_load;
|
||||
}
|
||||
|
||||
static const TypeInfo spapr_xive_info = {
|
||||
.name = TYPE_SPAPR_XIVE,
|
||||
.parent = TYPE_XIVE_ROUTER,
|
||||
.instance_init = spapr_xive_instance_init,
|
||||
.instance_size = sizeof(SpaprXive),
|
||||
.class_init = spapr_xive_class_init,
|
||||
.interfaces = (InterfaceInfo[]) {
|
||||
{ TYPE_SPAPR_INTC },
|
||||
{ }
|
||||
},
|
||||
};
|
||||
|
||||
static void spapr_xive_register_types(void)
|
||||
{
|
||||
type_register_static(&spapr_xive_info);
|
||||
}
|
||||
|
||||
type_init(spapr_xive_register_types)
|
||||
|
||||
/*
|
||||
* XIVE hcalls
|
||||
*
|
||||
@ -1540,65 +1698,3 @@ void spapr_xive_hcall_init(SpaprMachineState *spapr)
|
||||
spapr_register_hypercall(H_INT_SYNC, h_int_sync);
|
||||
spapr_register_hypercall(H_INT_RESET, h_int_reset);
|
||||
}
|
||||
|
||||
void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
|
||||
uint32_t phandle)
|
||||
{
|
||||
SpaprXive *xive = spapr->xive;
|
||||
int node;
|
||||
uint64_t timas[2 * 2];
|
||||
/* Interrupt number ranges for the IPIs */
|
||||
uint32_t lisn_ranges[] = {
|
||||
cpu_to_be32(0),
|
||||
cpu_to_be32(nr_servers),
|
||||
};
|
||||
/*
|
||||
* EQ size - the sizes of pages supported by the system 4K, 64K,
|
||||
* 2M, 16M. We only advertise 64K for the moment.
|
||||
*/
|
||||
uint32_t eq_sizes[] = {
|
||||
cpu_to_be32(16), /* 64K */
|
||||
};
|
||||
/*
|
||||
* The following array is in sync with the reserved priorities
|
||||
* defined by the 'spapr_xive_priority_is_reserved' routine.
|
||||
*/
|
||||
uint32_t plat_res_int_priorities[] = {
|
||||
cpu_to_be32(7), /* start */
|
||||
cpu_to_be32(0xf8), /* count */
|
||||
};
|
||||
|
||||
/* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
|
||||
timas[0] = cpu_to_be64(xive->tm_base +
|
||||
XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
|
||||
timas[1] = cpu_to_be64(1ull << TM_SHIFT);
|
||||
timas[2] = cpu_to_be64(xive->tm_base +
|
||||
XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
|
||||
timas[3] = cpu_to_be64(1ull << TM_SHIFT);
|
||||
|
||||
_FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));
|
||||
|
||||
_FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
|
||||
_FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
|
||||
|
||||
_FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
|
||||
_FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
|
||||
sizeof(eq_sizes)));
|
||||
_FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
|
||||
sizeof(lisn_ranges)));
|
||||
|
||||
/* For Linux to link the LSIs to the interrupt controller. */
|
||||
_FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
|
||||
_FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
|
||||
|
||||
/* For SLOF */
|
||||
_FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
|
||||
_FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
|
||||
|
||||
/*
|
||||
* The "ibm,plat-res-int-priorities" property defines the priority
|
||||
* ranges reserved by the hypervisor
|
||||
*/
|
||||
_FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
|
||||
plat_res_int_priorities, sizeof(plat_res_int_priorities)));
|
||||
}
|
||||
|
@@ -740,8 +740,9 @@ static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
* All the XIVE memory regions are now backed by mappings from the KVM
* XIVE device.
*/
void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
int kvmppc_xive_connect(SpaprInterruptController *intc, Error **errp)
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc = &xive->source;
Error *local_err = NULL;
size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
@@ -753,19 +754,19 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
* rebooting under the XIVE-only interrupt mode.
*/
if (xive->fd != -1) {
return;
return 0;
}

if (!kvmppc_has_cap_xive()) {
error_setg(errp, "IRQ_XIVE capability must be present for KVM");
return;
return -1;
}

/* First, create the KVM XIVE device */
xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
if (xive->fd < 0) {
error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
return;
return -1;
}

/*
@@ -821,15 +822,17 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
kvm_kernel_irqchip = true;
kvm_msi_via_irqfd_allowed = true;
kvm_gsi_direct_mapping = true;
return;
return 0;

fail:
error_propagate(errp, local_err);
kvmppc_xive_disconnect(xive, NULL);
kvmppc_xive_disconnect(intc);
return -1;
}

void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc;
size_t esb_len;

@@ -838,11 +841,6 @@ void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
return;
}

if (!kvmppc_has_cap_xive()) {
error_setg(errp, "IRQ_XIVE capability must be present for KVM");
return;
}

/* Clear the KVM mapping */
xsrc = &xive->source;
esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
@@ -274,10 +274,8 @@ static const VMStateDescription vmstate_icp_server = {
},
};

static void icp_reset_handler(void *dev)
void icp_reset(ICPState *icp)
{
ICPState *icp = ICP(dev);

icp->xirr = 0;
icp->pending_priority = 0xff;
icp->mfrr = 0xff;
@@ -288,7 +286,7 @@ static void icp_reset_handler(void *dev)
if (kvm_irqchip_in_kernel()) {
Error *local_err = NULL;

icp_set_kvm_state(ICP(dev), &local_err);
icp_set_kvm_state(icp, &local_err);
if (local_err) {
error_report_err(local_err);
}
@@ -351,7 +349,6 @@ static void icp_realize(DeviceState *dev, Error **errp)
}
}

qemu_register_reset(icp_reset_handler, dev);
vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}

@@ -360,7 +357,6 @@ static void icp_unrealize(DeviceState *dev, Error **errp)
ICPState *icp = ICP(dev);

vmstate_unregister(NULL, &vmstate_icp_server, icp);
qemu_unregister_reset(icp_reset_handler, dev);
}

static void icp_class_init(ObjectClass *klass, void *data)
@@ -369,6 +365,11 @@ static void icp_class_init(ObjectClass *klass, void *data)

dc->realize = icp_realize;
dc->unrealize = icp_unrealize;
/*
* Reason: part of XICS interrupt controller, needs to be wired up
* by icp_create().
*/
dc->user_creatable = false;
}

static const TypeInfo icp_info = {
@@ -689,6 +690,11 @@ static void ics_class_init(ObjectClass *klass, void *data)
dc->props = ics_properties;
dc->reset = ics_reset;
dc->vmsd = &vmstate_ics;
/*
* Reason: part of XICS interrupt controller, needs to be wired up,
* e.g. by spapr_irq_init().
*/
dc->user_creatable = false;
}

static const TypeInfo ics_info = {
@@ -342,8 +342,9 @@ void ics_kvm_set_irq(ICSState *ics, int srcno, int val)
}
}

int xics_kvm_connect(SpaprMachineState *spapr, Error **errp)
int xics_kvm_connect(SpaprInterruptController *intc, Error **errp)
{
ICSState *ics = ICS_SPAPR(intc);
int rc;
CPUState *cs;
Error *local_err = NULL;
@@ -413,7 +414,7 @@ int xics_kvm_connect(SpaprMachineState *spapr, Error **errp)
}

/* Update the KVM sources */
ics_set_kvm_state(spapr->ics, &local_err);
ics_set_kvm_state(ics, &local_err);
if (local_err) {
goto fail;
}
@@ -431,11 +432,11 @@ int xics_kvm_connect(SpaprMachineState *spapr, Error **errp)

fail:
error_propagate(errp, local_err);
xics_kvm_disconnect(spapr, NULL);
xics_kvm_disconnect(intc);
return -1;
}

void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp)
void xics_kvm_disconnect(SpaprInterruptController *intc)
{
/*
* Only on P9 using the XICS-on XIVE KVM device:
@ -308,8 +308,8 @@ static void ics_spapr_realize(DeviceState *dev, Error **errp)
|
||||
spapr_register_hypercall(H_IPOLL, h_ipoll);
|
||||
}
|
||||
|
||||
void spapr_dt_xics(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
|
||||
uint32_t phandle)
|
||||
static void xics_spapr_dt(SpaprInterruptController *intc, uint32_t nr_servers,
|
||||
void *fdt, uint32_t phandle)
|
||||
{
|
||||
uint32_t interrupt_server_ranges_prop[] = {
|
||||
0, cpu_to_be32(nr_servers),
|
||||
@ -330,19 +330,132 @@ void spapr_dt_xics(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
|
||||
_FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
|
||||
}
|
||||
|
||||
static int xics_spapr_cpu_intc_create(SpaprInterruptController *intc,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
ICSState *ics = ICS_SPAPR(intc);
|
||||
Object *obj;
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
|
||||
obj = icp_create(OBJECT(cpu), TYPE_ICP, ics->xics, errp);
|
||||
if (!obj) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
spapr_cpu->icp = ICP(obj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xics_spapr_cpu_intc_reset(SpaprInterruptController *intc,
|
||||
PowerPCCPU *cpu)
|
||||
{
|
||||
icp_reset(spapr_cpu_state(cpu)->icp);
|
||||
}
|
||||
|
||||
static int xics_spapr_claim_irq(SpaprInterruptController *intc, int irq,
|
||||
bool lsi, Error **errp)
|
||||
{
|
||||
ICSState *ics = ICS_SPAPR(intc);
|
||||
|
||||
assert(ics);
|
||||
assert(ics_valid_irq(ics, irq));
|
||||
|
||||
if (!ics_irq_free(ics, irq - ics->offset)) {
|
||||
error_setg(errp, "IRQ %d is not free", irq);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
ics_set_irq_type(ics, irq - ics->offset, lsi);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xics_spapr_free_irq(SpaprInterruptController *intc, int irq)
|
||||
{
|
||||
ICSState *ics = ICS_SPAPR(intc);
|
||||
uint32_t srcno = irq - ics->offset;
|
||||
|
||||
assert(ics_valid_irq(ics, irq));
|
||||
|
||||
memset(&ics->irqs[srcno], 0, sizeof(ICSIRQState));
|
||||
}
|
||||
|
||||
static void xics_spapr_set_irq(SpaprInterruptController *intc, int irq, int val)
|
||||
{
|
||||
ICSState *ics = ICS_SPAPR(intc);
|
||||
uint32_t srcno = irq - ics->offset;
|
||||
|
||||
ics_set_irq(ics, srcno, val);
|
||||
}
|
||||
|
||||
static void xics_spapr_print_info(SpaprInterruptController *intc, Monitor *mon)
|
||||
{
|
||||
ICSState *ics = ICS_SPAPR(intc);
|
||||
CPUState *cs;
|
||||
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
|
||||
}
|
||||
|
||||
ics_pic_print_info(ics, mon);
|
||||
}
|
||||
|
||||
static int xics_spapr_post_load(SpaprInterruptController *intc, int version_id)
|
||||
{
|
||||
if (!kvm_irqchip_in_kernel()) {
|
||||
CPUState *cs;
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
icp_resend(spapr_cpu_state(cpu)->icp);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xics_spapr_activate(SpaprInterruptController *intc, Error **errp)
|
||||
{
|
||||
if (kvm_enabled()) {
|
||||
return spapr_irq_init_kvm(xics_kvm_connect, intc, errp);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xics_spapr_deactivate(SpaprInterruptController *intc)
|
||||
{
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
xics_kvm_disconnect(intc);
|
||||
}
|
||||
}
|
||||
|
||||
static void ics_spapr_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
ICSStateClass *isc = ICS_CLASS(klass);
|
||||
SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
|
||||
|
||||
device_class_set_parent_realize(dc, ics_spapr_realize,
|
||||
&isc->parent_realize);
|
||||
sicc->activate = xics_spapr_activate;
|
||||
sicc->deactivate = xics_spapr_deactivate;
|
||||
sicc->cpu_intc_create = xics_spapr_cpu_intc_create;
|
||||
sicc->cpu_intc_reset = xics_spapr_cpu_intc_reset;
|
||||
sicc->claim_irq = xics_spapr_claim_irq;
|
||||
sicc->free_irq = xics_spapr_free_irq;
|
||||
sicc->set_irq = xics_spapr_set_irq;
|
||||
sicc->print_info = xics_spapr_print_info;
|
||||
sicc->dt = xics_spapr_dt;
|
||||
sicc->post_load = xics_spapr_post_load;
|
||||
}
|
||||
|
||||
static const TypeInfo ics_spapr_info = {
|
||||
.name = TYPE_ICS_SPAPR,
|
||||
.parent = TYPE_ICS,
|
||||
.class_init = ics_spapr_class_init,
|
||||
.interfaces = (InterfaceInfo[]) {
|
||||
{ TYPE_SPAPR_INTC },
|
||||
{ }
|
||||
},
|
||||
};
|
||||
|
||||
static void xics_spapr_register_types(void)
|
||||
|
@@ -547,10 +547,8 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
}
}

static void xive_tctx_reset(void *dev)
void xive_tctx_reset(XiveTCTX *tctx)
{
XiveTCTX *tctx = XIVE_TCTX(dev);

memset(tctx->regs, 0, sizeof(tctx->regs));

/* Set some defaults */
@@ -607,13 +605,6 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp)
return;
}
}

qemu_register_reset(xive_tctx_reset, dev);
}

static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
{
qemu_unregister_reset(xive_tctx_reset, dev);
}

static int vmstate_xive_tctx_pre_save(void *opaque)
@@ -668,8 +659,12 @@ static void xive_tctx_class_init(ObjectClass *klass, void *data)

dc->desc = "XIVE Interrupt Thread Context";
dc->realize = xive_tctx_realize;
dc->unrealize = xive_tctx_unrealize;
dc->vmsd = &vmstate_xive_tctx;
/*
* Reason: part of XIVE interrupt controller, needs to be wired up
* by xive_tctx_create().
*/
dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
@@ -1118,6 +1113,11 @@ static void xive_source_class_init(ObjectClass *klass, void *data)
dc->props = xive_source_properties;
dc->realize = xive_source_realize;
dc->vmsd = &vmstate_xive_source;
/*
* Reason: part of XIVE interrupt controller, needs to be wired up,
* e.g. by spapr_xive_instance_init().
*/
dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
@@ -1648,8 +1648,8 @@ do_escalation:
void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
XiveRouter *xrtr = XIVE_ROUTER(xn);
uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
XiveEAS eas;

/* EAS cache lookup */
@@ -1853,6 +1853,11 @@ static void xive_end_source_class_init(ObjectClass *klass, void *data)
dc->desc = "XIVE END Source";
dc->props = xive_end_source_properties;
dc->realize = xive_end_source_realize;
/*
* Reason: part of XIVE interrupt controller, needs to be wired up,
* e.g. by spapr_xive_instance_init().
*/
dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
hw/ppc/pnv.c (18)

@@ -778,6 +778,13 @@ static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu,
pnv_cpu->intc = obj;
}

static void pnv_chip_power8_intc_reset(PnvChip *chip, PowerPCCPU *cpu)
{
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);

icp_reset(ICP(pnv_cpu->intc));
}

/*
* 0:48 Reserved - Read as zeroes
* 49:52 Node ID
@@ -815,6 +822,13 @@ static void pnv_chip_power9_intc_create(PnvChip *chip, PowerPCCPU *cpu,
pnv_cpu->intc = obj;
}

static void pnv_chip_power9_intc_reset(PnvChip *chip, PowerPCCPU *cpu)
{
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);

xive_tctx_reset(XIVE_TCTX(pnv_cpu->intc));
}

/*
* Allowed core identifiers on a POWER8 Processor Chip :
*
@@ -984,6 +998,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
k->cores_mask = POWER8E_CORE_MASK;
k->core_pir = pnv_chip_core_pir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->isa_create = pnv_chip_power8_isa_create;
k->dt_populate = pnv_chip_power8_dt_populate;
k->pic_print_info = pnv_chip_power8_pic_print_info;
@@ -1003,6 +1018,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
k->cores_mask = POWER8_CORE_MASK;
k->core_pir = pnv_chip_core_pir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->isa_create = pnv_chip_power8_isa_create;
k->dt_populate = pnv_chip_power8_dt_populate;
k->pic_print_info = pnv_chip_power8_pic_print_info;
@@ -1022,6 +1038,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data)
k->cores_mask = POWER8_CORE_MASK;
k->core_pir = pnv_chip_core_pir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->isa_create = pnv_chip_power8nvl_isa_create;
k->dt_populate = pnv_chip_power8_dt_populate;
k->pic_print_info = pnv_chip_power8_pic_print_info;
@@ -1191,6 +1208,7 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
k->cores_mask = POWER9_CORE_MASK;
k->core_pir = pnv_chip_core_pir_p9;
k->intc_create = pnv_chip_power9_intc_create;
k->intc_reset = pnv_chip_power9_intc_reset;
k->isa_create = pnv_chip_power9_isa_create;
k->dt_populate = pnv_chip_power9_dt_populate;
k->pic_print_info = pnv_chip_power9_pic_print_info;
@ -40,11 +40,11 @@ static const char *pnv_core_cpu_typename(PnvCore *pc)
|
||||
return cpu_type;
|
||||
}
|
||||
|
||||
static void pnv_cpu_reset(void *opaque)
|
||||
static void pnv_core_cpu_reset(PowerPCCPU *cpu, PnvChip *chip)
|
||||
{
|
||||
PowerPCCPU *cpu = opaque;
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
|
||||
|
||||
cpu_reset(cs);
|
||||
|
||||
@ -55,6 +55,8 @@ static void pnv_cpu_reset(void *opaque)
|
||||
env->gpr[3] = PNV_FDT_ADDR;
|
||||
env->nip = 0x10;
|
||||
env->msr |= MSR_HVB; /* Hypervisor mode */
|
||||
|
||||
pcc->intc_reset(chip, cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -160,7 +162,7 @@ static const MemoryRegionOps pnv_core_power9_xscom_ops = {
|
||||
.endianness = DEVICE_BIG_ENDIAN,
|
||||
};
|
||||
|
||||
static void pnv_realize_vcpu(PowerPCCPU *cpu, PnvChip *chip, Error **errp)
|
||||
static void pnv_core_cpu_realize(PowerPCCPU *cpu, PnvChip *chip, Error **errp)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
int core_pir;
|
||||
@ -192,8 +194,17 @@ static void pnv_realize_vcpu(PowerPCCPU *cpu, PnvChip *chip, Error **errp)
|
||||
|
||||
/* Set time-base frequency to 512 MHz */
|
||||
cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ);
|
||||
}
|
||||
|
||||
qemu_register_reset(pnv_cpu_reset, cpu);
|
||||
static void pnv_core_reset(void *dev)
|
||||
{
|
||||
CPUCore *cc = CPU_CORE(dev);
|
||||
PnvCore *pc = PNV_CORE(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cc->nr_threads; i++) {
|
||||
pnv_core_cpu_reset(pc->threads[i], pc->chip);
|
||||
}
|
||||
}
|
||||
|
||||
static void pnv_core_realize(DeviceState *dev, Error **errp)
|
||||
@ -214,6 +225,7 @@ static void pnv_core_realize(DeviceState *dev, Error **errp)
|
||||
"required link 'chip' not found: ");
|
||||
return;
|
||||
}
|
||||
pc->chip = PNV_CHIP(chip);
|
||||
|
||||
pc->threads = g_new(PowerPCCPU *, cc->nr_threads);
|
||||
for (i = 0; i < cc->nr_threads; i++) {
|
||||
@ -235,7 +247,7 @@ static void pnv_core_realize(DeviceState *dev, Error **errp)
|
||||
}
|
||||
|
||||
for (j = 0; j < cc->nr_threads; j++) {
|
||||
pnv_realize_vcpu(pc->threads[j], PNV_CHIP(chip), &local_err);
|
||||
pnv_core_cpu_realize(pc->threads[j], pc->chip, &local_err);
|
||||
if (local_err) {
|
||||
goto err;
|
||||
}
|
||||
@ -244,6 +256,8 @@ static void pnv_core_realize(DeviceState *dev, Error **errp)
|
||||
snprintf(name, sizeof(name), "xscom-core.%d", cc->core_id);
|
||||
pnv_xscom_region_init(&pc->xscom_regs, OBJECT(dev), pcc->xscom_ops,
|
||||
pc, name, PNV_XSCOM_EX_SIZE);
|
||||
|
||||
qemu_register_reset(pnv_core_reset, pc);
|
||||
return;
|
||||
|
||||
err:
|
||||
@ -255,11 +269,10 @@ err:
|
||||
error_propagate(errp, local_err);
|
||||
}
|
||||
|
||||
static void pnv_unrealize_vcpu(PowerPCCPU *cpu)
|
||||
static void pnv_core_cpu_unrealize(PowerPCCPU *cpu)
|
||||
{
|
||||
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
|
||||
|
||||
qemu_unregister_reset(pnv_cpu_reset, cpu);
|
||||
object_unparent(OBJECT(pnv_cpu_state(cpu)->intc));
|
||||
cpu_remove_sync(CPU(cpu));
|
||||
cpu->machine_data = NULL;
|
||||
@ -273,8 +286,10 @@ static void pnv_core_unrealize(DeviceState *dev, Error **errp)
|
||||
CPUCore *cc = CPU_CORE(dev);
|
||||
int i;
|
||||
|
||||
qemu_unregister_reset(pnv_core_reset, pc);
|
||||
|
||||
for (i = 0; i < cc->nr_threads; i++) {
|
||||
pnv_unrealize_vcpu(pc->threads[i]);
|
||||
pnv_core_cpu_unrealize(pc->threads[i]);
|
||||
}
|
||||
g_free(pc->threads);
|
||||
}
|
||||
|
@@ -660,10 +660,19 @@ static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno)

uint32_t offset =
(psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT);
uint64_t lisn = cpu_to_be64(offset + srcno);
uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
MemTxResult result;

if (valid) {
cpu_physical_memory_write(notify_addr, &lisn, sizeof(lisn));
if (!valid) {
return;
}

address_space_stq_be(&address_space_memory, notify_addr, data,
MEMTXATTRS_UNSPECIFIED, &result);
if (result != MEMTX_OK) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: trigger failed @%"
HWADDR_PRIx "\n", __func__, notif_port);
return;
}
}

@ -1247,8 +1247,7 @@ static void *spapr_build_fdt(SpaprMachineState *spapr)
|
||||
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
|
||||
|
||||
/* /interrupt controller */
|
||||
spapr->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt,
|
||||
PHANDLE_INTC);
|
||||
spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);
|
||||
|
||||
ret = spapr_populate_memory(spapr, fdt);
|
||||
if (ret < 0) {
|
||||
@ -1268,7 +1267,7 @@ static void *spapr_build_fdt(SpaprMachineState *spapr)
|
||||
}
|
||||
|
||||
QLIST_FOREACH(phb, &spapr->phbs, list) {
|
||||
ret = spapr_dt_phb(phb, PHANDLE_INTC, fdt, spapr->irq->nr_msis, NULL);
|
||||
ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
|
||||
if (ret < 0) {
|
||||
error_report("couldn't setup PCI devices in fdt");
|
||||
exit(1);
|
||||
@ -2496,6 +2495,7 @@ static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
|
||||
static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
MachineState *ms = MACHINE(spapr);
|
||||
SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
|
||||
Error *local_err = NULL;
|
||||
bool vsmt_user = !!spapr->vsmt;
|
||||
int kvm_smt = kvmppc_smt_threads();
|
||||
@ -2522,7 +2522,7 @@ static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
|
||||
goto out;
|
||||
}
|
||||
/* In this case, spapr->vsmt has been set by the command line */
|
||||
} else {
|
||||
} else if (!smc->smp_threads_vsmt) {
|
||||
/*
|
||||
* Default VSMT value is tricky, because we need it to be as
|
||||
* consistent as possible (for migration), but this requires
|
||||
@ -2531,6 +2531,8 @@ static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
|
||||
* overwhelmingly common case in production systems.
|
||||
*/
|
||||
spapr->vsmt = MAX(8, smp_threads);
|
||||
} else {
|
||||
spapr->vsmt = smp_threads;
|
||||
}
|
||||
|
||||
/* KVM: If necessary, set the SMT mode: */
|
||||
@ -3739,9 +3741,10 @@ void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
|
||||
spapr_vcpu_id(spapr, cc->core_id));
|
||||
g_assert(drc);
|
||||
|
||||
spapr_drc_detach(drc);
|
||||
|
||||
spapr_hotplug_req_remove_by_index(drc);
|
||||
if (!spapr_drc_unplug_requested(drc)) {
|
||||
spapr_drc_detach(drc);
|
||||
spapr_hotplug_req_remove_by_index(drc);
|
||||
}
|
||||
}
|
||||
|
||||
int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
|
||||
@ -3903,8 +3906,7 @@ int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (spapr_dt_phb(sphb, intc_phandle, fdt, spapr->irq->nr_msis,
|
||||
fdt_start_offset)) {
|
||||
if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
|
||||
error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
|
||||
return -1;
|
||||
}
|
||||
@ -4263,7 +4265,7 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj,
|
||||
{
|
||||
SpaprMachineState *spapr = SPAPR_MACHINE(obj);
|
||||
|
||||
spapr->irq->print_info(spapr, mon);
|
||||
spapr_irq_print_info(spapr, mon);
|
||||
monitor_printf(mon, "irqchip: %s\n",
|
||||
kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
|
||||
}
|
||||
@ -4438,6 +4440,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
|
||||
smc->irq = &spapr_irq_dual;
|
||||
smc->dr_phb_enabled = true;
|
||||
smc->linux_pci_probe = true;
|
||||
smc->smp_threads_vsmt = true;
|
||||
smc->nr_xirqs = SPAPR_NR_XIRQS;
|
||||
}
|
||||
|
||||
static const TypeInfo spapr_machine_info = {
|
||||
@ -4505,6 +4509,7 @@ static void spapr_machine_4_1_class_options(MachineClass *mc)
|
||||
|
||||
spapr_machine_4_2_class_options(mc);
|
||||
smc->linux_pci_probe = false;
|
||||
smc->smp_threads_vsmt = false;
|
||||
compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
|
||||
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
|
||||
}
|
||||
@ -4573,6 +4578,7 @@ static void spapr_machine_3_0_class_options(MachineClass *mc)
|
||||
compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);
|
||||
|
||||
smc->legacy_irq_allocation = true;
|
||||
smc->nr_xirqs = 0x400;
|
||||
smc->irq = &spapr_irq_xics_legacy;
|
||||
}
|
||||
|
||||
|
@ -25,14 +25,14 @@
|
||||
#include "sysemu/hw_accel.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
static void spapr_cpu_reset(void *opaque)
|
||||
static void spapr_reset_vcpu(PowerPCCPU *cpu)
|
||||
{
|
||||
PowerPCCPU *cpu = opaque;
|
||||
CPUState *cs = CPU(cpu);
|
||||
CPUPPCState *env = &cpu->env;
|
||||
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
target_ulong lpcr;
|
||||
SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
|
||||
|
||||
cpu_reset(cs);
|
||||
|
||||
@ -77,9 +77,11 @@ static void spapr_cpu_reset(void *opaque)
|
||||
spapr_cpu->dtl_addr = 0;
|
||||
spapr_cpu->dtl_size = 0;
|
||||
|
||||
spapr_caps_cpu_apply(SPAPR_MACHINE(qdev_get_machine()), cpu);
|
||||
spapr_caps_cpu_apply(spapr, cpu);
|
||||
|
||||
kvm_check_mmu(cpu, &error_fatal);
|
||||
|
||||
spapr_irq_cpu_intc_reset(spapr, cpu);
|
||||
}
|
||||
|
||||
void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip, target_ulong r3)
|
||||
@ -193,7 +195,6 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc)
|
||||
if (!sc->pre_3_0_migration) {
|
||||
vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
|
||||
}
|
||||
qemu_unregister_reset(spapr_cpu_reset, cpu);
|
||||
if (spapr_cpu_state(cpu)->icp) {
|
||||
object_unparent(OBJECT(spapr_cpu_state(cpu)->icp));
|
||||
}
|
||||
@ -204,12 +205,36 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc)
|
||||
object_unparent(OBJECT(cpu));
|
||||
}
|
||||
|
||||
/*
|
||||
* Called when CPUs are hot-plugged.
|
||||
*/
|
||||
static void spapr_cpu_core_reset(DeviceState *dev)
|
||||
{
|
||||
CPUCore *cc = CPU_CORE(dev);
|
||||
SpaprCpuCore *sc = SPAPR_CPU_CORE(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cc->nr_threads; i++) {
|
||||
spapr_reset_vcpu(sc->threads[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Called by the machine reset.
|
||||
*/
|
||||
static void spapr_cpu_core_reset_handler(void *opaque)
|
||||
{
|
||||
spapr_cpu_core_reset(opaque);
|
||||
}
|
||||
|
||||
static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
|
||||
CPUCore *cc = CPU_CORE(dev);
|
||||
int i;
|
||||
|
||||
qemu_unregister_reset(spapr_cpu_core_reset_handler, sc);
|
||||
|
||||
for (i = 0; i < cc->nr_threads; i++) {
|
||||
spapr_unrealize_vcpu(sc->threads[i], sc);
|
||||
}
|
||||
@ -234,12 +259,8 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
|
||||
kvmppc_set_papr(cpu);
|
||||
|
||||
qemu_register_reset(spapr_cpu_reset, cpu);
|
||||
spapr_cpu_reset(cpu);
|
||||
|
||||
spapr->irq->cpu_intc_create(spapr, cpu, &local_err);
|
||||
if (local_err) {
|
||||
goto error_unregister;
|
||||
if (spapr_irq_cpu_intc_create(spapr, cpu, &local_err) < 0) {
|
||||
goto error_intc_create;
|
||||
}
|
||||
|
||||
if (!sc->pre_3_0_migration) {
|
||||
@ -249,8 +270,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
|
||||
|
||||
return;
|
||||
|
||||
error_unregister:
|
||||
qemu_unregister_reset(spapr_cpu_reset, cpu);
|
||||
error_intc_create:
|
||||
cpu_remove_sync(CPU(cpu));
|
||||
error:
|
||||
error_propagate(errp, local_err);
|
||||
@ -337,6 +357,8 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
|
||||
goto err_unrealize;
|
||||
}
|
||||
}
|
||||
|
||||
qemu_register_reset(spapr_cpu_core_reset_handler, sc);
|
||||
return;
|
||||
|
||||
err_unrealize:
|
||||
@ -365,6 +387,7 @@ static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
dc->realize = spapr_cpu_core_realize;
|
||||
dc->unrealize = spapr_cpu_core_unrealize;
|
||||
dc->reset = spapr_cpu_core_reset;
|
||||
dc->props = spapr_cpu_core_properties;
|
||||
scc->cpu_type = data;
|
||||
}
|
||||
|
@ -23,9 +23,20 @@
|
||||
|
||||
#include "trace.h"
|
||||
|
||||
void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
|
||||
static const TypeInfo spapr_intc_info = {
|
||||
.name = TYPE_SPAPR_INTC,
|
||||
.parent = TYPE_INTERFACE,
|
||||
.class_size = sizeof(SpaprInterruptControllerClass),
|
||||
};
|
||||
|
||||
static void spapr_irq_msi_init(SpaprMachineState *spapr)
|
||||
{
|
||||
spapr->irq_map_nr = nr_msis;
|
||||
if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
|
||||
/* Legacy mode doesn't use this allocator */
|
||||
return;
|
||||
}
|
||||
|
||||
spapr->irq_map_nr = spapr_irq_nr_msis(spapr);
|
||||
spapr->irq_map = bitmap_new(spapr->irq_map_nr);
|
||||
}
|
||||
|
||||
@ -59,262 +70,53 @@ void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
|
||||
bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
|
||||
}
|
||||
|
||||
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
|
||||
SpaprIrq *irq, Error **errp)
|
||||
int spapr_irq_init_kvm(int (*fn)(SpaprInterruptController *, Error **),
|
||||
SpaprInterruptController *intc,
|
||||
Error **errp)
|
||||
{
|
||||
MachineState *machine = MACHINE(spapr);
|
||||
MachineState *machine = MACHINE(qdev_get_machine());
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
|
||||
irq->init_kvm(spapr, &local_err);
|
||||
if (local_err && machine_kernel_irqchip_required(machine)) {
|
||||
if (fn(intc, &local_err) < 0) {
|
||||
if (machine_kernel_irqchip_required(machine)) {
|
||||
error_prepend(&local_err,
|
||||
"kernel_irqchip requested but unavailable: ");
|
||||
error_propagate(errp, local_err);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* We failed to initialize the KVM device, fallback to
|
||||
* emulated mode
|
||||
*/
|
||||
error_prepend(&local_err,
|
||||
"kernel_irqchip requested but unavailable: ");
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
"kernel_irqchip allowed but unavailable: ");
|
||||
error_append_hint(&local_err,
|
||||
"Falling back to kernel-irqchip=off\n");
|
||||
warn_report_err(local_err);
|
||||
}
|
||||
|
||||
if (!local_err) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* We failed to initialize the KVM device, fallback to
|
||||
* emulated mode
|
||||
*/
|
||||
error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
|
||||
error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n");
|
||||
warn_report_err(local_err);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* XICS IRQ backend.
|
||||
*/
|
||||
|
||||
static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
|
||||
Error **errp)
|
||||
{
|
||||
ICSState *ics = spapr->ics;
|
||||
|
||||
assert(ics);
|
||||
assert(ics_valid_irq(ics, irq));
|
||||
|
||||
if (!ics_irq_free(ics, irq - ics->offset)) {
|
||||
error_setg(errp, "IRQ %d is not free", irq);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ics_set_irq_type(ics, irq - ics->offset, lsi);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq)
|
||||
{
|
||||
ICSState *ics = spapr->ics;
|
||||
uint32_t srcno = irq - ics->offset;
|
||||
|
||||
assert(ics_valid_irq(ics, irq));
|
||||
|
||||
memset(&ics->irqs[srcno], 0, sizeof(ICSIRQState));
|
||||
}
|
||||
|
||||
static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
|
||||
{
|
||||
CPUState *cs;
|
||||
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
|
||||
}
|
||||
|
||||
ics_pic_print_info(spapr->ics, mon);
|
||||
}
|
||||
|
||||
static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
Object *obj;
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
|
||||
obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
spapr_cpu->icp = ICP(obj);
|
||||
}
|
||||
|
||||
static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
|
||||
{
|
||||
if (!kvm_irqchip_in_kernel()) {
|
||||
CPUState *cs;
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
icp_resend(spapr_cpu_state(cpu)->icp);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void spapr_irq_set_irq_xics(void *opaque, int irq, int val)
|
||||
{
|
||||
SpaprMachineState *spapr = opaque;
|
||||
uint32_t srcno = irq - spapr->ics->offset;
|
||||
|
||||
ics_set_irq(spapr->ics, srcno, val);
|
||||
}
|
||||
|
||||
static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
|
||||
spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
if (kvm_enabled()) {
|
||||
xics_kvm_connect(spapr, errp);
|
||||
}
|
||||
}
|
||||
|
||||
SpaprIrq spapr_irq_xics = {
|
||||
.nr_xirqs = SPAPR_NR_XIRQS,
|
||||
.nr_msis = SPAPR_NR_MSIS,
|
||||
.xics = true,
|
||||
.xive = false,
|
||||
|
||||
.claim = spapr_irq_claim_xics,
|
||||
.free = spapr_irq_free_xics,
|
||||
.print_info = spapr_irq_print_info_xics,
|
||||
.dt_populate = spapr_dt_xics,
|
||||
.cpu_intc_create = spapr_irq_cpu_intc_create_xics,
|
||||
.post_load = spapr_irq_post_load_xics,
|
||||
.reset = spapr_irq_reset_xics,
|
||||
.set_irq = spapr_irq_set_irq_xics,
|
||||
.init_kvm = spapr_irq_init_kvm_xics,
|
||||
};
|
||||
|
||||
/*
|
||||
* XIVE IRQ backend.
|
||||
*/
|
||||
|
||||
static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
|
||||
Error **errp)
|
||||
{
|
||||
return spapr_xive_irq_claim(spapr->xive, irq, lsi, errp);
|
||||
}
|
||||
|
||||
static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq)
|
||||
{
|
||||
spapr_xive_irq_free(spapr->xive, irq);
|
||||
}
|
||||
|
||||
static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
|
||||
Monitor *mon)
|
||||
{
|
||||
CPUState *cs;
|
||||
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
|
||||
}
|
||||
|
||||
spapr_xive_pic_print_info(spapr->xive, mon);
|
||||
}
|
||||
|
||||
static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
Object *obj;
|
||||
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
|
||||
|
||||
obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
spapr_cpu->tctx = XIVE_TCTX(obj);
|
||||
|
||||
/*
|
||||
* (TCG) Early setting the OS CAM line for hotplugged CPUs as they
|
||||
* don't beneficiate from the reset of the XIVE IRQ backend
|
||||
*/
|
||||
spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
|
||||
}
|
||||
|
||||
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
|
||||
{
|
||||
return spapr_xive_post_load(spapr->xive, version_id);
|
||||
}
|
||||
|
||||
static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
CPUState *cs;
|
||||
Error *local_err = NULL;
|
||||
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
/* (TCG) Set the OS CAM line of the thread interrupt context. */
|
||||
spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
|
||||
}
|
||||
|
||||
spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Activate the XIVE MMIOs */
|
||||
spapr_xive_mmio_set_enabled(spapr->xive, true);
|
||||
}
|
||||
|
||||
static void spapr_irq_set_irq_xive(void *opaque, int irq, int val)
|
||||
{
|
||||
SpaprMachineState *spapr = opaque;
|
||||
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
kvmppc_xive_source_set_irq(&spapr->xive->source, irq, val);
|
||||
} else {
|
||||
xive_source_set_irq(&spapr->xive->source, irq, val);
|
||||
}
|
||||
}
|
||||
|
||||
static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
|
||||
{
|
||||
if (kvm_enabled()) {
|
||||
kvmppc_xive_connect(spapr->xive, errp);
|
||||
}
|
||||
}
|
||||
|
||||
SpaprIrq spapr_irq_xive = {
|
||||
.nr_xirqs = SPAPR_NR_XIRQS,
|
||||
.nr_msis = SPAPR_NR_MSIS,
|
||||
.xics = false,
|
||||
.xive = true,
|
||||
|
||||
.claim = spapr_irq_claim_xive,
|
||||
.free = spapr_irq_free_xive,
|
||||
.print_info = spapr_irq_print_info_xive,
|
||||
.dt_populate = spapr_dt_xive,
|
||||
.cpu_intc_create = spapr_irq_cpu_intc_create_xive,
|
||||
.post_load = spapr_irq_post_load_xive,
|
||||
.reset = spapr_irq_reset_xive,
|
||||
.set_irq = spapr_irq_set_irq_xive,
|
||||
.init_kvm = spapr_irq_init_kvm_xive,
|
||||
};

/*
@@ -326,139 +128,12 @@ SpaprIrq spapr_irq_xive = {
 * activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}

static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq)
{
    spapr_irq_xics.free(spapr, irq);
    spapr_irq_xive.free(spapr, irq);
}

static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}

static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}

static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}

static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(spapr, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XICS disconnect failed: ");
            return;
        }
        kvmppc_xive_disconnect(spapr->xive, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XIVE disconnect failed: ");
            return;
        }
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}

static void spapr_irq_set_irq_dual(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, irq, val);
}

/*
 * Define values in sync with the XIVE and XICS backend
 */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs        = SPAPR_NR_XIRQS,
    .nr_msis         = SPAPR_NR_MSIS,
    .xics            = true,
    .xive            = true,

    .claim           = spapr_irq_claim_dual,
    .free            = spapr_irq_free_dual,
    .print_info      = spapr_irq_print_info_dual,
    .dt_populate     = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load       = spapr_irq_post_load_dual,
    .reset           = spapr_irq_reset_dual,
    .set_irq         = spapr_irq_set_irq_dual,
    .init_kvm        = NULL, /* should not be used */
};

@@ -521,9 +196,85 @@ static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
/*
 * sPAPR IRQ frontend routines for devices
 */
#define ALL_INTCS(spapr_) \
    { SPAPR_INTC((spapr_)->ics), SPAPR_INTC((spapr_)->xive), }

int spapr_irq_cpu_intc_create(SpaprMachineState *spapr,
                              PowerPCCPU *cpu, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->cpu_intc_create(intc, cpu, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

void spapr_irq_cpu_intc_reset(SpaprMachineState *spapr, PowerPCCPU *cpu)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            sicc->cpu_intc_reset(intc, cpu);
        }
    }
}

static void spapr_set_irq(void *opaque, int irq, int level)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->set_irq(spapr->active_intc, irq, level);
}

void spapr_irq_print_info(SpaprMachineState *spapr, Monitor *mon)
{
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->print_info(spapr->active_intc, mon);
}

void spapr_irq_dt(SpaprMachineState *spapr, uint32_t nr_servers,
                  void *fdt, uint32_t phandle)
{
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->dt(spapr->active_intc, nr_servers, fdt, phandle);
}

uint32_t spapr_irq_nr_msis(SpaprMachineState *spapr)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);

    if (smc->legacy_irq_allocation) {
        return smc->nr_xirqs;
    } else {
        return SPAPR_XIRQ_BASE + smc->nr_xirqs - SPAPR_IRQ_MSI;
    }
}
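
The non-legacy branch above is just the tail of the dynamic IRQ range. As a quick
check with the constants defined later in this excerpt's spapr_irq.h
(SPAPR_NR_XIRQS = 0x1000, SPAPR_IRQ_MSI = SPAPR_XIRQ_BASE + 0x0300), the
SPAPR_XIRQ_BASE terms cancel:

    /* non-legacy case of spapr_irq_nr_msis(), worked through */
    nr_msis = SPAPR_XIRQ_BASE + 0x1000 - (SPAPR_XIRQ_BASE + 0x0300);
    /*      = 0x1000 - 0x0300 = 0xd00 interrupts left over for MSIs */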

void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
@@ -541,9 +292,7 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }
    spapr_irq_msi_init(spapr);

    if (spapr->irq->xics) {
        Error *local_err = NULL;
@@ -563,8 +312,7 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
            return;
        }

        object_property_set_int(obj, spapr->irq->nr_xirqs, "nr-irqs",
                                &local_err);
        object_property_set_int(obj, smc->nr_xirqs, "nr-irqs", &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
@@ -585,8 +333,7 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
        int i;

        dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
        qdev_prop_set_uint32(dev, "nr-irqs",
                             spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
        qdev_prop_set_uint32(dev, "nr-irqs", smc->nr_xirqs + SPAPR_XIRQ_BASE);
        /*
         * 8 XIVE END structures per CPU. One for each available
         * priority
@@ -598,8 +345,11 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)

        /* Enable the CPU IPIs */
        for (i = 0; i < nr_servers; ++i) {
            if (spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i,
                                     false, errp) < 0) {
            SpaprInterruptControllerClass *sicc
                = SPAPR_INTC_GET_CLASS(spapr->xive);

            if (sicc->claim_irq(SPAPR_INTC(spapr->xive), SPAPR_IRQ_IPI + i,
                                false, errp) < 0) {
                return;
            }
        }
@@ -607,32 +357,60 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
        spapr_xive_hcall_init(spapr);
    }

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
    spapr->qirqs = qemu_allocate_irqs(spapr_set_irq, spapr,
                                      smc->nr_xirqs + SPAPR_XIRQ_BASE);
}

int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int rc;

    return spapr->irq->claim(spapr, irq, lsi, errp);
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (smc->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->claim_irq(intc, irq, lsi, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    int i;
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i, j;
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);

    assert(irq >= SPAPR_XIRQ_BASE);
    assert((irq + num) <= (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));
    assert((irq + num) <= (smc->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = irq; i < (irq + num); i++) {
        spapr->irq->free(spapr, i);
        for (j = 0; j < ARRAY_SIZE(intcs); j++) {
            SpaprInterruptController *intc = intcs[j];

            if (intc) {
                SpaprInterruptControllerClass *sicc
                    = SPAPR_INTC_GET_CLASS(intc);
                sicc->free_irq(intc, i);
            }
        }
    }
}

qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);

    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
@@ -641,7 +419,7 @@ qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
     * interfaces, we can change this if we need to in future.
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));
    assert(irq < (smc->nr_xirqs + SPAPR_XIRQ_BASE));

    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
@@ -656,16 +434,18 @@ qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)

int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
    SpaprInterruptControllerClass *sicc;

    spapr_irq_update_active_intc(spapr);
    sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc);
    return sicc->post_load(spapr->active_intc, version_id);
}

void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
    spapr_irq_update_active_intc(spapr);
}

int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
@@ -689,6 +469,54 @@ int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
    return phandle;
}

static void set_active_intc(SpaprMachineState *spapr,
                            SpaprInterruptController *new_intc)
{
    SpaprInterruptControllerClass *sicc;

    assert(new_intc);

    if (new_intc == spapr->active_intc) {
        /* Nothing to do */
        return;
    }

    if (spapr->active_intc) {
        sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc);
        if (sicc->deactivate) {
            sicc->deactivate(spapr->active_intc);
        }
    }

    sicc = SPAPR_INTC_GET_CLASS(new_intc);
    if (sicc->activate) {
        sicc->activate(new_intc, &error_fatal);
    }

    spapr->active_intc = new_intc;
}

void spapr_irq_update_active_intc(SpaprMachineState *spapr)
{
    SpaprInterruptController *new_intc;

    if (!spapr->ics) {
        /*
         * XXX before we run CAS, ov5_cas is initialized empty, which
         * indicates XICS, even if we have ic-mode=xive. TODO: clean
         * up the CAS path so that we have a clearer way of handling
         * this.
         */
        new_intc = SPAPR_INTC(spapr->xive);
    } else if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        new_intc = SPAPR_INTC(spapr->xive);
    } else {
        new_intc = SPAPR_INTC(spapr->ics);
    }

    set_active_intc(spapr, new_intc);
}

/*
 * XICS legacy routines - to deprecate one day
 */
@@ -744,21 +572,14 @@ int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS 0x400

SpaprIrq spapr_irq_xics_legacy = {
    .nr_xirqs        = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .nr_msis         = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .xics            = true,
    .xive            = false,

    .claim           = spapr_irq_claim_xics,
    .free            = spapr_irq_free_xics,
    .print_info      = spapr_irq_print_info_xics,
    .dt_populate     = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load       = spapr_irq_post_load_xics,
    .reset           = spapr_irq_reset_xics,
    .set_irq         = spapr_irq_set_irq_xics,
    .init_kvm        = spapr_irq_init_kvm_xics,
};

static void spapr_irq_register_types(void)
{
    type_register_static(&spapr_intc_info);
}

type_init(spapr_irq_register_types)
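
The type_init() above registers the new SPAPR_INTC QOM interface type. The
spapr_intc_info initializer itself is not part of this excerpt; as a rough,
illustrative sketch of what an interface TypeInfo of this kind looks like in
QOM (field values assumed from the macros in spapr_irq.h, not copied from the
patch):

    static const TypeInfo spapr_intc_info = {
        .name       = TYPE_SPAPR_INTC,
        .parent     = TYPE_INTERFACE,
        .class_size = sizeof(SpaprInterruptControllerClass),
    };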

@@ -2277,8 +2277,8 @@ static void spapr_phb_pci_enumerate(SpaprPhbState *phb)

}

int spapr_dt_phb(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
                 uint32_t nr_msis, int *node_offset)
int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
                 uint32_t intc_phandle, void *fdt, int *node_offset)
{
    int bus_off, i, j, ret;
    uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
@@ -2343,7 +2343,8 @@ int spapr_dt_phb(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
    _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
    _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", nr_msis));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi",
                          spapr_irq_nr_msis(spapr)));

    /* Dynamic DMA window */
    if (phb->ddw_enabled) {

@@ -128,8 +128,8 @@ struct SpaprPhbState {
#define SPAPR_PCI_NV2ATSD_WIN_SIZE (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * \
                                    64 * KiB)

int spapr_dt_phb(SpaprPhbState *phb, uint32_t intc_phandle, void *fdt,
                 uint32_t nr_msis, int *node_offset);
int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
                 uint32_t intc_phandle, void *fdt, int *node_offset);

void spapr_pci_rtas_init(void);

@@ -111,6 +111,7 @@ typedef struct PnvChipClass {

    uint32_t (*core_pir)(PnvChip *chip, uint32_t core_id);
    void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
    void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu);
    ISABus *(*isa_create)(PnvChip *chip, Error **errp);
    void (*dt_populate)(PnvChip *chip, void *fdt);
    void (*pic_print_info)(PnvChip *chip, Monitor *mon);

@@ -31,6 +31,8 @@
#define PNV_CORE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(PnvCoreClass, (obj), TYPE_PNV_CORE)

typedef struct PnvChip PnvChip;

typedef struct PnvCore {
    /*< private >*/
    CPUCore parent_obj;
@@ -38,6 +40,7 @@ typedef struct PnvCore {
    /*< public >*/
    PowerPCCPU **threads;
    uint32_t pir;
    PnvChip *chip;

    MemoryRegion xscom_regs;
} PnvCore;

@@ -119,9 +119,11 @@ struct SpaprMachineClass {
    bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
    bool pre_2_10_has_unused_icps;
    bool legacy_irq_allocation;
    uint32_t nr_xirqs;
    bool broken_host_serial_model; /* present real host info to the guest */
    bool pre_4_1_migration; /* don't migrate hpt-max-page-size */
    bool linux_pci_probe;
    bool smp_threads_vsmt; /* set VSMT to smp_threads by default */

    void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
                          uint64_t *buid, hwaddr *pio,
@@ -143,7 +145,6 @@ struct SpaprMachineState {
    struct SpaprVioBus *vio_bus;
    QLIST_HEAD(, SpaprPhbState) phbs;
    struct SpaprNvram *nvram;
    ICSState *ics;
    SpaprRtcState rtc;

    SpaprResizeHpt resize_hpt;
@@ -195,9 +196,11 @@ struct SpaprMachineState {

    int32_t irq_map_nr;
    unsigned long *irq_map;
    SpaprXive *xive;
    SpaprIrq *irq;
    qemu_irq *qirqs;
    SpaprInterruptController *active_intc;
    ICSState *ics;
    SpaprXive *xive;

    bool cmd_line_caps[SPAPR_CAP_NUM];
    SpaprCapabilities def, eff, mig;

@@ -27,32 +27,61 @@
#define SPAPR_IRQ_MSI     (SPAPR_XIRQ_BASE + 0x0300)

#define SPAPR_NR_XIRQS    0x1000
#define SPAPR_NR_MSIS     (SPAPR_XIRQ_BASE + SPAPR_NR_XIRQS - SPAPR_IRQ_MSI)

typedef struct SpaprMachineState SpaprMachineState;

void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis);
typedef struct SpaprInterruptController SpaprInterruptController;

#define TYPE_SPAPR_INTC "spapr-interrupt-controller"
#define SPAPR_INTC(obj) \
    INTERFACE_CHECK(SpaprInterruptController, (obj), TYPE_SPAPR_INTC)
#define SPAPR_INTC_CLASS(klass) \
    OBJECT_CLASS_CHECK(SpaprInterruptControllerClass, (klass), TYPE_SPAPR_INTC)
#define SPAPR_INTC_GET_CLASS(obj) \
    OBJECT_GET_CLASS(SpaprInterruptControllerClass, (obj), TYPE_SPAPR_INTC)

typedef struct SpaprInterruptControllerClass {
    InterfaceClass parent;

    int (*activate)(SpaprInterruptController *intc, Error **errp);
    void (*deactivate)(SpaprInterruptController *intc);

    /*
     * These methods will typically be called on all intcs, active and
     * inactive
     */
    int (*cpu_intc_create)(SpaprInterruptController *intc,
                           PowerPCCPU *cpu, Error **errp);
    void (*cpu_intc_reset)(SpaprInterruptController *intc, PowerPCCPU *cpu);
    int (*claim_irq)(SpaprInterruptController *intc, int irq, bool lsi,
                     Error **errp);
    void (*free_irq)(SpaprInterruptController *intc, int irq);

    /* These methods should only be called on the active intc */
    void (*set_irq)(SpaprInterruptController *intc, int irq, int val);
    void (*print_info)(SpaprInterruptController *intc, Monitor *mon);
    void (*dt)(SpaprInterruptController *intc, uint32_t nr_servers,
               void *fdt, uint32_t phandle);
    int (*post_load)(SpaprInterruptController *intc, int version_id);
} SpaprInterruptControllerClass;
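
A backend device model opts into this interface through its TypeInfo and fills
in the hooks from its class_init. The following is only a minimal sketch under
assumed names (the my_intc_* type and functions are placeholders, not the
actual XICS or XIVE code); the hook names themselves come from
SpaprInterruptControllerClass above:

    static void my_intc_class_init(ObjectClass *klass, void *data)
    {
        SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);

        sicc->activate        = my_intc_activate;
        sicc->deactivate      = my_intc_deactivate;
        sicc->cpu_intc_create = my_intc_cpu_intc_create;
        sicc->cpu_intc_reset  = my_intc_cpu_intc_reset;
        sicc->claim_irq       = my_intc_claim_irq;
        sicc->free_irq        = my_intc_free_irq;
        sicc->set_irq         = my_intc_set_irq;
        sicc->print_info      = my_intc_print_info;
        sicc->dt              = my_intc_dt;
        sicc->post_load       = my_intc_post_load;
    }

    static const TypeInfo my_intc_info = {
        .name       = "my-spapr-intc",
        .parent     = TYPE_SYS_BUS_DEVICE,
        .class_init = my_intc_class_init,
        .interfaces = (InterfaceInfo[]) {
            { TYPE_SPAPR_INTC },
            { },
        },
    };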

void spapr_irq_update_active_intc(SpaprMachineState *spapr);

int spapr_irq_cpu_intc_create(SpaprMachineState *spapr,
                              PowerPCCPU *cpu, Error **errp);
void spapr_irq_cpu_intc_reset(SpaprMachineState *spapr, PowerPCCPU *cpu);
void spapr_irq_print_info(SpaprMachineState *spapr, Monitor *mon);
void spapr_irq_dt(SpaprMachineState *spapr, uint32_t nr_servers,
                  void *fdt, uint32_t phandle);

uint32_t spapr_irq_nr_msis(SpaprMachineState *spapr);
int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
                        Error **errp);
void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num);

typedef struct SpaprIrq {
    uint32_t nr_xirqs;
    uint32_t nr_msis;
    bool     xics;
    bool     xive;

    int (*claim)(SpaprMachineState *spapr, int irq, bool lsi, Error **errp);
    void (*free)(SpaprMachineState *spapr, int irq);
    void (*print_info)(SpaprMachineState *spapr, Monitor *mon);
    void (*dt_populate)(SpaprMachineState *spapr, uint32_t nr_servers,
                        void *fdt, uint32_t phandle);
    void (*cpu_intc_create)(SpaprMachineState *spapr, PowerPCCPU *cpu,
                            Error **errp);
    int (*post_load)(SpaprMachineState *spapr, int version_id);
    void (*reset)(SpaprMachineState *spapr, Error **errp);
    void (*set_irq)(void *opaque, int srcno, int val);
    void (*init_kvm)(SpaprMachineState *spapr, Error **errp);
} SpaprIrq;

extern SpaprIrq spapr_irq_xics;
@@ -67,6 +96,9 @@ qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq);
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id);
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp);
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp);
int spapr_irq_init_kvm(int (*fn)(SpaprInterruptController *, Error **),
                       SpaprInterruptController *intc,
                       Error **errp);

/*
 * XICS legacy routines

@@ -54,15 +54,9 @@ typedef struct SpaprXive {
 */
#define SPAPR_XIVE_BLOCK_ID 0x0

int spapr_xive_irq_claim(SpaprXive *xive, int lisn, bool lsi, Error **errp);
void spapr_xive_irq_free(SpaprXive *xive, int lisn);
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon);
int spapr_xive_post_load(SpaprXive *xive, int version_id);

void spapr_xive_hcall_init(SpaprMachineState *spapr);
void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle);
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx);
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable);
void spapr_xive_map_mmio(SpaprXive *xive);

@@ -72,8 +66,8 @@ int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
/*
 * KVM XIVE device helpers
 */
void kvmppc_xive_connect(SpaprXive *xive, Error **errp);
void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp);
int kvmppc_xive_connect(SpaprInterruptController *intc, Error **errp);
void kvmppc_xive_disconnect(SpaprInterruptController *intc);
void kvmppc_xive_reset(SpaprXive *xive, Error **errp);
void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp);

@@ -161,6 +161,7 @@ void icp_set_mfrr(ICPState *icp, uint8_t mfrr);
uint32_t icp_accept(ICPState *ss);
uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr);
void icp_eoi(ICPState *icp, uint32_t xirr);
void icp_reset(ICPState *icp);

void ics_write_xive(ICSState *ics, int nr, int server,
                    uint8_t priority, uint8_t saved_priority);

@@ -32,10 +32,8 @@
#define TYPE_ICS_SPAPR "ics-spapr"
#define ICS_SPAPR(obj) OBJECT_CHECK(ICSState, (obj), TYPE_ICS_SPAPR)

void spapr_dt_xics(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle);
int xics_kvm_connect(SpaprMachineState *spapr, Error **errp);
void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp);
int xics_kvm_connect(SpaprInterruptController *intc, Error **errp);
void xics_kvm_disconnect(SpaprInterruptController *intc);
bool xics_kvm_has_broken_disconnect(SpaprMachineState *spapr);

#endif /* XICS_SPAPR_H */

@@ -415,6 +415,7 @@ uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size);

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon);
Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);

static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx)
{

@@ -22,9 +22,29 @@
/*
 * Interrupt source number encoding on PowerBUS
 */
#define XIVE_SRCNO_BLOCK(srcno)  (((srcno) >> 28) & 0xf)
#define XIVE_SRCNO_INDEX(srcno)  ((srcno) & 0x0fffffff)
#define XIVE_SRCNO(blk, idx)     ((uint32_t)(blk) << 28 | (idx))
/*
 * Trigger data definition
 *
 * The trigger definition is used for triggers both for HW source
 * interrupts (PHB, PSI), as well as for rerouting interrupts between
 * Interrupt Controllers.
 *
 * HW source controllers set bit0 of word0 to ‘0’ as they provide EAS
 * information (EAS block + EAS index) in the 8 byte data and not END
 * information, which is used for rerouting interrupts.
 *
 * bit1 of word0 set to ‘1’ signals that the state bit check has been
 * performed.
 */
#define XIVE_TRIGGER_END         PPC_BIT(0)
#define XIVE_TRIGGER_PQ          PPC_BIT(1)

/*
 * QEMU macros to manipulate the trigger payload in native endian
 */
#define XIVE_EAS_BLOCK(n)        (((n) >> 28) & 0xf)
#define XIVE_EAS_INDEX(n)        ((n) & 0x0fffffff)
#define XIVE_EAS(blk, idx)       ((uint32_t)(blk) << 28 | (idx))

#define TM_SHIFT                 16

@@ -17,7 +17,7 @@
- SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
  implementation for certain IBM POWER hardware. The sources are at
  https://github.com/aik/SLOF, and the image currently in qemu is
  built from git tag qemu-slof-20190911.
  built from git tag qemu-slof-20191022.

- sgabios (the Serial Graphics Adapter option ROM) provides a means for
  legacy x86 software to communicate with an attached serial console as
BIN pc-bios/slof.bin (binary file not shown)
@@ -1 +1 @@
Subproject commit bcc3c4e5c21a015f4680894c4ec978a90d4a2d69
Subproject commit 899d98836513bb3d6a4f4e48ef7cee887ee5f57b

@@ -590,40 +590,38 @@ static void trans_vsl(DisasContext *ctx)
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avrA = tcg_temp_new_i64();
    TCGv_i64 avrB = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 shifted = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in sh. */
    get_avr64(avrB, VB, false);
    tcg_gen_andi_i64(sh, avrB, 0x07ULL);
    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save highest sh bits of lower doubleword element of vA in variable
     * shifted and perform shift on lower doubleword.
     * Save highest 'sh' bits of lower doubleword element of vA in variable
     * 'carry' and perform shift on lower doubleword.
     */
    get_avr64(avrA, VA, false);
    tcg_gen_subfi_i64(tmp, 64, sh);
    tcg_gen_shr_i64(shifted, avrA, tmp);
    tcg_gen_andi_i64(shifted, shifted, 0x7fULL);
    tcg_gen_shl_i64(avrA, avrA, sh);
    set_avr64(VT, avrA, false);
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform shift on higher doubleword element of vA and replace lowest
     * sh bits with shifted.
     * 'sh' bits with 'carry'.
     */
    get_avr64(avrA, VA, true);
    tcg_gen_shl_i64(avrA, avrA, sh);
    tcg_gen_or_i64(avrA, avrA, shifted);
    set_avr64(VT, avrA, true);
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avrA);
    tcg_temp_free_i64(avrB);
    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(shifted);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}
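
The reworked sequence above is the usual two-limb left shift: the bits that
fall out of the top of the low doubleword become the low bits of the high
doubleword. The carry is extracted in two 32-bit steps, presumably so that
sh == 0 never requests a single 64-bit shift by 64. A plain-C sketch of the
same idea (the helper name and signature are illustrative only):

    static void vsl_sketch(uint64_t hi, uint64_t lo, unsigned sh,
                           uint64_t *out_hi, uint64_t *out_lo)
    {
        /* sh is 0..7, taken from bits 125-127 of vB */
        uint64_t carry = (lo >> 32) >> (32 - sh); /* bits carried into hi; 0 when sh == 0 */

        *out_lo = lo << sh;
        *out_hi = (hi << sh) | carry;
    }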

@@ -639,39 +637,37 @@ static void trans_vsr(DisasContext *ctx)
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avrA = tcg_temp_new_i64();
    TCGv_i64 avrB = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 shifted = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in sh. */
    get_avr64(avrB, VB, false);
    tcg_gen_andi_i64(sh, avrB, 0x07ULL);
    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save lowest sh bits of higher doubleword element of vA in variable
     * shifted and perform shift on higher doubleword.
     * Save lowest 'sh' bits of higher doubleword element of vA in variable
     * 'carry' and perform shift on higher doubleword.
     */
    get_avr64(avrA, VA, true);
    tcg_gen_subfi_i64(tmp, 64, sh);
    tcg_gen_shl_i64(shifted, avrA, tmp);
    tcg_gen_andi_i64(shifted, shifted, 0xfe00000000000000ULL);
    tcg_gen_shr_i64(avrA, avrA, sh);
    set_avr64(VT, avrA, true);
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform shift on lower doubleword element of vA and replace highest
     * sh bits with shifted.
     * 'sh' bits with 'carry'.
     */
    get_avr64(avrA, VA, false);
    tcg_gen_shr_i64(avrA, avrA, sh);
    tcg_gen_or_i64(avrA, avrA, shifted);
    set_avr64(VT, avrA, false);
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avrA);
    tcg_temp_free_i64(avrB);
    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(shifted);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}