ppc/xics: introduce an 'intc' backlink under PowerPCCPU
Today, the ICPState array of the sPAPR machine is indexed with the 'cpu_index' of the CPUState. This numbering of CPUs is internal to QEMU, and the guest only knows about what is exposed in the device tree, that is, the 'cpu_dt_id'. This is why sPAPR uses the helper xics_get_cpu_index_by_dt_id() to do the mapping in a couple of places.

To provide a more generic XICS layer, we need to abstract the IRQ 'server' number and remove any assumption made on its nature. It should not be used as a 'cpu_index' for lookups, as xics_cpu_setup() and xics_cpu_destroy() do.

To reach that goal, we choose to introduce a generic 'intc' backlink under PowerPCCPU, and let the machine core init routine do the ICPState lookup. The resulting object is passed on to xics_cpu_setup(), which does the store under PowerPCCPU. The IRQ 'server' number in XICS is now generic: sPAPR uses 'cpu_dt_id' and PowerNV will use the 'PIR' number.

This also has the benefit of simplifying the sPAPR hcall routines, which do not need to do any ICPState lookups anymore.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
commit ad5d1add86
parent ccd531b9c9
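Before the diff, a minimal standalone C sketch of the pattern being introduced. The struct layouts and the helper names (icp_lookup, cpu_setup, set_cppr) are simplified stand-ins invented for illustration, not the actual QEMU types or API: the machine init path looks up the interrupt presenter once, the setup helper stores it as a backlink under the CPU, and the hcall-style path then reaches the presenter through cpu->intc instead of redoing a server-number lookup.

/*
 * Standalone sketch, NOT QEMU code: simplified stand-in types that mirror
 * the pattern this commit introduces.  The machine init path resolves the
 * interrupt presenter once, the setup helper stores it as a backlink under
 * the CPU, and later "hcall"-style paths follow cpu->intc instead of
 * repeating a server-number lookup.
 */
#include <assert.h>
#include <stdio.h>

typedef struct CPUState {
    int cpu_index;              /* QEMU-internal index, no longer the server */
} CPUState;

typedef struct ICPState {
    int server;                 /* generic server id: cpu_dt_id, PIR, ... */
    CPUState *cs;               /* owning CPU, set at setup time */
} ICPState;

typedef struct PowerPCCPU {
    CPUState parent;
    ICPState *intc;             /* the new backlink ('Object *intc' in QEMU) */
} PowerPCCPU;

#define NR_SERVERS 2
static ICPState icps[NR_SERVERS];

/* Machine-level lookup by server number, done once at CPU init time. */
static ICPState *icp_lookup(int server)
{
    for (int i = 0; i < NR_SERVERS; i++) {
        if (icps[i].server == server) {
            return &icps[i];
        }
    }
    return NULL;
}

/* The presenter is now passed in by the caller and stored as a backlink. */
static void cpu_setup(PowerPCCPU *cpu, ICPState *icp)
{
    assert(icp);
    cpu->intc = icp;
    icp->cs = &cpu->parent;
}

/* An hcall-like path: no lookup, just follow the backlink. */
static int set_cppr(PowerPCCPU *cpu, unsigned long cppr)
{
    printf("set CPPR on server %d to %lu\n", cpu->intc->server, cppr);
    return 0;
}

int main(void)
{
    PowerPCCPU cpu = { .parent = { .cpu_index = 0 } };

    icps[0].server = 100;       /* e.g. a device tree id, not a cpu_index */
    icps[1].server = 108;

    /* Machine init: resolve the presenter once, then hand it to setup. */
    cpu_setup(&cpu, icp_lookup(100));
    return set_cppr(&cpu, 0xff);
}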
@@ -52,7 +52,7 @@ int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
 void xics_cpu_destroy(XICSFabric *xi, PowerPCCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *icp = xics_icp_get(xi, cs->cpu_index);
+    ICPState *icp = ICP(cpu->intc);
 
     assert(icp);
     assert(cs == icp->cs);
@@ -61,15 +61,15 @@ void xics_cpu_destroy(XICSFabric *xi, PowerPCCPU *cpu)
     icp->cs = NULL;
 }
 
-void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu)
+void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu, ICPState *icp)
 {
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
-    ICPState *icp = xics_icp_get(xi, cs->cpu_index);
     ICPStateClass *icpc;
 
     assert(icp);
 
+    cpu->intc = OBJECT(icp);
     icp->cs = cs;
 
     icpc = ICP_GET_CLASS(icp);
@@ -43,11 +43,9 @@
 static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
 {
-    CPUState *cs = CPU(cpu);
-    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     target_ulong cppr = args[0];
 
-    icp_set_cppr(icp, cppr);
+    icp_set_cppr(ICP(cpu->intc), cppr);
     return H_SUCCESS;
 }
 
@@ -69,9 +67,7 @@ static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
 {
-    CPUState *cs = CPU(cpu);
-    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
-    uint32_t xirr = icp_accept(icp);
+    uint32_t xirr = icp_accept(ICP(cpu->intc));
 
     args[0] = xirr;
     return H_SUCCESS;
@@ -80,9 +76,7 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
 {
-    CPUState *cs = CPU(cpu);
-    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
-    uint32_t xirr = icp_accept(icp);
+    uint32_t xirr = icp_accept(ICP(cpu->intc));
 
     args[0] = xirr;
     args[1] = cpu_get_host_ticks();
@@ -92,21 +86,17 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
 {
-    CPUState *cs = CPU(cpu);
-    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     target_ulong xirr = args[0];
 
-    icp_eoi(icp, xirr);
+    icp_eoi(ICP(cpu->intc), xirr);
     return H_SUCCESS;
 }
 
 static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
 {
-    CPUState *cs = CPU(cpu);
-    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     uint32_t mfrr;
-    uint32_t xirr = icp_ipoll(icp, &mfrr);
+    uint32_t xirr = icp_ipoll(ICP(cpu->intc), &mfrr);
 
     args[0] = xirr;
     args[1] = mfrr;
@@ -63,6 +63,8 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
                            Error **errp)
 {
     CPUPPCState *env = &cpu->env;
+    XICSFabric *xi = XICS_FABRIC(spapr);
+    ICPState *icp = xics_icp_get(xi, CPU(cpu)->cpu_index);
 
     /* Set time-base frequency to 512 MHz */
     cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);
@@ -80,7 +82,7 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
         }
     }
 
-    xics_cpu_setup(XICS_FABRIC(spapr), cpu);
+    xics_cpu_setup(xi, cpu, icp);
 
     qemu_register_reset(spapr_cpu_reset, cpu);
     spapr_cpu_reset(cpu);
@@ -168,7 +168,7 @@ void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle);
 
 qemu_irq xics_get_qirq(XICSFabric *xi, int irq);
 ICPState *xics_icp_get(XICSFabric *xi, int server);
-void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu);
+void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu, ICPState *icp);
 void xics_cpu_destroy(XICSFabric *xi, PowerPCCPU *cpu);
 
 /* Internal XICS interfaces */
@@ -1200,6 +1200,7 @@ struct PowerPCCPU {
     uint32_t max_compat;
     uint32_t compat_pvr;
     PPCVirtualHypervisor *vhyp;
+    Object *intc;
 
     /* Fields related to migration compatibility hacks */
     bool pre_2_8_migration;