diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 821f0d4a49..12ed4b065c 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1726,6 +1726,18 @@ static void spapr_machine_reset(MachineState *machine)
         spapr_setup_hpt_and_vrma(spapr);
     }
 
+    /*
+     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
+     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
+     * called from vPHB reset handler so we initialize the counter here.
+     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
+     * must be equally distant from any other node.
+     * The final value of spapr->gpu_numa_id is going to be written to
+     * max-associativity-domains in spapr_build_fdt().
+     */
+    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
+    qemu_devices_reset();
+
     /*
      * If this reset wasn't generated by CAS, we should reset our
      * negotiated options and start from scratch
@@ -1741,18 +1753,6 @@ static void spapr_machine_reset(MachineState *machine)
         spapr_irq_msi_reset(spapr);
     }
 
-    /*
-     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
-     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
-     * called from vPHB reset handler so we initialize the counter here.
-     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
-     * must be equally distant from any other node.
-     * The final value of spapr->gpu_numa_id is going to be written to
-     * max-associativity-domains in spapr_build_fdt().
-     */
-    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
-    qemu_devices_reset();
-
     /*
      * This is fixing some of the default configuration of the XIVE
      * devices. To be called after the reset of the machine devices.
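
For illustration only, here is a minimal standalone sketch of the counter logic the moved comment describes. FakeSpaprMachineState, reset_gpu_numa_counter() and assign_gpu_numa_id() are hypothetical stand-ins, not QEMU definitions; only the MAX(1, nb_numa_nodes) starting value, the per-GPU increment done in spapr_pci_collect_nvgpu(), and the use of the final value for max-associativity-domains come from the patch itself.

/*
 * Sketch only: illustrates the GPU NUMA id counter behaviour, not QEMU code.
 */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for the relevant part of the machine state. */
typedef struct {
    unsigned gpu_numa_id;   /* next NUMA node id to hand out to GPU RAM */
} FakeSpaprMachineState;

/* Counterpart of the initialization now done before qemu_devices_reset(). */
static void reset_gpu_numa_counter(FakeSpaprMachineState *spapr,
                                   int nb_numa_nodes)
{
    /*
     * Start after the last configured NUMA node; with no NUMA configured,
     * start from 1 so GPU RAM stays equally distant from every other node.
     */
    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
}

/* Counterpart of spapr_pci_collect_nvgpu() handing out one id per GPU. */
static unsigned assign_gpu_numa_id(FakeSpaprMachineState *spapr)
{
    return spapr->gpu_numa_id++;
}

int main(void)
{
    FakeSpaprMachineState spapr;

    reset_gpu_numa_counter(&spapr, 0);   /* no -numa on the command line */
    printf("GPU 0 -> NUMA node %u\n", assign_gpu_numa_id(&spapr));   /* 1 */
    printf("GPU 1 -> NUMA node %u\n", assign_gpu_numa_id(&spapr));   /* 2 */

    /*
     * The final counter value is what would end up in
     * max-associativity-domains via spapr_build_fdt().
     */
    printf("final gpu_numa_id: %u\n", spapr.gpu_numa_id);            /* 3 */
    return 0;
}

Moving the initialization (together with qemu_devices_reset()) ahead of the CAS handling means the counter is already valid when the vPHB reset handler runs during device reset, which is the ordering the patch is after.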