pc,pci,virtio: fixes, cleanups
Fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmEz5lIPHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRp0WMIAL/keMtzBfVNaDAER/gaaklluJ7XCFYJgKfX
Pg173ZAWd+KNryiKDEn9SVIDzjKmllvQu0P18St92hHCCRhzIqlGwZ6IpB1XgVLh
OmcRlccepb+84FNiCD6RIxE+iLQ3eTrFUrpF6CIHZlD+TUTm7u1gtZFidV3v5EZp
BaT+BFxFZgejfKnxEYjnxOgAYmyXI18fFtW/GX6VgVTiy10XRoNqNZ6DwO2sZBNu
2NI2RMVMNopwQyn/1vj3KtBvhaqPdhrPV0qGzi9isU7t5Z2JE/n14O9WHcA4K2ed
3HktAGlv+glSbNldfc+9jIIHlkwHAi/T4PAGYE3HbCP0atONl/M=
=DN6u
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc,pci,virtio: fixes, cleanups

Fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Sat 04 Sep 2021 22:34:10 BST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (35 commits)
  vhost-vdpa: remove the unnecessary queue_index assignment
  vhost-vdpa: fix the wrong assertion in vhost_vdpa_init()
  vhost-vdpa: tweak the error label in vhost_vdpa_add()
  vhost-vdpa: fix leaking of vhost_net in vhost_vdpa_add()
  vhost-vdpa: don't cleanup twice in vhost_vdpa_add()
  vhost-vdpa: remove the unnecessary check in vhost_vdpa_add()
  vhost_net: do not assume nvqs is always 2
  vhost: use unsigned int for nvqs
  vhost_net: remove the meaningless assignment in vhost_net_start_one()
  vhost-vdpa: correctly return err in vhost_vdpa_set_backend_cap()
  vhost-vdpa: remove unused variable "acked_features"
  tests/vhost-user-bridge.c: Fix typo in help message
  tests/vhost-user-bridge.c: Sanity check socket path length
  hw/virtio: Add flatview update in vhost_user_cleanup()
  hw/virtio: Remove NULL check in virtio_free_region_cache()
  hw/virtio: Document virtio_queue_packed_empty_rcu is called within RCU
  MAINTAINERS: Added myself as a reviewer for acpi/smbios subsystem
  hw/acpi: use existing references to pci device struct within functions
  hw/pci: remove all references to find_i440fx function
  hw/i386/acpi-build: Get NUMA information from struct NumaState
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 88afdc92b6
@@ -1751,6 +1751,7 @@ F: docs/specs/*pci*

ACPI/SMBIOS
M: Michael S. Tsirkin <mst@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
R: Ani Sinha <ani@anisinha.ca>
S: Supported
F: include/hw/acpi/*
F: include/hw/firmware/smbios.h
@@ -18,10 +18,7 @@ CONFIG_PCSPK=y
CONFIG_PCKBD=y
CONFIG_FDC=y
CONFIG_ACPI=y
CONFIG_ACPI_X86=y
CONFIG_ACPI_MEMORY_HOTPLUG=y
CONFIG_ACPI_NVDIMM=y
CONFIG_ACPI_CPU_HOTPLUG=y
CONFIG_ACPI_PIIX4=y
CONFIG_APM=y
CONFIG_I8257=y
CONFIG_PIIX4=y
@@ -8,6 +8,8 @@ config ACPI_X86
    select ACPI_CPU_HOTPLUG
    select ACPI_MEMORY_HOTPLUG
    select ACPI_HMAT
    select ACPI_PIIX4
    select ACPI_PCIHP

config ACPI_X86_ICH
    bool

@@ -24,6 +26,14 @@ config ACPI_NVDIMM
    bool
    depends on ACPI

config ACPI_PIIX4
    bool
    depends on ACPI

config ACPI_PCIHP
    bool
    depends on ACPI

config ACPI_HMAT
    bool
    depends on ACPI
hw/acpi/acpi-cpu-hotplug-stub.c (new file, 50 lines)
@@ -0,0 +1,50 @@
#include "qemu/osdep.h"
#include "hw/acpi/cpu_hotplug.h"
#include "migration/vmstate.h"


/* Following stubs are all related to ACPI cpu hotplug */
const VMStateDescription vmstate_cpu_hotplug;

void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
                                CPUHotplugState *cpuhp_state,
                                uint16_t io_port)
{
    return;
}

void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
                                  AcpiCpuHotplug *gpe_cpu, uint16_t base)
{
    return;
}

void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
{
    return;
}

void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                      CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
{
    return;
}

void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
                             AcpiCpuHotplug *g, DeviceState *dev, Error **errp)
{
    return;
}

void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
                        DeviceState *dev, Error **errp)
{
    return;
}

void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                                CPUHotplugState *cpu_st,
                                DeviceState *dev, Error **errp)
{
    return;
}
hw/acpi/acpi-mem-hotplug-stub.c (new file, 35 lines)
@@ -0,0 +1,35 @@
#include "qemu/osdep.h"
#include "hw/acpi/memory_hotplug.h"
#include "migration/vmstate.h"

const VMStateDescription vmstate_memory_hotplug;

void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
                              MemHotplugState *state, hwaddr io_base)
{
    return;
}

void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list)
{
    return;
}

void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
                         DeviceState *dev, Error **errp)
{
    return;
}

void acpi_memory_unplug_cb(MemHotplugState *mem_st,
                           DeviceState *dev, Error **errp)
{
    return;
}

void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev,
                                   MemHotplugState *mem_st,
                                   DeviceState *dev, Error **errp)
{
    return;
}
hw/acpi/acpi-nvdimm-stub.c (new file, 8 lines)
@@ -0,0 +1,8 @@
#include "qemu/osdep.h"
#include "hw/mem/nvdimm.h"
#include "hw/hotplug.h"

void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    return;
}
hw/acpi/acpi-pci-hotplug-stub.c (new file, 47 lines)
@@ -0,0 +1,47 @@
#include "qemu/osdep.h"
#include "hw/acpi/pcihp.h"
#include "migration/vmstate.h"

const VMStateDescription vmstate_acpi_pcihp_pci_status;

void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
                     MemoryRegion *address_space_io, bool bridges_enabled,
                     uint16_t io_base)
{
    return;
}

void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
                               DeviceState *dev, Error **errp)
{
    return;
}

void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
                                   DeviceState *dev, Error **errp)
{
    return;
}

void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
                                 DeviceState *dev, Error **errp)
{
    return;
}

void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
                                         AcpiPciHpState *s, DeviceState *dev,
                                         Error **errp)
{
    return;
}

void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off)
{
    return;
}

bool vmstate_acpi_pcihp_use_acpi_index(void *opaque, int version_id)
{
    return false;
}
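The stub files above let shared x86 code keep calling the ACPI hotplug entry points unconditionally: when the corresponding CONFIG switch is off, the empty stub satisfies the linker; when it is on, meson builds the real implementation instead. A minimal illustrative sketch of a caller (hypothetical, not part of this series):

/*
 * Sketch only: with CONFIG_ACPI_PCIHP disabled this still links, because
 * acpi-pci-hotplug-stub.c provides a no-op acpi_pcihp_reset(); with it
 * enabled, the real implementation from hw/acpi/pcihp.c runs instead.
 */
static void example_acpi_reset_path(AcpiPciHpState *s)
{
    acpi_pcihp_reset(s, true);
}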
@@ -451,7 +451,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
    object_property_add_bool(obj, ACPI_PM_PROP_TCO_ENABLED,
                             ich9_pm_get_enable_tco,
                             ich9_pm_set_enable_tco);
    object_property_add_bool(obj, "acpi-pci-hotplug-with-bridge-support",
    object_property_add_bool(obj, ACPI_PM_PROP_ACPI_PCIHP_BRIDGE,
                             ich9_pm_get_acpi_pci_hotplug,
                             ich9_pm_set_acpi_pci_hotplug);
}
@@ -6,16 +6,20 @@ acpi_ss.add(files(
  'core.c',
  'utils.c',
))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c'))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu_hotplug.c'))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c', 'cpu_hotplug.c'))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_false: files('acpi-cpu-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_true: files('memory_hotplug.c'))
acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_false: files('acpi-mem-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_true: files('nvdimm.c'))
acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_false: files('acpi-nvdimm-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c'))
acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c'))
acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c'))
acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c'))
acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))

@@ -23,4 +27,6 @@ acpi_ss.add(when: 'CONFIG_TPM', if_true: files('tpm.c'))
softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c'))
softmmu_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss)
softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c',
                                                  'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c'))
                                                  'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c',
                                                  'acpi-mem-hotplug-stub.c', 'acpi-cpu-hotplug-stub.c',
                                                  'acpi-pci-hotplug-stub.c', 'acpi-nvdimm-stub.c'))
@@ -283,7 +283,7 @@ void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,

    /* Only hotplugged devices need the hotplug capability. */
    if (dev->hotplugged &&
        acpi_pcihp_get_bsel(pci_get_bus(PCI_DEVICE(dev))) < 0) {
        acpi_pcihp_get_bsel(pci_get_bus(pdev)) < 0) {
        error_setg(errp, "Unsupported bus. Bus doesn't have property '"
                   ACPI_PCIHP_PROP_BSEL "' set");
        return;

@@ -363,8 +363,8 @@ void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
{
    PCIDevice *pdev = PCI_DEVICE(dev);

    trace_acpi_pci_unplug(PCI_SLOT(PCI_DEVICE(dev)->devfn),
                          acpi_pcihp_get_bsel(pci_get_bus(PCI_DEVICE(dev))));
    trace_acpi_pci_unplug(PCI_SLOT(pdev->devfn),
                          acpi_pcihp_get_bsel(pci_get_bus(pdev)));

    /*
     * clean up acpi-index so it could reused by another device
@@ -647,9 +647,9 @@ static Property piix4_pm_properties[] = {
    DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0),
    DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0),
    DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_VAL, PIIX4PMState, s4_val, 2),
    DEFINE_PROP_BOOL("acpi-pci-hotplug-with-bridge-support", PIIX4PMState,
    DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, PIIX4PMState,
                     use_acpi_hotplug_bridge, true),
    DEFINE_PROP_BOOL("acpi-root-pci-hotplug", PIIX4PMState,
    DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState,
                     use_acpi_root_pci_hotplug, true),
    DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState,
                     acpi_memory_hotplug.is_enabled, true),
@@ -267,10 +267,10 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
    qobject_unref(o);

    pm->pcihp_bridge_en =
        object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support",
        object_property_get_bool(obj, ACPI_PM_PROP_ACPI_PCIHP_BRIDGE,
                                 NULL);
    pm->pcihp_root_en =
        object_property_get_bool(obj, "acpi-root-pci-hotplug",
        object_property_get_bool(obj, ACPI_PM_PROP_ACPI_PCI_ROOTHP,
                                 NULL);
}

@@ -303,13 +303,9 @@ Object *acpi_get_i386_pci_host(void)
{
    PCIHostState *host;

    host = OBJECT_CHECK(PCIHostState,
                        object_resolve_path("/machine/i440fx", NULL),
                        TYPE_PCI_HOST_BRIDGE);
    host = PCI_HOST_BRIDGE(object_resolve_path("/machine/i440fx", NULL));
    if (!host) {
        host = OBJECT_CHECK(PCIHostState,
                            object_resolve_path("/machine/q35", NULL),
                            TYPE_PCI_HOST_BRIDGE);
        host = PCI_HOST_BRIDGE(object_resolve_path("/machine/q35", NULL));
    }

    return OBJECT(host);

@@ -1918,6 +1914,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
    X86MachineState *x86ms = X86_MACHINE(machine);
    const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
    PCMachineState *pcms = PC_MACHINE(machine);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    NodeInfo *numa_info = machine->numa_state->nodes;
    ram_addr_t hotplugabble_address_space_size =
        object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE,
                                NULL);

@@ -1961,9 +1959,9 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
    next_base = 0;
    numa_start = table_data->len;

    for (i = 1; i < pcms->numa_nodes + 1; ++i) {
    for (i = 1; i < nb_numa_nodes + 1; ++i) {
        mem_base = next_base;
        mem_len = pcms->node_mem[i - 1];
        mem_len = numa_info[i - 1].node_mem;
        next_base = mem_base + mem_len;

        /* Cut out the 640K hole */

@@ -2011,7 +2009,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
    }

    slots = (table_data->len - numa_start) / sizeof *numamem;
    for (; slots < pcms->numa_nodes + 2; slots++) {
    for (; slots < nb_numa_nodes + 2; slots++) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
    }

@@ -2027,7 +2025,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
    if (hotplugabble_address_space_size) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, machine->device_memory->base,
                          hotplugabble_address_space_size, pcms->numa_nodes - 1,
                          hotplugabble_address_space_size, nb_numa_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

@@ -2529,7 +2527,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
        }
    }
#endif
    if (pcms->numa_nodes) {
    if (machine->numa_state->num_nodes) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, machine);
        if (machine->numa_state->have_numa_distance) {
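The SRAT hunks above read the NUMA topology from machine->numa_state instead of the duplicated PCMachineState fields. A minimal sketch of the access pattern, using only fields that appear in the diff (helper name hypothetical):

/* Sketch only: walk NUMA nodes via MachineState::numa_state rather than
 * pcms->numa_nodes / pcms->node_mem, mirroring the build_srat() change. */
static uint64_t example_total_node_mem(MachineState *machine)
{
    int nb_numa_nodes = machine->numa_state->num_nodes;
    NodeInfo *numa_info = machine->numa_state->nodes;
    uint64_t total = 0;
    int i;

    for (i = 0; i < nb_numa_nodes; i++) {
        total += numa_info[i].node_mem;
    }
    return total;
}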
hw/i386/pc.c (13 changed lines)
@@ -101,7 +101,7 @@ GlobalProperty pc_compat_6_0[] = {
    { "qemu64" "-" TYPE_X86_CPU, "model", "6" },
    { "qemu64" "-" TYPE_X86_CPU, "stepping", "3" },
    { TYPE_X86_CPU, "x-vendor-cpuid-only", "off" },
    { "ICH9-LPC", "acpi-pci-hotplug-with-bridge-support", "off" },
    { "ICH9-LPC", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" },
};
const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0);

@@ -313,7 +313,7 @@ const size_t pc_compat_2_0_len = G_N_ELEMENTS(pc_compat_2_0);
GlobalProperty pc_compat_1_7[] = {
    PC_CPU_MODEL_IDS("1.7.0")
    { TYPE_USB_DEVICE, "msos-desc", "no" },
    { "PIIX4_PM", "acpi-pci-hotplug-with-bridge-support", "off" },
    { "PIIX4_PM", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" },
    { "hpet", HPET_INTCAP, "4" },
};
const size_t pc_compat_1_7_len = G_N_ELEMENTS(pc_compat_1_7);

@@ -802,18 +802,9 @@ void pc_machine_done(Notifier *notifier, void *data)

void pc_guest_info_init(PCMachineState *pcms)
{
    int i;
    MachineState *ms = MACHINE(pcms);
    X86MachineState *x86ms = X86_MACHINE(pcms);

    x86ms->apic_xrupt_override = true;
    pcms->numa_nodes = ms->numa_state->num_nodes;
    pcms->node_mem = g_malloc0(pcms->numa_nodes *
                               sizeof *pcms->node_mem);
    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        pcms->node_mem[i] = ms->numa_state->nodes[i].node_mem;
    }

    pcms->machine_done.notify = pc_machine_done;
    qemu_add_machine_init_done_notifier(&pcms->machine_done);
}
@@ -238,7 +238,7 @@ static void pc_q35_init(MachineState *machine)
                             OBJECT(lpc), &error_abort);

    acpi_pcihp = object_property_get_bool(OBJECT(lpc),
                                          "acpi-pci-hotplug-with-bridge-support",
                                          ACPI_PM_PROP_ACPI_PCIHP_BRIDGE,
                                          NULL);

    if (acpi_pcihp) {
@@ -31,6 +31,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/range.h"
#include "hw/isa/isa.h"

@@ -676,6 +677,18 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
    DeviceState *dev = DEVICE(d);
    ISABus *isa_bus;

    if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) &&
        !(lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT))) {
        /*
         * smi_features_ok_callback() throws an error on this.
         *
         * So bail out here instead of advertizing the invalid
         * configuration and get obscure firmware failures from that.
         */
        error_setg(errp, "cpu hot-unplug requires cpu hot-plug");
        return;
    }

    isa_bus = isa_bus_new(DEVICE(d), get_system_memory(), get_system_io(),
                          errp);
    if (!isa_bus) {
@@ -165,9 +165,9 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
        goto fail;
    }
    net->nc = options->net_backend;
    net->dev.nvqs = options->nvqs;

    net->dev.max_queues = 1;
    net->dev.nvqs = 2;
    net->dev.vqs = net->vqs;

    if (backend_kernel) {

@@ -242,9 +242,6 @@ static int vhost_net_start_one(struct vhost_net *net,
    struct vhost_vring_file file = { };
    int r;

    net->dev.nvqs = 2;
    net->dev.vqs = net->vqs;

    r = vhost_dev_enable_notifiers(&net->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
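vhost_net_init() now takes the virtqueue count from VhostNetOptions instead of hard-coding net->dev.nvqs = 2, so every backend fills in options.nvqs itself (the tap, vhost-user and vhost-vdpa callers later in this series do exactly that). A hedged sketch of such a caller, using only the option fields visible in the diff (function name hypothetical):

/* Sketch: set up a vhost_net instance with an explicit queue count. */
static struct vhost_net *example_vhost_net_setup(NetClientState *backend,
                                                 void *opaque)
{
    VhostNetOptions options = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .net_backend = backend,
        .busyloop_timeout = 0,
        .nvqs = 2,                  /* one RX and one TX virtqueue */
        .opaque = opaque,
    };

    return vhost_net_init(&options);
}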
@@ -314,14 +314,6 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type,
    return b;
}

PCIBus *find_i440fx(void)
{
    PCIHostState *s = OBJECT_CHECK(PCIHostState,
                                   object_resolve_path("/machine/i440fx", NULL),
                                   TYPE_PCI_HOST_BRIDGE);
    return s ? s->bus : NULL;
}

static void i440fx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
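With find_i440fx() removed, code that needs the i386 PCI host goes through a QOM lookup instead, as acpi_get_i386_pci_host() above now does. An illustrative sketch of how a former caller can reach the root bus (hypothetical; it assumes the PCIHostState::bus field used by the deleted function):

/* Sketch: resolve the host bridge via QOM and take its root bus. */
Object *host = acpi_get_i386_pci_host();
PCIBus *root_bus = host ? PCI_HOST_BRIDGE(host)->bus : NULL;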
@@ -293,7 +293,7 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
    qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
}

static const VhostOps kernel_ops = {
const VhostOps kernel_ops = {
    .backend_type = VHOST_BACKEND_TYPE_KERNEL,
    .vhost_backend_init = vhost_kernel_init,
    .vhost_backend_cleanup = vhost_kernel_cleanup,

@@ -328,34 +328,6 @@ static const VhostOps kernel_ops = {
};
#endif

int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
#ifdef CONFIG_VHOST_KERNEL
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
#endif
#ifdef CONFIG_VHOST_USER
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
#endif
#ifdef CONFIG_VHOST_VDPA
    case VHOST_BACKEND_TYPE_VDPA:
        dev->vhost_ops = &vdpa_ops;
        break;
#endif
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    return r;
}

int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                      uint64_t iova, uint64_t uaddr,
                                      uint64_t len,
@@ -429,7 +429,7 @@ static int process_message_reply(struct vhost_dev *dev,
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type."
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -1;

@@ -1095,23 +1095,6 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
    return 0;
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{

@@ -1288,33 +1271,6 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
@@ -1360,6 +1316,107 @@ static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
    return 0;
}

static int enforce_reply(struct vhost_dev *dev,
                         const VhostUserMsg *msg)
{
    uint64_t dummy;

    if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        return process_message_reply(dev, msg);
    }

    /*
     * We need to wait for a reply but the backend does not
     * support replies for the command we just sent.
     * Send VHOST_USER_GET_FEATURES which makes all backends
     * send a reply.
     */
    return vhost_user_get_features(dev, &dummy);
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);

    if (reply_supported && wait_for_reply) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
                              bool wait_for_reply)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (wait_for_reply) {
        bool reply_supported = virtio_has_feature(dev->protocol_features,
                                                  VHOST_USER_PROTOCOL_F_REPLY_ACK);
        if (reply_supported) {
            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
        }
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);

    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
                              log_enabled);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
                              false);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {

@@ -1474,6 +1531,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -1;
    }

@@ -2422,7 +2480,7 @@ void vhost_user_cleanup(VhostUserState *user)
    if (!user->chr) {
        return;
    }

    memory_region_transaction_begin();
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (user->notifier[i].addr) {
            object_unparent(OBJECT(&user->notifier[i].mr));

@@ -2430,6 +2488,7 @@ void vhost_user_cleanup(VhostUserState *user)
            user->notifier[i].addr = NULL;
        }
    }
    memory_region_transaction_commit();
    user->chr = NULL;
}
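The reworked vhost-user setters above make a request synchronous only when it matters: vhost_user_set_u64() takes a wait_for_reply flag, and enforce_reply() falls back to VHOST_USER_GET_FEATURES when the backend does not support REPLY_ACK. A hedged sketch of the resulting behaviour for feature negotiation (illustrative only, since vhost_user_set_features() is file-local to vhost-user.c):

/*
 * Sketch: requesting VHOST_F_LOG_ALL makes vhost_user_set_features() wait
 * for the backend's acknowledgement, so a backend that failed to enable
 * dirty logging is reported here rather than being silently ignored.
 */
static int example_start_logging(struct vhost_dev *dev, uint64_t features)
{
    return vhost_user_set_features(dev, features | (1ULL << VHOST_F_LOG_ALL));
}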
@@ -89,19 +89,13 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
    return ret;
}

static void vhost_vdpa_listener_begin(MemoryListener *listener)
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",

@@ -109,6 +103,16 @@ static void vhost_vdpa_listener_begin(MemoryListener *listener)
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

@@ -120,6 +124,10 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

@@ -127,6 +135,8 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

static void vhost_vdpa_listener_region_add(MemoryListener *listener,

@@ -170,6 +180,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {

@@ -221,6 +232,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");

@@ -234,7 +246,6 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .begin = vhost_vdpa_listener_begin,
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
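The vhost-vdpa hunks above turn IOTLB updates into per-transaction batches: vhost_vdpa_iotlb_batch_begin_once() sends VHOST_IOTLB_BATCH_BEGIN at most once, and the listener's commit hook sends the matching end message. A hedged sketch of the pattern as region_add/region_del use it (simplified, error handling omitted; helper name hypothetical):

/*
 * Sketch: several map/unmap calls within one MemoryListener transaction are
 * grouped into a single batch when VHOST_BACKEND_F_IOTLB_BATCH is offered;
 * vhost_vdpa_listener_commit() later sends VHOST_IOTLB_BATCH_END.
 */
static void example_remap(struct vhost_vdpa *v, hwaddr iova, uint64_t size,
                          void *vaddr, bool readonly)
{
    vhost_vdpa_iotlb_batch_begin_once(v);   /* idempotent per transaction */
    vhost_vdpa_dma_unmap(v, iova, size);
    vhost_vdpa_dma_map(v, iova, size, vaddr, readonly);
}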
@@ -432,13 +443,13 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
        return -EFAULT;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
        return -EFAULT;
    }

    dev->backend_cap = features;
@@ -174,6 +174,35 @@ static uint64_t vhost_get_log_size(struct vhost_dev *dev)
    return log_size;
}

static int vhost_set_backend_type(struct vhost_dev *dev,
                                  VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
#ifdef CONFIG_VHOST_KERNEL
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
#endif
#ifdef CONFIG_VHOST_USER
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
#endif
#ifdef CONFIG_VHOST_VDPA
    case VHOST_BACKEND_TYPE_VDPA:
        dev->vhost_ops = &vdpa_ops;
        break;
#endif
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    return r;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;

@@ -286,7 +315,7 @@ static int vhost_dev_has_iommu(struct vhost_dev *dev)
     * does not have IOMMU, there's no need to enable this feature
     * which may cause unnecessary IOTLB miss/update trnasactions.
     */
    return vdev->dma_as != &address_space_memory &&
    return virtio_bus_device_iommu_enabled(vdev) &&
           virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}
@@ -30,6 +30,7 @@
#include "trace.h"
#include "qemu/error-report.h"
#include "migration/misc.h"
#include "migration/migration.h"

#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"

@@ -533,22 +534,18 @@ static bool get_free_page_hints(VirtIOBalloon *dev)
        if (dev->free_page_hint_status == FREE_PAGE_HINT_S_REQUESTED &&
            id == dev->free_page_hint_cmd_id) {
            dev->free_page_hint_status = FREE_PAGE_HINT_S_START;
        } else {
        } else if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) {
            /*
             * Stop the optimization only when it has started. This
             * avoids a stale stop sign for the previous command.
             */
            if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) {
                dev->free_page_hint_status = FREE_PAGE_HINT_S_STOP;
            }
            dev->free_page_hint_status = FREE_PAGE_HINT_S_STOP;
        }
    }

    if (elem->in_num) {
        if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) {
            qemu_guest_free_page_hint(elem->in_sg[0].iov_base,
                                      elem->in_sg[0].iov_len);
        }
    if (elem->in_num && dev->free_page_hint_status == FREE_PAGE_HINT_S_START) {
        qemu_guest_free_page_hint(elem->in_sg[0].iov_base,
                                  elem->in_sg[0].iov_len);
    }

out:

@@ -591,16 +588,10 @@ static void virtio_balloon_free_page_start(VirtIOBalloon *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    /* For the stop and copy phase, we don't need to start the optimization */
    if (!vdev->vm_running) {
        return;
    }

    qemu_mutex_lock(&s->free_page_lock);

    if (s->free_page_hint_cmd_id == UINT_MAX) {
        s->free_page_hint_cmd_id =
            VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN;
        s->free_page_hint_cmd_id = VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN;
    } else {
        s->free_page_hint_cmd_id++;
    }

@@ -648,8 +639,7 @@ static void virtio_balloon_free_page_done(VirtIOBalloon *s)
static int
virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
{
    VirtIOBalloon *dev = container_of(n, VirtIOBalloon,
                                      free_page_hint_notify);
    VirtIOBalloon *dev = container_of(n, VirtIOBalloon, free_page_hint_notify);
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    PrecopyNotifyData *pnd = data;

@@ -662,6 +652,18 @@ virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
        return 0;
    }

    /*
     * Pages hinted via qemu_guest_free_page_hint() are cleared from the dirty
     * bitmap and will not get migrated, especially also not when the postcopy
     * destination starts using them and requests migration from the source; the
     * faulting thread will stall until postcopy migration finishes and
     * all threads are woken up. Let's not start free page hinting if postcopy
     * is possible.
     */
    if (migrate_postcopy_ram()) {
        return 0;
    }

    switch (pnd->reason) {
    case PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC:
        virtio_balloon_free_page_stop(dev);

@@ -906,8 +908,7 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    if (virtio_has_feature(s->host_features,
                           VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
    if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
        s->free_page_vq = virtio_add_queue(vdev, VIRTQUEUE_MAX_SIZE,
                                           virtio_balloon_handle_free_page_vq);
        precopy_add_notifier(&s->free_page_hint_notify);
@@ -325,6 +325,20 @@ static char *virtio_bus_get_fw_dev_path(DeviceState *dev)
    return NULL;
}

bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(qdev));
    VirtioBusState *bus = VIRTIO_BUS(qbus);
    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);

    if (!klass->iommu_enabled) {
        return false;
    }

    return klass->iommu_enabled(qbus->parent);
}

static void virtio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
@@ -1121,6 +1121,19 @@ static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
    return pci_get_address_space(dev);
}

static bool virtio_pci_iommu_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;
    AddressSpace *dma_as = pci_device_iommu_address_space(dev);

    if (dma_as == &address_space_memory) {
        return false;
    }

    return true;
}

static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

@@ -2202,6 +2215,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}
|
@ -133,12 +133,10 @@ struct VirtQueue
|
||||
QLIST_ENTRY(VirtQueue) node;
|
||||
};
|
||||
|
||||
/* Called within call_rcu(). */
|
||||
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
|
||||
{
|
||||
if (!caches) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(caches != NULL);
|
||||
address_space_cache_destroy(&caches->desc);
|
||||
address_space_cache_destroy(&caches->avail);
|
||||
address_space_cache_destroy(&caches->used);
|
||||
@ -634,6 +632,7 @@ static int virtio_queue_split_empty(VirtQueue *vq)
|
||||
return empty;
|
||||
}
|
||||
|
||||
/* Called within rcu_read_lock(). */
|
||||
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
|
||||
{
|
||||
struct VRingPackedDesc desc;
|
||||
|
@@ -47,6 +47,8 @@
#define ACPI_PM_PROP_PM_IO_BASE "pm_io_base"
#define ACPI_PM_PROP_GPE0_BLK "gpe0_blk"
#define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len"
#define ACPI_PM_PROP_ACPI_PCIHP_BRIDGE "acpi-pci-hotplug-with-bridge-support"
#define ACPI_PM_PROP_ACPI_PCI_ROOTHP "acpi-root-pci-hotplug"

/* PM Timer ticks per second (HZ) */
#define PM_TIMER_FREQUENCY  3579545
@@ -70,8 +70,6 @@
OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)

#define TYPE_ACPI_GED_X86 "acpi-ged-x86"
#define ACPI_GED_X86(obj) \
    OBJECT_CHECK(AcpiGedX86State, (obj), TYPE_ACPI_GED_X86)

#define ACPI_GED_EVT_SEL_OFFSET    0x0
#define ACPI_GED_EVT_SEL_LEN       0x4
@@ -47,10 +47,6 @@ typedef struct PCMachineState {
    bool default_bus_bypass_iommu;
    uint64_t max_fw_size;

    /* NUMA information: */
    uint64_t numa_nodes;
    uint64_t *node_mem;

    /* ACPI Memory hotplug IO base address */
    hwaddr memhp_io_base;
} PCMachineState;
@@ -45,6 +45,5 @@ PCIBus *i440fx_init(const char *host_type, const char *pci_type,
                    MemoryRegion *pci_memory,
                    MemoryRegion *ram_memory);

PCIBus *find_i440fx(void);

#endif
@@ -173,12 +173,6 @@ typedef struct VhostOps {
    vhost_force_iommu_op vhost_force_iommu;
} VhostOps;

extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

int vhost_set_backend_type(struct vhost_dev *dev,
                           VhostBackendType backend_type);

int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                      uint64_t iova, uint64_t uaddr,
                                      uint64_t len,
@@ -22,6 +22,7 @@ typedef struct VhostVDPAHostNotifier {
typedef struct vhost_vdpa {
    int device_fd;
    uint32_t msg_type;
    bool iotlb_batch_begin_sent;
    MemoryListener listener;
    struct vhost_dev *dev;
    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
@@ -71,7 +71,7 @@ struct vhost_dev {
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    int nvqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* if non-zero, minimum required value for max_queues */

@@ -95,6 +95,10 @@ struct vhost_dev {
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
@@ -93,6 +93,7 @@ struct VirtioBusClass {
     */
    bool has_variable_vring_alignment;
    AddressSpace *(*get_dma_as)(DeviceState *d);
    bool (*iommu_enabled)(DeviceState *d);
};

struct VirtioBusState {

@@ -154,5 +155,6 @@ void virtio_bus_release_ioeventfd(VirtioBusState *bus);
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign);
/* Tell the bus that the ioeventfd handler is no longer required. */
void virtio_bus_cleanup_host_notifier(VirtioBusState *bus, int n);

/* Whether the IOMMU is enabled for this device */
bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev);
#endif /* VIRTIO_BUS_H */
@@ -14,6 +14,7 @@ typedef struct VhostNetOptions {
    VhostBackendType backend_type;
    NetClientState *net_backend;
    uint32_t busyloop_timeout;
    unsigned int nvqs;
    void *opaque;
} VhostNetOptions;
@@ -749,6 +749,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
        qemu_set_nonblock(vhostfd);
    }
    options.opaque = (void *)(uintptr_t)vhostfd;
    options.nvqs = 2;

    s->vhost_net = vhost_net_init(&options);
    if (!s->vhost_net) {
@@ -85,6 +85,7 @@ static int vhost_user_start(int queues, NetClientState *ncs[],
        options.net_backend = ncs[i];
        options.opaque = be;
        options.busyloop_timeout = 0;
        options.nvqs = 2;
        net = vhost_net_init(&options);
        if (!net) {
            error_report("failed to init vhost_net for queue %d", i);
@@ -29,7 +29,6 @@ typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;
    uint64_t acked_features;
    bool started;
} VhostVDPAState;

@@ -82,16 +81,6 @@ static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
    return ret;
}

static void vhost_vdpa_del(NetClientState *ncs)
{
    VhostVDPAState *s;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
    }
}

static int vhost_vdpa_add(NetClientState *ncs, void *be)
{
    VhostNetOptions options;

@@ -105,27 +94,23 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be)
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = 2;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err;
    }
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err;
        goto err_check;
    }
    return 0;
err:
    if (net) {
        vhost_net_cleanup(net);
    }
    vhost_vdpa_del(ncs);
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

@@ -180,7 +165,6 @@ static int net_vhost_vdpa_init(NetClientState *peer, const char *device,
    assert(name);
    nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name);
    snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA);
    nc->queue_index = 0;
    s = DO_UPCAST(VhostVDPAState, nc, nc);
    vdpa_device_fd = qemu_open_old(vhostdev, O_RDWR);
    if (vdpa_device_fd == -1) {

@@ -188,7 +172,10 @@ static int net_vhost_vdpa_init(NetClientState *peer, const char *device,
    }
    s->vhost_vdpa.device_fd = vdpa_device_fd;
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa);
    assert(s->vhost_net);
    if (ret) {
        qemu_close(vdpa_device_fd);
        qemu_del_net_client(nc);
    }
    return ret;
}
@@ -26,7 +26,6 @@ stub_ss.add(files('module-opts.c'))
stub_ss.add(files('monitor.c'))
stub_ss.add(files('monitor-core.c'))
stub_ss.add(files('pci-bus.c'))
stub_ss.add(files('pci-host-piix.c'))
stub_ss.add(files('qemu-timer-notify-cb.c'))
stub_ss.add(files('qmp_memory_device.c'))
stub_ss.add(files('qmp-command-available.c'))
@@ -1,7 +0,0 @@
#include "qemu/osdep.h"
#include "hw/pci-host/i440fx.h"

PCIBus *find_i440fx(void)
{
    return NULL;
}
@@ -540,6 +540,11 @@ vubr_new(const char *path, bool client)
    CallbackFunc cb;
    size_t len;

    if (strlen(path) >= sizeof(un.sun_path)) {
        fprintf(stderr, "unix domain socket path '%s' is too long\n", path);
        exit(1);
    }

    /* Get a UNIX socket. */
    dev->sock = socket(AF_UNIX, SOCK_STREAM, 0);
    if (dev->sock == -1) {

@@ -826,7 +831,7 @@ main(int argc, char *argv[])
out:
    fprintf(stderr, "Usage: %s ", argv[0]);
    fprintf(stderr, "[-c] [-H] [-u ud_socket_path] [-l lhost:lport] [-r rhost:rport]\n");
    fprintf(stderr, "\t-u path to unix doman socket. default: %s\n",
    fprintf(stderr, "\t-u path to unix domain socket. default: %s\n",
            DEFAULT_UD_SOCKET);
    fprintf(stderr, "\t-l local host and port. default: %s:%s\n",
            DEFAULT_LHOST, DEFAULT_LPORT);