diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index 78c0a6d323..481a16c60a 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -36,6 +36,7 @@
 #include "hw/mem/pc-dimm.h"
 #include "hw/acpi/memory_hotplug.h"
 #include "hw/acpi/acpi_dev_interface.h"
+#include "hw/xen/xen.h"
 
 //#define DEBUG
 
@@ -501,6 +502,9 @@ I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
     s->irq = sci_irq;
     s->smi_irq = smi_irq;
     s->kvm_enabled = kvm_enabled;
+    if (xen_enabled()) {
+        s->use_acpi_pci_hotplug = false;
+    }
 
     qdev_init_nofail(dev);
 
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 231e9a7392..21842a01e7 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -59,6 +59,13 @@ struct PersistentGrant {
 
 typedef struct PersistentGrant PersistentGrant;
 
+struct PersistentRegion {
+    void *addr;
+    int num;
+};
+
+typedef struct PersistentRegion PersistentRegion;
+
 struct ioreq {
     blkif_request_t     req;
     int16_t             status;
@@ -118,6 +125,7 @@ struct XenBlkDev {
     gboolean            feature_discard;
     gboolean            feature_persistent;
     GTree               *persistent_gnts;
+    GSList              *persistent_regions;
     unsigned int        persistent_gnt_count;
     unsigned int        max_grants;
 
@@ -177,6 +185,23 @@ static void destroy_grant(gpointer pgnt)
     g_free(grant);
 }
 
+static void remove_persistent_region(gpointer data, gpointer dev)
+{
+    PersistentRegion *region = data;
+    struct XenBlkDev *blkdev = dev;
+    XenGnttab gnt = blkdev->xendev.gnttabdev;
+
+    if (xc_gnttab_munmap(gnt, region->addr, region->num) != 0) {
+        xen_be_printf(&blkdev->xendev, 0,
+                      "xc_gnttab_munmap region %p failed: %s\n",
+                      region->addr, strerror(errno));
+    }
+    xen_be_printf(&blkdev->xendev, 3,
+                  "unmapped grant region %p with %d pages\n",
+                  region->addr, region->num);
+    g_free(region);
+}
+
 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
 {
     struct ioreq *ioreq = NULL;
@@ -343,6 +368,7 @@ static int ioreq_map(struct ioreq *ioreq)
     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i, j, new_maps = 0;
     PersistentGrant *grant;
+    PersistentRegion *region;
     /* domids and refs variables will contain the information necessary
      * to map the grants that are needed to fulfill this request.
      *
@@ -421,7 +447,22 @@ static int ioreq_map(struct ioreq *ioreq)
             }
         }
     }
-    if (ioreq->blkdev->feature_persistent) {
+    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
+        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
+        ioreq->blkdev->max_grants))) {
+        /*
+         * If we are using persistent grants and batch mappings only
+         * add the new maps to the list of persistent grants if the whole
+         * area can be persistently mapped.
+         */
+        if (batch_maps) {
+            region = g_malloc0(sizeof(*region));
+            region->addr = ioreq->pages;
+            region->num = new_maps;
+            ioreq->blkdev->persistent_regions = g_slist_append(
+                                              ioreq->blkdev->persistent_regions,
+                                              region);
+        }
         while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
               && new_maps) {
             /* Go through the list of newly mapped grants and add as many
@@ -447,6 +488,7 @@ static int ioreq_map(struct ioreq *ioreq)
                           grant);
             ioreq->blkdev->persistent_gnt_count++;
         }
+        assert(!batch_maps || new_maps == 0);
     }
     for (i = 0; i < ioreq->v.niov; i++) {
         ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
@@ -971,7 +1013,10 @@ static int blk_connect(struct XenDevice *xendev)
         blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
         blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                              NULL, NULL,
+                                             batch_maps ?
+                                             (GDestroyNotify)g_free :
                                              (GDestroyNotify)destroy_grant);
+        blkdev->persistent_regions = NULL;
         blkdev->persistent_gnt_count = 0;
     }
 
@@ -1000,6 +1045,26 @@ static void blk_disconnect(struct XenDevice *xendev)
         blkdev->cnt_map--;
        blkdev->sring = NULL;
     }
+
+    /*
+     * Unmap persistent grants before switching to the closed state
+     * so the frontend can free them.
+     *
+     * In the !batch_maps case g_tree_destroy will take care of unmapping
+     * the grant, but in the batch_maps case we need to iterate over every
+     * region in persistent_regions and unmap it.
+     */
+    if (blkdev->feature_persistent) {
+        g_tree_destroy(blkdev->persistent_gnts);
+        assert(batch_maps || blkdev->persistent_gnt_count == 0);
+        if (batch_maps) {
+            blkdev->persistent_gnt_count = 0;
+            g_slist_foreach(blkdev->persistent_regions,
+                            (GFunc)remove_persistent_region, blkdev);
+            g_slist_free(blkdev->persistent_regions);
+        }
+        blkdev->feature_persistent = false;
+    }
 }
 
 static int blk_free(struct XenDevice *xendev)
@@ -1011,11 +1076,6 @@ static int blk_free(struct XenDevice *xendev)
         blk_disconnect(xendev);
     }
 
-    /* Free persistent grants */
-    if (blkdev->feature_persistent) {
-        g_tree_destroy(blkdev->persistent_gnts);
-    }
-
     while (!QLIST_EMPTY(&blkdev->freelist)) {
         ioreq = QLIST_FIRST(&blkdev->freelist);
         QLIST_REMOVE(ioreq, list);
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index b559181ba7..7bb97a4923 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -916,17 +916,6 @@ static QEMUMachine xenfv_machine = {
     .max_cpus = HVM_MAX_VCPUS,
     .default_machine_opts = "accel=xen",
     .hot_add_cpu = pc_hot_add_cpu,
-    .compat_props = (GlobalProperty[]) {
-        /* xenfv has no fwcfg and so does not load acpi from QEMU.
-         * as such new acpi features don't work.
-         */
-        {
-            .driver   = "PIIX4_PM",
-            .property = "acpi-pci-hotplug-with-bridge-support",
-            .value    = "off",
-        },
-        { /* end of list */ }
-    },
 };
 
 #endif
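
Note: the cleanup path added to blk_disconnect() relies on GLib's singly linked list API: regions recorded with g_slist_append() during ioreq_map() are later walked with g_slist_foreach() and the list itself is released with g_slist_free(). The standalone sketch below only illustrates that pattern and is not part of the patch; the Region type, the unmap_region() callback, and the sample values are hypothetical stand-ins for PersistentRegion and remove_persistent_region(), and the real code unmaps with xc_gnttab_munmap() instead of printing.

/* Sketch only: track mapped regions in a GSList, then tear them all down
 * with a GFunc callback, mirroring how persistent_regions is handled.
 * Build with: gcc sketch.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdio.h>

typedef struct {
    void *addr;   /* start of the mapped area (stand-in for region->addr) */
    int num;      /* number of pages in the area (stand-in for region->num) */
} Region;

/* GFunc callback: release one region, then free its tracking struct. */
static void unmap_region(gpointer data, gpointer user_data)
{
    Region *r = data;
    printf("unmapping %p (%d pages) for %s\n",
           r->addr, r->num, (const char *)user_data);
    g_free(r);
}

int main(void)
{
    GSList *regions = NULL;
    Region *r;

    /* Map-time bookkeeping (cf. ioreq_map): append each batch-mapped area. */
    r = g_malloc0(sizeof(*r));
    r->addr = (void *)0x100000;
    r->num = 11;
    regions = g_slist_append(regions, r);

    /* Disconnect-time teardown (cf. blk_disconnect): walk, then drop the list. */
    g_slist_foreach(regions, unmap_region, (gpointer)"xvda");
    g_slist_free(regions);
    return 0;
}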