From 54420332b595d4563d9c0e417d796baecd2debfa Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 2 Nov 2020 05:09:19 -0500 Subject: [PATCH 01/31] pc: comment style fixup Fix up checkpatch comment style warnings. Signed-off-by: Michael S. Tsirkin Reviewed-by: Chen Qun --- hw/i386/pc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 5e6c0023e0..17b514d1da 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -1149,10 +1149,11 @@ void pc_basic_device_init(struct PCMachineState *pcms, error_report("couldn't create HPET device"); exit(1); } - /* For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 - * and earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, - * IRQ8 and IRQ2. - */ + /* + * For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 and + * earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, IRQ8 and + * IRQ2. + */ uint8_t compat = object_property_get_uint(OBJECT(hpet), HPET_INTCAP, NULL); if (!compat) { From d31992ae131527b63284d406d5dac21b02d4f3ef Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 8 Oct 2020 10:30:24 +0200 Subject: [PATCH 02/31] virtio-mem: Make sure "addr" is always multiples of the block size The spec states: "The device MUST set addr, region_size, usable_region_size, plugged_size, requested_size to multiples of block_size." In some cases, we currently don't guarantee that for "addr": For example, when starting a VM with 4 GiB boot memory and a virtio-mem device with a block size of 2 GiB, "memaddr"/"addr" will be auto-assigned to 0x140000000 (5 GiB). We'll try to improve auto-assignment for memory devices next, to avoid bailing out in case memory device code selects a bad address. Note: The Linux driver doesn't support such big block sizes yet. Reviewed-by: Pankaj Gupta Fixes: 910b25766b33 ("virtio-mem: Paravirtualized memory hot(un)plug") Cc: "Michael S. Tsirkin" Cc: Wei Yang Cc: Dr. David Alan Gilbert Cc: Igor Mammedov Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Message-Id: <20201008083029.9504-2-david@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/virtio/virtio-mem.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 7c8ca9f28b..70200b4eac 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -449,6 +449,11 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) ")", VIRTIO_MEM_REQUESTED_SIZE_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size); return; + } else if (!QEMU_IS_ALIGNED(vmem->addr, vmem->block_size)) { + error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64 + ")", VIRTIO_MEM_ADDR_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP, + vmem->block_size); + return; } else if (!QEMU_IS_ALIGNED(memory_region_size(&vmem->memdev->mr), vmem->block_size)) { error_setg(errp, "'%s' property memdev size has to be multiples of" From 0aed28006114b17d64a8491071d382f4c8a83e41 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 8 Oct 2020 10:30:25 +0200 Subject: [PATCH 03/31] virtio-mem: Make sure "usable_region_size" is always multiples of the block size The spec states: "The device MUST set addr, region_size, usable_region_size, plugged_size, requested_size to multiples of block_size." With block sizes > 256MB, we currently wouldn't guarantee that for the usable_region_size. Note that we cannot exceed the region_size, as we already enforce the alignment there properly. 
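Editor's illustration of that last point (not part of the patch): the snippet below mimics the clamp-then-align logic this change adds to virtio_mem_resize_usable_region(); the 8 GiB region, 2 GiB block size, 3 GiB request and the 256 MiB usable extent are assumed example values mirroring VIRTIO_MEM_USABLE_EXTENT.

/*
 * Standalone sketch: aligning the usable region up to the block size
 * cannot exceed the region size, because the region size itself is
 * already enforced to be a multiple of the block size.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MiB (1024ULL * 1024)
#define GiB (1024 * MiB)
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t region_size = 8 * GiB;      /* block-size aligned (enforced) */
    uint64_t block_size = 2 * GiB;       /* > 256 MiB, the problematic case */
    uint64_t requested_size = 3 * GiB;
    uint64_t usable_extent = 256 * MiB;  /* assumed VIRTIO_MEM_USABLE_EXTENT */

    uint64_t newsize = MIN(region_size, requested_size + usable_extent);
    /* 3.25 GiB here: not a multiple of block_size before this patch */
    newsize = ALIGN_UP(newsize, block_size);
    /* 4 GiB: now a multiple of block_size, and still <= region_size */
    printf("usable region size: %" PRIu64 " GiB\n", newsize / GiB);
    return 0;
}
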
Fixes: 910b25766b33 ("virtio-mem: Paravirtualized memory hot(un)plug") Cc: "Michael S. Tsirkin" Cc: Wei Yang Cc: Dr. David Alan Gilbert Cc: Igor Mammedov Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Message-Id: <20201008083029.9504-3-david@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/virtio/virtio-mem.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 70200b4eac..461ac68ee8 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -227,6 +227,9 @@ static void virtio_mem_resize_usable_region(VirtIOMEM *vmem, uint64_t newsize = MIN(memory_region_size(&vmem->memdev->mr), requested_size + VIRTIO_MEM_USABLE_EXTENT); + /* The usable region size always has to be multiples of the block size. */ + newsize = QEMU_ALIGN_UP(newsize, vmem->block_size); + if (!requested_size) { newsize = 0; } From 228957fea3a998735524abf6354634f1fb710e61 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 8 Oct 2020 10:30:26 +0200 Subject: [PATCH 04/31] virtio-mem: Probe THP size to determine default block size Let's allow a minimum block size of 1 MiB in all configurations. Select the default block size based on - The page size of the memory backend. - The THP size if the memory backend size corresponds to the real host page size. - The global minimum of 1 MiB. and warn if something smaller is configured by the user. VIRTIO_MEM only supports Linux (depends on LINUX), so we can probe the THP size unconditionally. For now we only support virtio-mem on x86-64 - there isn't a user-visible change (x86-64 only supports 2 MiB THP on the PMD level) - the default was, and will be 2 MiB. If we ever have THP on the PUD level (e.g., 1 GiB THP on x86-64), we expect it to be more transparent - e.g., to only optimize fully populated ranges unless explicitly told /configured otherwise (in contrast to PMD THP). Reviewed-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Wei Yang Cc: Dr. David Alan Gilbert Cc: Igor Mammedov Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Message-Id: <20201008083029.9504-4-david@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/virtio/virtio-mem.c | 105 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 4 deletions(-) diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 461ac68ee8..655824ff81 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -33,10 +33,83 @@ #include "trace.h" /* - * Use QEMU_VMALLOC_ALIGN, so no THP will have to be split when unplugging - * memory (e.g., 2MB on x86_64). + * Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking + * bitmap small. */ -#define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)QEMU_VMALLOC_ALIGN) +#define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)(1 * MiB)) + +#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ + defined(__powerpc64__) +#define VIRTIO_MEM_DEFAULT_THP_SIZE ((uint32_t)(2 * MiB)) +#else + /* fallback to 1 MiB (e.g., the THP size on s390x) */ +#define VIRTIO_MEM_DEFAULT_THP_SIZE VIRTIO_MEM_MIN_BLOCK_SIZE +#endif + +/* + * We want to have a reasonable default block size such that + * 1. We avoid splitting THPs when unplugging memory, which degrades + * performance. + * 2. We avoid placing THPs for plugged blocks that also cover unplugged + * blocks. + * + * The actual THP size might differ between Linux kernels, so we try to probe + * it. 
In the future (if we ever run into issues regarding 2.), we might want + * to disable THP in case we fail to properly probe the THP size, or if the + * block size is configured smaller than the THP size. + */ +static uint32_t thp_size; + +#define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size" +static uint32_t virtio_mem_thp_size(void) +{ + gchar *content = NULL; + const char *endptr; + uint64_t tmp; + + if (thp_size) { + return thp_size; + } + + /* + * Try to probe the actual THP size, fallback to (sane but eventually + * incorrect) default sizes. + */ + if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) && + !qemu_strtou64(content, &endptr, 0, &tmp) && + (!endptr || *endptr == '\n')) { + /* + * Sanity-check the value, if it's too big (e.g., aarch64 with 64k base + * pages) or weird, fallback to something smaller. + */ + if (!tmp || !is_power_of_2(tmp) || tmp > 16 * MiB) { + warn_report("Read unsupported THP size: %" PRIx64, tmp); + } else { + thp_size = tmp; + } + } + + if (!thp_size) { + thp_size = VIRTIO_MEM_DEFAULT_THP_SIZE; + warn_report("Could not detect THP size, falling back to %" PRIx64 + " MiB.", thp_size / MiB); + } + + g_free(content); + return thp_size; +} + +static uint64_t virtio_mem_default_block_size(RAMBlock *rb) +{ + const uint64_t page_size = qemu_ram_pagesize(rb); + + /* We can have hugetlbfs with a page size smaller than the THP size. */ + if (page_size == qemu_real_host_page_size) { + return MAX(page_size, virtio_mem_thp_size()); + } + return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE); +} + /* * Size the usable region bigger than the requested size if possible. Esp. * Linux guests will only add (aligned) memory blocks in case they fully @@ -443,10 +516,23 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) rb = vmem->memdev->mr.ram_block; page_size = qemu_ram_pagesize(rb); + /* + * If the block size wasn't configured by the user, use a sane default. This + * allows using hugetlbfs backends of any page size without manual + * intervention. + */ + if (!vmem->block_size) { + vmem->block_size = virtio_mem_default_block_size(rb); + } + if (vmem->block_size < page_size) { error_setg(errp, "'%s' property has to be at least the page size (0x%" PRIx64 ")", VIRTIO_MEM_BLOCK_SIZE_PROP, page_size); return; + } else if (vmem->block_size < virtio_mem_default_block_size(rb)) { + warn_report("'%s' property is smaller than the default block size (%" + PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP, + virtio_mem_default_block_size(rb) / MiB); } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) { error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64 ")", VIRTIO_MEM_REQUESTED_SIZE_PROP, @@ -742,6 +828,18 @@ static void virtio_mem_get_block_size(Object *obj, Visitor *v, const char *name, const VirtIOMEM *vmem = VIRTIO_MEM(obj); uint64_t value = vmem->block_size; + /* + * If not configured by the user (and we're not realized yet), use the + * default block size we would use with the current memory backend. 
+ */ + if (!value) { + if (vmem->memdev && memory_region_is_ram(&vmem->memdev->mr)) { + value = virtio_mem_default_block_size(vmem->memdev->mr.ram_block); + } else { + value = virtio_mem_thp_size(); + } + } + visit_type_size(v, name, &value, errp); } @@ -821,7 +919,6 @@ static void virtio_mem_instance_init(Object *obj) { VirtIOMEM *vmem = VIRTIO_MEM(obj); - vmem->block_size = VIRTIO_MEM_MIN_BLOCK_SIZE; notifier_list_init(&vmem->size_change_notifiers); vmem->precopy_notifier.notify = virtio_mem_precopy_notify; From 780a4d24e73dd0a7c7fc3f6f8b104aab70b7bfff Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 8 Oct 2020 10:30:27 +0200 Subject: [PATCH 05/31] memory-device: Support big alignment requirements Let's warn instead of bailing out - the worst thing that can happen is that we'll fail hot/coldplug later. The user got warned, and this should be rare. This will be necessary for memory devices with rather big (user-defined) alignment requirements - say a virtio-mem device with a 2G block size - which will become important, for example, when supporting vfio in the future. Reviewed-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Wei Yang Cc: Dr. David Alan Gilbert Cc: Igor Mammedov Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Message-Id: <20201008083029.9504-5-david@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/mem/memory-device.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c index 4bc9cf0917..8a736f1a26 100644 --- a/hw/mem/memory-device.c +++ b/hw/mem/memory-device.c @@ -119,9 +119,10 @@ static uint64_t memory_device_get_free_addr(MachineState *ms, /* start of address space indicates the maximum alignment we expect */ if (!QEMU_IS_ALIGNED(range_lob(&as), align)) { - error_setg(errp, "the alignment (0x%" PRIx64 ") is not supported", - align); - return 0; + warn_report("the alignment (0x%" PRIx64 ") exceeds the expected" + " maximum alignment, memory will get fragmented and not" + " all 'maxmem' might be usable for memory devices.", + align); } memory_device_check_addable(ms, size, &err); @@ -151,7 +152,7 @@ static uint64_t memory_device_get_free_addr(MachineState *ms, return 0; } } else { - if (range_init(&new, range_lob(&as), size)) { + if (range_init(&new, QEMU_ALIGN_UP(range_lob(&as), align), size)) { error_setg(errp, "can't add memory device, device too big"); return 0; } From c726aa69419ba2ecd38ae14dc62aaa189c3510e5 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 8 Oct 2020 10:30:28 +0200 Subject: [PATCH 06/31] memory-device: Add get_min_alignment() callback Add a callback that can be used to express additional alignment requirements (exceeding the ones from the memory region). Will be used by virtio-mem to express special alignment requirements due to manually configured, big block sizes (e.g., 1GB with an ordinary memory-backend-ram). This avoids failing later when realizing, because auto-detection wasn't able to assign a properly aligned address. Reviewed-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Wei Yang Cc: Dr. David Alan Gilbert Cc: Igor Mammedov Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Message-Id: <20201008083029.9504-6-david@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/mem/memory-device.c | 11 +++++++++-- include/hw/mem/memory-device.h | 10 ++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c index 8a736f1a26..cf0627fd01 100644 --- a/hw/mem/memory-device.c +++ b/hw/mem/memory-device.c @@ -259,7 +259,7 @@ void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms, { const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); Error *local_err = NULL; - uint64_t addr, align; + uint64_t addr, align = 0; MemoryRegion *mr; mr = mdc->get_memory_region(md, &local_err); @@ -267,7 +267,14 @@ void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms, goto out; } - align = legacy_align ? *legacy_align : memory_region_get_alignment(mr); + if (legacy_align) { + align = *legacy_align; + } else { + if (mdc->get_min_alignment) { + align = mdc->get_min_alignment(md); + } + align = MAX(align, memory_region_get_alignment(mr)); + } addr = mdc->get_addr(md); addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align, memory_region_size(mr), &local_err); diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h index 30d7e99f52..48d2611fc5 100644 --- a/include/hw/mem/memory-device.h +++ b/include/hw/mem/memory-device.h @@ -88,6 +88,16 @@ struct MemoryDeviceClass { */ MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp); + /* + * Optional: Return the desired minimum alignment of the device in guest + * physical address space. The final alignment is computed based on this + * alignment and the alignment requirements of the memory region. + * + * Called when plugging the memory device to detect the required alignment + * during address assignment. + */ + uint64_t (*get_min_alignment)(const MemoryDeviceState *md); + /* * Translate the memory device into #MemoryDeviceInfo. */ From 296e88fd9f407c1ca5d749b921e12407e7f9da1d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 8 Oct 2020 10:30:29 +0200 Subject: [PATCH 07/31] virito-mem: Implement get_min_alignment() The block size determines the alignment requirements. Implement get_min_alignment() of the TYPE_MEMORY_DEVICE interface. This allows auto-assignment of a properly aligned address in guest physical address space. For example, when specifying a 2GB block size for a virtio-mem device with 10GB with a memory setup "-m 4G, 20G", we'll no longer fail when realizing. Reviewed-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Wei Yang Cc: Dr. David Alan Gilbert Cc: Igor Mammedov Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Message-Id: <20201008083029.9504-7-david@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/virtio/virtio-mem-pci.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c index 913f4a3326..fa5395cd88 100644 --- a/hw/virtio/virtio-mem-pci.c +++ b/hw/virtio/virtio-mem-pci.c @@ -76,6 +76,12 @@ static void virtio_mem_pci_fill_device_info(const MemoryDeviceState *md, info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM; } +static uint64_t virtio_mem_pci_get_min_alignment(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_BLOCK_SIZE_PROP, + &error_abort); +} + static void virtio_mem_pci_size_change_notify(Notifier *notifier, void *data) { VirtIOMEMPCI *pci_mem = container_of(notifier, VirtIOMEMPCI, @@ -110,6 +116,7 @@ static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) mdc->get_plugged_size = virtio_mem_pci_get_plugged_size; mdc->get_memory_region = virtio_mem_pci_get_memory_region; mdc->fill_device_info = virtio_mem_pci_fill_device_info; + mdc->get_min_alignment = virtio_mem_pci_get_min_alignment; } static void virtio_mem_pci_instance_init(Object *obj) From 88eed1989619b4059d225c593f6c59860e0271c8 Mon Sep 17 00:00:00 2001 From: Xinhao Zhang Date: Tue, 3 Nov 2020 18:26:32 +0800 Subject: [PATCH 08/31] hw/acpi : Don't use '#' flag of printf format Fix code style. Don't use '#' flag of printf format ('%#') in format strings, use '0x' prefix instead Signed-off-by: Xinhao Zhang Signed-off-by: Kai Deng Message-Id: <20201103102634.273021-1-zhangxinhao1@huawei.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/acpi/nvdimm.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index 8f7cc16add..8ad5516142 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -556,7 +556,7 @@ static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in, fit = fit_buf->fit; - nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n", + nvdimm_debug("Read FIT: offset 0x%x FIT size 0x%x Dirty %s.\n", read_fit->offset, fit->len, fit_buf->dirty ? 
"Yes" : "No"); if (read_fit->offset > fit->len) { @@ -664,7 +664,7 @@ static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr) label_size = nvdimm->label_size; mxfer = nvdimm_get_max_xfer_label_size(); - nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer); + nvdimm_debug("label_size 0x%x, max_xfer 0x%x.\n", label_size, mxfer); label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS); label_size_out.label_size = cpu_to_le32(label_size); @@ -680,19 +680,19 @@ static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm, uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID; if (offset + length < offset) { - nvdimm_debug("offset %#x + length %#x is overflow.\n", offset, + nvdimm_debug("offset 0x%x + length 0x%x is overflow.\n", offset, length); return ret; } if (nvdimm->label_size < offset + length) { - nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n", + nvdimm_debug("position 0x%x is beyond label data (len = %" PRIx64 ").\n", offset + length, nvdimm->label_size); return ret; } if (length > nvdimm_get_max_xfer_label_size()) { - nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n", + nvdimm_debug("length (0x%x) is larger than max_xfer (0x%x).\n", length, nvdimm_get_max_xfer_label_size()); return ret; } @@ -716,7 +716,7 @@ static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, get_label_data->offset = le32_to_cpu(get_label_data->offset); get_label_data->length = le32_to_cpu(get_label_data->length); - nvdimm_debug("Read Label Data: offset %#x length %#x.\n", + nvdimm_debug("Read Label Data: offset 0x%x length 0x%x.\n", get_label_data->offset, get_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset, @@ -755,7 +755,7 @@ static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, set_label_data->offset = le32_to_cpu(set_label_data->offset); set_label_data->length = le32_to_cpu(set_label_data->length); - nvdimm_debug("Write Label Data: offset %#x length %#x.\n", + nvdimm_debug("Write Label Data: offset 0x%x length 0x%x.\n", set_label_data->offset, set_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset, @@ -838,7 +838,7 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) NvdimmDsmIn *in; hwaddr dsm_mem_addr = val; - nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr); + nvdimm_debug("dsm memory address 0x%" HWADDR_PRIx ".\n", dsm_mem_addr); /* * The DSM memory is mapped to guest address space so an evil guest @@ -852,11 +852,11 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) in->function = le32_to_cpu(in->function); in->handle = le32_to_cpu(in->handle); - nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision, + nvdimm_debug("Revision 0x%x Handler 0x%x Function 0x%x.\n", in->revision, in->handle, in->function); if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) { - nvdimm_debug("Revision %#x is not supported, expect %#x.\n", + nvdimm_debug("Revision 0x%x is not supported, expect 0x%x.\n", in->revision, 0x1); nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr); goto exit; From 4cbf31a8fef91648995f3f576f7ede661b3e0c18 Mon Sep 17 00:00:00 2001 From: Xinhao Zhang Date: Tue, 3 Nov 2020 18:26:33 +0800 Subject: [PATCH 09/31] hw/acpi : add space before the open parenthesis '(' Fix code style. Space required before the open parenthesis '('. 
Signed-off-by: Xinhao Zhang Signed-off-by: Kai Deng Message-Id: <20201103102634.273021-2-zhangxinhao1@huawei.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/acpi/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/acpi/core.c b/hw/acpi/core.c index ade9158cbf..2c0c83221f 100644 --- a/hw/acpi/core.c +++ b/hw/acpi/core.c @@ -558,7 +558,7 @@ static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) if (val & ACPI_BITMASK_SLEEP_ENABLE) { /* change suspend type */ uint16_t sus_typ = (val >> 10) & 7; - switch(sus_typ) { + switch (sus_typ) { case 0: /* soft power off */ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); break; From 510feed79a364521df41508ced6498ebf30728cd Mon Sep 17 00:00:00 2001 From: Xinhao Zhang Date: Tue, 3 Nov 2020 18:26:34 +0800 Subject: [PATCH 10/31] hw/acpi : add spaces around operator Fix code style. Operator needs spaces both sides. Signed-off-by: Xinhao Zhang Signed-off-by: Kai Deng Message-Id: <20201103102634.273021-3-zhangxinhao1@huawei.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/acpi/pcihp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 32ae8b2c0a..17c32e0ffd 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -400,7 +400,7 @@ void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, s->io_len = ACPI_PCIHP_SIZE; s->io_base = ACPI_PCIHP_ADDR; - s->root= root_bus; + s->root = root_bus; s->legacy_piix = !bridges_enabled; memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s, From 8faf2f1de40e2fb919d8f584478d407a2fa7c80a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Tue, 3 Nov 2020 07:35:41 +0100 Subject: [PATCH 11/31] hw/virtio/vhost-backend: Fix Coverity CID 1432871 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix uninitialized value issues reported by Coverity: Field 'msg.reserved' is uninitialized when calling write(). While the 'struct vhost_msg' does not have a 'reserved' field, we still initialize it to have the two parts of the function consistent. Reported-by: Coverity (CID 1432864: UNINIT) Fixes: c471ad0e9bd ("vhost_net: device IOTLB support") Reviewed-by: Peter Maydell Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20201103063541.2463363-1-philmd@redhat.com> Reviewed-by: Stefano Garzarella Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/virtio/vhost-backend.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index 88c8ecc9e0..222bbcc62d 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ -257,7 +257,7 @@ static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb_msg *imsg) { if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) { - struct vhost_msg_v2 msg; + struct vhost_msg_v2 msg = {}; msg.type = VHOST_IOTLB_MSG_V2; msg.iotlb = *imsg; @@ -267,7 +267,7 @@ static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev, return -EFAULT; } } else { - struct vhost_msg msg; + struct vhost_msg msg = {}; msg.type = VHOST_IOTLB_MSG; msg.iotlb = *imsg; From 8055d2fb7f4717c6aa7c4ca801a487153945a15d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Fri, 30 Oct 2020 16:27:42 +0100 Subject: [PATCH 12/31] hw/smbios: Fix leaked fd in save_opt_one() error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following Coverity issue (RESOURCE_LEAK): CID 1432879: Resource leak Handle variable fd going out of scope leaks the handle. Replace a close() call by qemu_close() since the handle is opened with qemu_open(). Fixes: bb99f4772f5 ("hw/smbios: support loading OEM strings values from a file") Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20201030152742.1553968-1-philmd@redhat.com> Reviewed-by: Stefano Garzarella Reviewed-by: Laszlo Ersek Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/smbios/smbios.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 8b30906e50..6a3d39793b 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -988,16 +988,18 @@ static int save_opt_one(void *opaque, if (ret < 0) { error_setg(errp, "Unable to read from %s: %s", value, strerror(errno)); + qemu_close(fd); return -1; } if (memchr(buf, '\0', ret)) { error_setg(errp, "NUL in OEM strings value in %s", value); + qemu_close(fd); return -1; } g_byte_array_append(data, (guint8 *)buf, ret); } - close(fd); + qemu_close(fd); *opt->dest = g_renew(char *, *opt->dest, (*opt->ndest) + 1); (*opt->dest)[*opt->ndest] = (char *)g_byte_array_free(data, FALSE); From bfe7a961737452ae8e616df758406e86ac289972 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 30 Oct 2020 19:05:01 +0100 Subject: [PATCH 13/31] virtio-iommu: Fix virtio_iommu_mr() Due to an invalid mask, virtio_iommu_mr() may return the wrong memory region. It hasn't been too problematic so far because the function was only used to test existence of an endpoint, but that is about to change. Fixes: cfb42188b24d ("virtio-iommu: Implement attach/detach command") Cc: QEMU Stable Acked-by: Eric Auger Reviewed-by: Peter Xu Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-2-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/virtio/virtio-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 21ec63b108..4c8f3909b7 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -101,7 +101,7 @@ static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid) bus_n = PCI_BUS_NUM(sid); iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n); if (iommu_pci_bus) { - devfn = sid & PCI_DEVFN_MAX; + devfn = sid & (PCI_DEVFN_MAX - 1); dev = iommu_pci_bus->pbdev[devfn]; if (dev) { return &dev->iommu_mr; From 31aa323fb97bae3786f5bce5a88668f76fdb0cec Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 30 Oct 2020 19:05:02 +0100 Subject: [PATCH 14/31] virtio-iommu: Store memory region in endpoint struct Store the memory region associated to each endpoint into the endpoint structure, to allow efficient memory notification on map/unmap. Acked-by: Eric Auger Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-3-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/virtio/virtio-iommu.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 4c8f3909b7..a5c2d69aad 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -49,6 +49,7 @@ typedef struct VirtIOIOMMUDomain { typedef struct VirtIOIOMMUEndpoint { uint32_t id; VirtIOIOMMUDomain *domain; + IOMMUMemoryRegion *iommu_mr; QLIST_ENTRY(VirtIOIOMMUEndpoint) next; } VirtIOIOMMUEndpoint; @@ -137,16 +138,19 @@ static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id) { VirtIOIOMMUEndpoint *ep; + IOMMUMemoryRegion *mr; ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id)); if (ep) { return ep; } - if (!virtio_iommu_mr(s, ep_id)) { + mr = virtio_iommu_mr(s, ep_id); + if (!mr) { return NULL; } ep = g_malloc0(sizeof(*ep)); ep->id = ep_id; + ep->iommu_mr = mr; trace_virtio_iommu_get_endpoint(ep_id); g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep); return ep; @@ -910,9 +914,14 @@ static gboolean reconstruct_endpoints(gpointer key, gpointer value, VirtIOIOMMU *s = (VirtIOIOMMU *)data; VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value; VirtIOIOMMUEndpoint *iter; + IOMMUMemoryRegion *mr; QLIST_FOREACH(iter, &d->endpoint_list, next) { + mr = virtio_iommu_mr(s, iter->id); + assert(mr); + iter->domain = d; + iter->iommu_mr = mr; g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter); } return false; /* continue the domain traversal */ From 15e4c8f01b7f06b9dde13bc13949c834b25160f3 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:03 +0100 Subject: [PATCH 15/31] virtio-iommu: Add memory notifiers for map/unmap Extend VIRTIO_IOMMU_T_MAP/UNMAP request to notify memory listeners. It will call VFIO notifier to map/unmap regions in the physical IOMMU. Signed-off-by: Bharat Bhushan Signed-off-by: Eric Auger Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-4-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/virtio/trace-events | 2 ++ hw/virtio/virtio-iommu.c | 56 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index cf1e59de30..b87a397406 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -106,6 +106,8 @@ virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d" virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d" virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64 virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end) "dev= %d, type=%d start=0x%"PRIx64" end=0x%"PRIx64 +virtio_iommu_notify_map(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64" flags=%d" +virtio_iommu_notify_unmap(const char *name, uint64_t virt_start, uint64_t virt_end) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 # virtio-mem.c virtio_mem_send_response(uint16_t type) "type=%" PRIu16 diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index a5c2d69aad..7dd15c5eac 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -125,6 +125,51 @@ static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data) } } +static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start, + hwaddr virt_end, hwaddr paddr, + uint32_t flags) +{ + IOMMUTLBEntry entry; + IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ, + flags & VIRTIO_IOMMU_MAP_F_WRITE); + + if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) || + (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) { + return; + } + + trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end, + paddr, perm); + + entry.target_as = &address_space_memory; + entry.addr_mask = virt_end - virt_start; + entry.iova = virt_start; + entry.perm = perm; + entry.translated_addr = paddr; + + memory_region_notify_iommu(mr, 0, entry); +} + +static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, + hwaddr virt_end) +{ + IOMMUTLBEntry entry; + + if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) { + return; + } + + trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end); + + entry.target_as = &address_space_memory; + entry.addr_mask = virt_end - virt_start; + entry.iova = virt_start; + entry.perm = IOMMU_NONE; + entry.translated_addr = 0; + + memory_region_notify_iommu(mr, 0, entry); +} + static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep) { if (!ep->domain) { @@ -315,6 +360,7 @@ static int virtio_iommu_map(VirtIOIOMMU *s, VirtIOIOMMUDomain *domain; VirtIOIOMMUInterval *interval; VirtIOIOMMUMapping *mapping; + VirtIOIOMMUEndpoint *ep; if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) { return VIRTIO_IOMMU_S_INVAL; @@ -344,6 +390,11 @@ static int virtio_iommu_map(VirtIOIOMMU *s, g_tree_insert(domain->mappings, interval, mapping); + QLIST_FOREACH(ep, &domain->endpoint_list, next) { + virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start, + flags); + } + return VIRTIO_IOMMU_S_OK; } @@ -356,6 +407,7 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s, VirtIOIOMMUMapping *iter_val; VirtIOIOMMUInterval interval, *iter_key; VirtIOIOMMUDomain *domain; + VirtIOIOMMUEndpoint *ep; int ret = VIRTIO_IOMMU_S_OK; 
trace_virtio_iommu_unmap(domain_id, virt_start, virt_end); @@ -373,6 +425,10 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s, uint64_t current_high = iter_key->high; if (interval.low <= current_low && interval.high >= current_high) { + QLIST_FOREACH(ep, &domain->endpoint_list, next) { + virtio_iommu_notify_unmap(ep->iommu_mr, current_low, + current_high); + } g_tree_remove(domain->mappings, iter_key); trace_virtio_iommu_unmap_done(domain_id, current_low, current_high); } else { From 2f6eeb5f0bb1efea09510b9481e2ff82fe69b440 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:04 +0100 Subject: [PATCH 16/31] virtio-iommu: Call memory notifiers in attach/detach Call the memory notifiers when attaching an endpoint to a domain, to replay existing mappings, and when detaching the endpoint, to remove all mappings. Signed-off-by: Bharat Bhushan Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-5-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/virtio/virtio-iommu.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 7dd15c5eac..7b64892351 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -170,11 +170,39 @@ static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, memory_region_notify_iommu(mr, 0, entry); } +static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value, + gpointer data) +{ + VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key; + IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data; + + virtio_iommu_notify_unmap(mr, interval->low, interval->high); + + return false; +} + +static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value, + gpointer data) +{ + VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value; + VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key; + IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data; + + virtio_iommu_notify_map(mr, interval->low, interval->high, + mapping->phys_addr, mapping->flags); + + return false; +} + static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep) { + VirtIOIOMMUDomain *domain = ep->domain; + if (!ep->domain) { return; } + g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb, + ep->iommu_mr); QLIST_REMOVE(ep, next); ep->domain = NULL; } @@ -317,6 +345,10 @@ static int virtio_iommu_attach(VirtIOIOMMU *s, ep->domain = domain; + /* Replay domain mappings on the associated memory region */ + g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb, + ep->iommu_mr); + return VIRTIO_IOMMU_S_OK; } From 308e5e1b5f811aa28063006088ff276a63a034d3 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:05 +0100 Subject: [PATCH 17/31] virtio-iommu: Add replay() memory region callback Implement the replay callback to setup all mappings for a new memory region. Signed-off-by: Bharat Bhushan Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-6-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/virtio/trace-events | 1 + hw/virtio/virtio-iommu.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index b87a397406..ea3c3b25ad 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -108,6 +108,7 @@ virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uin virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end) "dev= %d, type=%d start=0x%"PRIx64" end=0x%"PRIx64 virtio_iommu_notify_map(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64" flags=%d" virtio_iommu_notify_unmap(const char *name, uint64_t virt_start, uint64_t virt_end) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 +virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64 # virtio-mem.c virtio_mem_send_response(uint16_t type) "type=%" PRIu16 diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 7b64892351..985257c88f 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -847,6 +847,45 @@ static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data) return (ua > ub) - (ua < ub); } +static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data) +{ + VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value; + VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key; + IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data; + + trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high, + mapping->phys_addr); + virtio_iommu_notify_map(mr, interval->low, interval->high, + mapping->phys_addr, mapping->flags); + return false; +} + +static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n) +{ + IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr); + VirtIOIOMMU *s = sdev->viommu; + uint32_t sid; + VirtIOIOMMUEndpoint *ep; + + sid = virtio_iommu_get_bdf(sdev); + + qemu_mutex_lock(&s->mutex); + + if (!s->endpoints) { + goto unlock; + } + + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid)); + if (!ep || !ep->domain) { + goto unlock; + } + + g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr); + +unlock: + qemu_mutex_unlock(&s->mutex); +} + static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); @@ -1076,6 +1115,7 @@ static void virtio_iommu_memory_region_class_init(ObjectClass *klass, IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); imrc->translate = virtio_iommu_translate; + imrc->replay = virtio_iommu_replay; } static const TypeInfo virtio_iommu_info = { From 6978bfaa688df55dc9ea581fe32b226f81aebc3a Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:06 +0100 Subject: [PATCH 18/31] virtio-iommu: Add notify_flag_changed() memory region callback Add notify_flag_changed() to notice when memory listeners are added and removed. Acked-by: Eric Auger Signed-off-by: Bharat Bhushan Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-7-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/virtio/trace-events | 2 ++ hw/virtio/virtio-iommu.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index ea3c3b25ad..982d0002a6 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -109,6 +109,8 @@ virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, virtio_iommu_notify_map(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64" flags=%d" virtio_iommu_notify_unmap(const char *name, uint64_t virt_start, uint64_t virt_end) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64 +virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s" +virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s" # virtio-mem.c virtio_mem_send_response(uint16_t type) "type=%" PRIu16 diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 985257c88f..78e07aa40a 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -886,6 +886,19 @@ unlock: qemu_mutex_unlock(&s->mutex); } +static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new, + Error **errp) +{ + if (old == IOMMU_NOTIFIER_NONE) { + trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name); + } else if (new == IOMMU_NOTIFIER_NONE) { + trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name); + } + return 0; +} + static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); @@ -1116,6 +1129,7 @@ static void virtio_iommu_memory_region_class_init(ObjectClass *klass, imrc->translate = virtio_iommu_translate; imrc->replay = virtio_iommu_replay; + imrc->notify_flag_changed = virtio_iommu_notify_flag_changed; } static const TypeInfo virtio_iommu_info = { From 457f8cbbd80f631cee02057c3c844a43ca65b5c4 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:07 +0100 Subject: [PATCH 19/31] memory: Add interface to set iommu page size mask Allow to set the page size mask supported by an iommu memory region. This enables a vIOMMU to communicate the page size granule supported by an assigned device, on hosts that use page sizes greater than 4kB. Acked-by: Peter Xu Reviewed-by: Eric Auger Signed-off-by: Bharat Bhushan Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-8-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- include/exec/memory.h | 38 ++++++++++++++++++++++++++++++++++++++ softmmu/memory.c | 13 +++++++++++++ 2 files changed, 51 insertions(+) diff --git a/include/exec/memory.h b/include/exec/memory.h index aff6ef7605..0f3e6bcd5e 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -397,6 +397,32 @@ struct IOMMUMemoryRegionClass { * @iommu: the IOMMUMemoryRegion */ int (*num_indexes)(IOMMUMemoryRegion *iommu); + + /** + * @iommu_set_page_size_mask: + * + * Restrict the page size mask that can be supported with a given IOMMU + * memory region. Used for example to propagate host physical IOMMU page + * size mask limitations to the virtual IOMMU. + * + * Optional method: if this method is not provided, then the default global + * page mask is used. 
+ * + * @iommu: the IOMMUMemoryRegion + * + * @page_size_mask: a bitmask of supported page sizes. At least one bit, + * representing the smallest page size, must be set. Additional set bits + * represent supported block sizes. For example a host physical IOMMU that + * uses page tables with a page size of 4kB, and supports 2MB and 4GB + * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate + * block sizes is specified with mask 0xfffffffffffff000. + * + * Returns 0 on success, or a negative error. In case of failure, the error + * object must be created. + */ + int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu, + uint64_t page_size_mask, + Error **errp); }; typedef struct CoalescedMemoryRange CoalescedMemoryRange; @@ -1409,6 +1435,18 @@ int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, */ int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr); +/** + * memory_region_iommu_set_page_size_mask: set the supported page + * sizes for a given IOMMU memory region + * + * @iommu_mr: IOMMU memory region + * @page_size_mask: supported page size mask + * @errp: pointer to Error*, to store an error if it happens. + */ +int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr, + uint64_t page_size_mask, + Error **errp); + /** * memory_region_name: get a memory region's name * diff --git a/softmmu/memory.c b/softmmu/memory.c index 21d533d8ed..71951fe4dc 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -1841,6 +1841,19 @@ static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr, return ret; } +int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr, + uint64_t page_size_mask, + Error **errp) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + int ret = 0; + + if (imrc->iommu_set_page_size_mask) { + ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp); + } + return ret; +} + int memory_region_register_iommu_notifier(MemoryRegion *mr, IOMMUNotifier *n, Error **errp) { From b917749842493abdfa49f5265ea236c922c05cb2 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:08 +0100 Subject: [PATCH 20/31] vfio: Set IOMMU page size as per host supported page size Set IOMMU supported page size mask same as host Linux supported page size mask. Acked-by: Alex Williamson Reviewed-by: Eric Auger Signed-off-by: Bharat Bhushan Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-9-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/vfio/common.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index e18ea2cf91..35895b18a6 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -789,6 +789,14 @@ static void vfio_listener_region_add(MemoryListener *listener, int128_get64(llend), iommu_idx); + ret = memory_region_iommu_set_page_size_mask(giommu->iommu, + container->pgsizes, + &err); + if (ret) { + g_free(giommu); + goto fail; + } + ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, &err); if (ret) { From 5c3cfe33f4185841feaedd07bea1d6d7e02011a0 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 30 Oct 2020 19:05:09 +0100 Subject: [PATCH 21/31] virtio-iommu: Set supported page size mask The virtio-iommu device can deal with arbitrary page sizes for virtual endpoints, but for endpoints assigned with VFIO it must follow the page granule used by the host IOMMU driver. 
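Editor's note (not part of the series): the page_size_mask encoding described in the memory.h comment above can be checked with a small standalone program; the sizes below are examples only.

/*
 * Each set bit in page_size_mask is a supported page/block size; the
 * lowest set bit is the granule.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    const uint64_t KiB = 1024, MiB = 1024 * KiB, GiB = 1024 * MiB;

    /* 4 KiB granule with 2 MiB and 1 GiB block sizes */
    uint64_t mask = (4 * KiB) | (2 * MiB) | (1 * GiB);
    printf("mask = 0x%" PRIx64 "\n", mask);  /* 0x40201000 */

    /* 4 KiB granule, indiscriminate block sizes */
    uint64_t any = ~(4 * KiB - 1);
    printf("any  = 0x%" PRIx64 "\n", any);   /* 0xfffffffffffff000 */
    return 0;
}
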
Implement the interface to set the vIOMMU page size mask, called by VFIO for each endpoint. We assume that all host IOMMU drivers use the same page granule (the host page granule). Override the page_size_mask field in the virtio config space. Signed-off-by: Bharat Bhushan Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-10-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/virtio/trace-events | 1 + hw/virtio/virtio-iommu.c | 50 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 982d0002a6..2060a144a2 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -109,6 +109,7 @@ virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, virtio_iommu_notify_map(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64" flags=%d" virtio_iommu_notify_unmap(const char *name, uint64_t virt_start, uint64_t virt_end) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64 +virtio_iommu_set_page_size_mask(const char *name, uint64_t old, uint64_t new) "mr=%s old_mask=0x%"PRIx64" new_mask=0x%"PRIx64 virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s" virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s" diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 78e07aa40a..fc5c75d693 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -899,6 +899,55 @@ static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr, return 0; } +/* + * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule, + * for example 0xfffffffffffff000. When an assigned device has page size + * restrictions due to the hardware IOMMU configuration, apply this restriction + * to the mask. + */ +static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr, + uint64_t new_mask, + Error **errp) +{ + IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr); + VirtIOIOMMU *s = sdev->viommu; + uint64_t cur_mask = s->config.page_size_mask; + + trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask, + new_mask); + + if ((cur_mask & new_mask) == 0) { + error_setg(errp, "virtio-iommu page mask 0x%"PRIx64 + " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask); + return -1; + } + + /* + * After the machine is finalized, we can't change the mask anymore. If by + * chance the hotplugged device supports the same granule, we can still + * accept it. Having a different masks is possible but the guest will use + * sub-optimal block sizes, so warn about it. 
+ */ + if (qdev_hotplug) { + int new_granule = ctz64(new_mask); + int cur_granule = ctz64(cur_mask); + + if (new_granule != cur_granule) { + error_setg(errp, "virtio-iommu page mask 0x%"PRIx64 + " is incompatible with mask 0x%"PRIx64, cur_mask, + new_mask); + return -1; + } else if (new_mask != cur_mask) { + warn_report("virtio-iommu page mask 0x%"PRIx64 + " does not match 0x%"PRIx64, cur_mask, new_mask); + } + return 0; + } + + s->config.page_size_mask &= new_mask; + return 0; +} + static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); @@ -1130,6 +1179,7 @@ static void virtio_iommu_memory_region_class_init(ObjectClass *klass, imrc->translate = virtio_iommu_translate; imrc->replay = virtio_iommu_replay; imrc->notify_flag_changed = virtio_iommu_notify_flag_changed; + imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask; } static const TypeInfo virtio_iommu_info = { From 1b296c3def4b9b63d2fdbce6646edd108a3e616c Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 30 Oct 2020 19:05:10 +0100 Subject: [PATCH 22/31] vfio: Don't issue full 2^64 unmap IOMMUs may declare memory regions spanning from 0 to UINT64_MAX. When attempting to deal with such region, vfio_listener_region_del() passes a size of 2^64 to int128_get64() which throws an assertion failure. Even ignoring this, the VFIO_IOMMU_DMA_MAP ioctl cannot handle this size since the size field is 64-bit. Split the request in two. Acked-by: Alex Williamson Reviewed-by: Eric Auger Signed-off-by: Jean-Philippe Brucker Message-Id: <20201030180510.747225-11-jean-philippe@linaro.org> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/vfio/common.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 35895b18a6..c1fdbf17f2 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -950,6 +950,17 @@ static void vfio_listener_region_del(MemoryListener *listener, } if (try_unmap) { + if (int128_eq(llsize, int128_2_64())) { + /* The unmap ioctl doesn't accept a full 64-bit span. */ + llsize = int128_rshift(llsize, 1); + ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + if (ret) { + error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%m)", + container, iova, int128_get64(llsize), ret); + } + iova += int128_get64(llsize); + } ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " From 57b3a7d81bd7b5fb846ab5c05597a853259a1853 Mon Sep 17 00:00:00 2001 From: Cindy Lu Date: Fri, 16 Oct 2020 11:09:08 +0800 Subject: [PATCH 23/31] vhost-vdpa: Add qemu_close in vhost_vdpa_cleanup fix the bug that fd will still open after the cleanup Signed-off-by: Cindy Lu Message-Id: <20201016030909.9522-1-lulu@redhat.com> Acked-by: Jason Wang Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- net/vhost-vdpa.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 99c476db8c..fe659ec9e2 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -145,6 +145,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc) g_free(s->vhost_net); s->vhost_net = NULL; } + if (s->vhost_vdpa.device_fd >= 0) { + qemu_close(s->vhost_vdpa.device_fd); + s->vhost_vdpa.device_fd = -1; + } } static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc) From 1bc211a166be2c98f98852124b6fdb61e0b0be32 Mon Sep 17 00:00:00 2001 From: Cindy Lu Date: Fri, 16 Oct 2020 11:09:09 +0800 Subject: [PATCH 24/31] net: Add vhost-vdpa in show_netdevs() Fix the bug that while Check qemu supported netdev, there is no vhost-vdpa Signed-off-by: Cindy Lu Message-Id: <20201016030909.9522-2-lulu@redhat.com> Acked-by: Jason Wang Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- net/net.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/net.c b/net/net.c index 7a2a0fb5ac..794c652282 100644 --- a/net/net.c +++ b/net/net.c @@ -1049,6 +1049,9 @@ static void show_netdevs(void) #endif #ifdef CONFIG_POSIX "vhost-user", +#endif +#ifdef CONFIG_VHOST_VDPA + "vhost-vdpa", #endif }; From b7c1bd9d78480481455678602c9a8505cc8adadd Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Mon, 2 Nov 2020 16:57:09 +0000 Subject: [PATCH 25/31] Revert "vhost-blk: set features before setting inflight feature" This reverts commit adb29c027341ba095a3ef4beef6aaef86d3a520e. The commit broke -device vhost-user-blk-pci because the vhost_dev_prepare_inflight() function it introduced segfaults in vhost_dev_set_features() when attempting to access struct vhost_dev's vdev pointer before it has been assigned. To reproduce the segfault simply launch a vhost-user-blk device with the contrib vhost-user-blk device backend: $ build/contrib/vhost-user-blk/vhost-user-blk -s /tmp/vhost-user-blk.sock -r -b /var/tmp/foo.img $ build/qemu-system-x86_64 \ -device vhost-user-blk-pci,id=drv0,chardev=char1,addr=4.0 \ -object memory-backend-memfd,id=mem,size=1G,share=on \ -M memory-backend=mem,accel=kvm \ -chardev socket,id=char1,path=/tmp/vhost-user-blk.sock Segmentation fault (core dumped) Cc: Jin Yu Cc: Raphael Norwitz Cc: Michael S. Tsirkin Signed-off-by: Stefan Hajnoczi Message-Id: <20201102165709.232180-1-stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/block/vhost-user-blk.c | 6 ------ hw/virtio/vhost.c | 18 ------------------ include/hw/virtio/vhost.h | 1 - 3 files changed, 25 deletions(-) diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index f67b29bbf3..a076b1e54d 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -131,12 +131,6 @@ static int vhost_user_blk_start(VirtIODevice *vdev) s->dev.acked_features = vdev->guest_features; - ret = vhost_dev_prepare_inflight(&s->dev); - if (ret < 0) { - error_report("Error set inflight format: %d", -ret); - goto err_guest_notifiers; - } - if (!s->inflight->addr) { ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight); if (ret < 0) { diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index f2482378c6..79b2be20df 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1645,24 +1645,6 @@ int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) return 0; } -int vhost_dev_prepare_inflight(struct vhost_dev *hdev) -{ - int r; - - if (hdev->vhost_ops->vhost_get_inflight_fd == NULL || - hdev->vhost_ops->vhost_set_inflight_fd == NULL) { - return 0; - } - - r = vhost_dev_set_features(hdev, hdev->log_enabled); - if (r < 0) { - VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed"); - return r; - } - - return 0; -} - int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight) { diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 839bfb153c..94585067f7 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -141,7 +141,6 @@ void vhost_dev_reset_inflight(struct vhost_inflight *inflight); void vhost_dev_free_inflight(struct vhost_inflight *inflight); void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f); int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f); -int vhost_dev_prepare_inflight(struct vhost_dev *hdev); int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight); int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, From 1b0063b3048af65dfaae6422a572c87db8575a92 Mon Sep 17 00:00:00 2001 From: Jin Yu Date: Tue, 3 Nov 2020 20:36:17 +0800 Subject: [PATCH 26/31] vhost-blk: set features before setting inflight feature Virtqueue has split and packed, so before setting inflight, you need to inform the back-end virtqueue format. Signed-off-by: Jin Yu Acked-by: Raphael Norwitz Message-Id: <20201103123617.28256-1-jin.yu@intel.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. 
Tsirkin --- hw/block/vhost-user-blk.c | 6 ++++++ hw/virtio/vhost.c | 20 ++++++++++++++++++++ include/hw/virtio/vhost.h | 1 + 3 files changed, 27 insertions(+) diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index a076b1e54d..2dd3d93ca0 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -131,6 +131,12 @@ static int vhost_user_blk_start(VirtIODevice *vdev) s->dev.acked_features = vdev->guest_features; + ret = vhost_dev_prepare_inflight(&s->dev, vdev); + if (ret < 0) { + error_report("Error set inflight format: %d", -ret); + goto err_guest_notifiers; + } + if (!s->inflight->addr) { ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight); if (ret < 0) { diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 79b2be20df..614ccc2bcb 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1645,6 +1645,26 @@ int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) return 0; } +int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) +{ + int r; + + if (hdev->vhost_ops->vhost_get_inflight_fd == NULL || + hdev->vhost_ops->vhost_set_inflight_fd == NULL) { + return 0; + } + + hdev->vdev = vdev; + + r = vhost_dev_set_features(hdev, hdev->log_enabled); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed"); + return r; + } + + return 0; +} + int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight) { diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 94585067f7..4a8bc75415 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -141,6 +141,7 @@ void vhost_dev_reset_inflight(struct vhost_inflight *inflight); void vhost_dev_free_inflight(struct vhost_inflight *inflight); void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f); int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f); +int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev); int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight); int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, From de65d4978571769eae98e4d757b23dcd03313ba2 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 27 Oct 2020 17:35:17 +0000 Subject: [PATCH 27/31] libvhost-user: follow QEMU comment style Signed-off-by: Stefan Hajnoczi Message-Id: <20201027173528.213464-2-stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- contrib/libvhost-user/libvhost-user.h | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h index 3bbeae8587..a1539dbb69 100644 --- a/contrib/libvhost-user/libvhost-user.h +++ b/contrib/libvhost-user/libvhost-user.h @@ -392,7 +392,8 @@ struct VuDev { bool broken; uint16_t max_queues; - /* @read_msg: custom method to read vhost-user message + /* + * @read_msg: custom method to read vhost-user message * * Read data from vhost_user socket fd and fill up * the passed VhostUserMsg *vmsg struct. @@ -409,15 +410,19 @@ struct VuDev { * */ vu_read_msg_cb read_msg; - /* @set_watch: add or update the given fd to the watch set, - * call cb when condition is met */ + + /* + * @set_watch: add or update the given fd to the watch set, + * call cb when condition is met. 
+ */ vu_set_watch_cb set_watch; /* @remove_watch: remove the given fd from the watch set */ vu_remove_watch_cb remove_watch; - /* @panic: encountered an unrecoverable error, you may try to - * re-initialize */ + /* + * @panic: encountered an unrecoverable error, you may try to re-initialize + */ vu_panic_cb panic; const VuDevIface *iface; From bc15e44cb2191bbb2318878acdf5038134e56394 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 27 Oct 2020 17:35:18 +0000 Subject: [PATCH 28/31] configure: introduce --enable-vhost-user-blk-server Make it possible to compile out the vhost-user-blk server. It is enabled by default on Linux. Note that vhost-user-server.c depends on libvhost-user, which requires CONFIG_LINUX. The CONFIG_VHOST_USER dependency was erroneous since that option controls vhost-user frontends (previously known as "master") and not device backends (previously known as "slave"). Signed-off-by: Stefan Hajnoczi Message-Id: <20201027173528.213464-3-stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- block/export/export.c | 4 ++-- block/export/meson.build | 2 +- configure | 15 +++++++++++++++ util/meson.build | 2 +- 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/block/export/export.c b/block/export/export.c index c3478c6c97..bad6f21b1c 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -22,13 +22,13 @@ #include "qapi/qapi-commands-block-export.h" #include "qapi/qapi-events-block-export.h" #include "qemu/id.h" -#if defined(CONFIG_LINUX) && defined(CONFIG_VHOST_USER) +#ifdef CONFIG_VHOST_USER_BLK_SERVER #include "vhost-user-blk-server.h" #endif static const BlockExportDriver *blk_exp_drivers[] = { &blk_exp_nbd, -#if defined(CONFIG_LINUX) && defined(CONFIG_VHOST_USER) +#ifdef CONFIG_VHOST_USER_BLK_SERVER &blk_exp_vhost_user_blk, #endif }; diff --git a/block/export/meson.build b/block/export/meson.build index 9fb4fbf81d..19526435d8 100644 --- a/block/export/meson.build +++ b/block/export/meson.build @@ -1,2 +1,2 @@ blockdev_ss.add(files('export.c')) -blockdev_ss.add(when: ['CONFIG_LINUX', 'CONFIG_VHOST_USER'], if_true: files('vhost-user-blk-server.c')) +blockdev_ss.add(when: 'CONFIG_VHOST_USER_BLK_SERVER', if_true: files('vhost-user-blk-server.c')) diff --git a/configure b/configure index 2c3c69f118..b5e8f5f72c 100755 --- a/configure +++ b/configure @@ -329,6 +329,7 @@ vhost_crypto="" vhost_scsi="" vhost_vsock="" vhost_user="" +vhost_user_blk_server="" vhost_user_fs="" kvm="auto" hax="auto" @@ -1246,6 +1247,10 @@ for opt do ;; --enable-vhost-vsock) vhost_vsock="yes" ;; + --disable-vhost-user-blk-server) vhost_user_blk_server="no" + ;; + --enable-vhost-user-blk-server) vhost_user_blk_server="yes" + ;; --disable-vhost-user-fs) vhost_user_fs="no" ;; --enable-vhost-user-fs) vhost_user_fs="yes" @@ -1791,6 +1796,7 @@ disabled with --disable-FEATURE, default is enabled if available: vhost-crypto vhost-user-crypto backend support vhost-kernel vhost kernel backend support vhost-user vhost-user backend support + vhost-user-blk-server vhost-user-blk server support vhost-vdpa vhost-vdpa kernel backend support spice spice rbd rados block device (rbd) @@ -2382,6 +2388,12 @@ if test "$vhost_net" = ""; then test "$vhost_kernel" = "yes" && vhost_net=yes fi +# libvhost-user is Linux-only +test "$vhost_user_blk_server" = "" && vhost_user_blk_server=$linux +if test "$vhost_user_blk_server" = "yes" && test "$linux" = "no"; then + error_exit "--enable-vhost-user-blk-server is only available on Linux" +fi + 
########################################## # pkg-config probe @@ -6275,6 +6287,9 @@ fi if test "$vhost_vdpa" = "yes" ; then echo "CONFIG_VHOST_VDPA=y" >> $config_host_mak fi +if test "$vhost_user_blk_server" = "yes" ; then + echo "CONFIG_VHOST_USER_BLK_SERVER=y" >> $config_host_mak +fi if test "$vhost_user_fs" = "yes" ; then echo "CONFIG_VHOST_USER_FS=y" >> $config_host_mak fi diff --git a/util/meson.build b/util/meson.build index c5159ad79d..f359af0d46 100644 --- a/util/meson.build +++ b/util/meson.build @@ -66,7 +66,7 @@ if have_block util_ss.add(files('main-loop.c')) util_ss.add(files('nvdimm-utils.c')) util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c')) - util_ss.add(when: ['CONFIG_LINUX', 'CONFIG_VHOST_USER'], if_true: [ + util_ss.add(when: 'CONFIG_LINUX', if_true: [ files('vhost-user-server.c'), vhost_user ]) util_ss.add(files('block-helpers.c')) From 11f60f7eaee2630dd6fa0c3a8c49f792e46c4cf1 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 27 Oct 2020 17:35:19 +0000 Subject: [PATCH 29/31] block/export: make vhost-user-blk config space little-endian VIRTIO 1.0 devices have little-endian configuration space. The vhost-user-blk-server.c code already uses little-endian for virtqueue processing but not for the configuration space fields. Fix this so the vhost-user-blk export works on big-endian hosts. Signed-off-by: Stefan Hajnoczi Message-Id: <20201027173528.213464-4-stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- block/export/vhost-user-blk-server.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 41f4933d6e..33cc0818b8 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -264,7 +264,6 @@ static uint64_t vu_blk_get_protocol_features(VuDev *dev) static int vu_blk_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len) { - /* TODO blkcfg must be little-endian for VIRTIO 1.0 */ VuServer *server = container_of(vu_dev, VuServer, vu_dev); VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server); memcpy(config, &vexp->blkcfg, len); @@ -343,18 +342,18 @@ vu_blk_initialize_config(BlockDriverState *bs, uint32_t blk_size, uint16_t num_queues) { - config->capacity = bdrv_getlength(bs) >> BDRV_SECTOR_BITS; - config->blk_size = blk_size; - config->size_max = 0; - config->seg_max = 128 - 2; - config->min_io_size = 1; - config->opt_io_size = 1; - config->num_queues = num_queues; - config->max_discard_sectors = 32768; - config->max_discard_seg = 1; - config->discard_sector_alignment = config->blk_size >> 9; - config->max_write_zeroes_sectors = 32768; - config->max_write_zeroes_seg = 1; + config->capacity = cpu_to_le64(bdrv_getlength(bs) >> BDRV_SECTOR_BITS); + config->blk_size = cpu_to_le32(blk_size); + config->size_max = cpu_to_le32(0); + config->seg_max = cpu_to_le32(128 - 2); + config->min_io_size = cpu_to_le16(1); + config->opt_io_size = cpu_to_le32(1); + config->num_queues = cpu_to_le16(num_queues); + config->max_discard_sectors = cpu_to_le32(32768); + config->max_discard_seg = cpu_to_le32(1); + config->discard_sector_alignment = cpu_to_le32(config->blk_size >> 9); + config->max_write_zeroes_sectors = cpu_to_le32(32768); + config->max_write_zeroes_seg = cpu_to_le32(1); } static void vu_blk_exp_request_shutdown(BlockExport *exp) From f8ffcb2bda22bad8e91da70c28ec52724a054f92 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 27 Oct 
2020 17:35:20 +0000 Subject: [PATCH 30/31] block/export: fix vhost-user-blk get_config() information leak Refuse get_config() requests in excess of sizeof(struct virtio_blk_config). Signed-off-by: Stefan Hajnoczi Message-Id: <20201027173528.213464-5-stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- block/export/vhost-user-blk-server.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 33cc0818b8..62672d1cb9 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -266,6 +266,9 @@ vu_blk_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len) { VuServer *server = container_of(vu_dev, VuServer, vu_dev); VuBlkExport *vexp = container_of(server, VuBlkExport, vu_server); + + g_return_val_if_fail(len <= sizeof(struct virtio_blk_config), -1); + memcpy(config, &vexp->blkcfg, len); return 0; } From 9f6df01d0e128c2df179789b37140d6aeddfcb92 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 27 Oct 2020 17:35:21 +0000 Subject: [PATCH 31/31] contrib/vhost-user-blk: fix get_config() information leak Refuse get_config() in excess of sizeof(struct virtio_blk_config). Signed-off-by: Stefan Hajnoczi Message-Id: <20201027173528.213464-6-stefanha@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- contrib/vhost-user-blk/vhost-user-blk.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/vhost-user-blk/vhost-user-blk.c b/contrib/vhost-user-blk/vhost-user-blk.c index 25eccd02b5..caad88637e 100644 --- a/contrib/vhost-user-blk/vhost-user-blk.c +++ b/contrib/vhost-user-blk/vhost-user-blk.c @@ -404,6 +404,8 @@ vub_get_config(VuDev *vu_dev, uint8_t *config, uint32_t len) VugDev *gdev; VubDev *vdev_blk; + g_return_val_if_fail(len <= sizeof(struct virtio_blk_config), -1); + gdev = container_of(vu_dev, VugDev, parent); vdev_blk = container_of(gdev, VubDev, parent); memcpy(config, &vdev_blk->blkcfg, len);
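The last two patches apply the same defensive pattern: the vhost-user front-end controls the len argument of get_config(), so the back-end must clamp it to the size of its own config structure before copying, otherwise bytes adjacent to blkcfg leak back to the front-end. The following standalone C sketch is not part of the patches above; the struct layout, values and function names are simplified stand-ins used only to illustrate the check in isolation.

/*
 * get_config_check.c - minimal, self-contained illustration of the bounds
 * check added by the two get_config() fixes above (hypothetical example,
 * not QEMU code). Build with:
 *   gcc get_config_check.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct virtio_blk_config. */
struct blk_config {
    uint64_t capacity;
    uint32_t blk_size;
    uint16_t num_queues;
} __attribute__((packed));

static struct blk_config blkcfg = {
    .capacity = 2048,   /* 1 MiB expressed in 512-byte sectors */
    .blk_size = 512,
    .num_queues = 1,
};

/*
 * The front-end chooses 'len'. Without the g_return_val_if_fail() guard,
 * a len larger than sizeof(blkcfg) would make memcpy() read past the
 * config structure and leak adjacent memory to the front-end.
 */
static int example_get_config(uint8_t *config, uint32_t len)
{
    g_return_val_if_fail(len <= sizeof(blkcfg), -1);

    memcpy(config, &blkcfg, len);
    return 0;
}

int main(void)
{
    uint8_t buf[4096];

    /* A request bounded by the config size succeeds (returns 0). */
    printf("valid request:     %d\n", example_get_config(buf, sizeof(blkcfg)));
    /* An oversized request is refused (returns -1, warning on stderr). */
    printf("oversized request: %d\n", example_get_config(buf, sizeof(buf)));
    return 0;
}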