qemu-e2k/hw/virtio/vhost-user-fs.c

/*
 * Vhost-user filesystem virtio device
 *
 * Copyright 2018-2019 Red Hat, Inc.
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.  See the COPYING file in the
 * top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_fs.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "qemu/error-report.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user-fs.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
static const int user_feature_bits[] = {
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_RESET,

    VHOST_INVALID_FEATURE_BIT
};

static void vuf_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    struct virtio_fs_config fscfg = {};

    memcpy((char *)fscfg.tag, fs->conf.tag,
           MIN(strlen(fs->conf.tag) + 1, sizeof(fscfg.tag)));

    virtio_stl_p(vdev, &fscfg.num_request_queues, fs->conf.num_request_queues);

    memcpy(config, &fscfg, sizeof(fscfg));
}
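
/*
 * Start the vhost-user back-end: hand over host and guest notifiers,
 * push the negotiated features and start the vhost device.
 */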
static void vuf_start(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;
    int i;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return;
    }

    ret = vhost_dev_enable_notifiers(&fs->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return;
    }

    ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    fs->vhost_dev.acked_features = vdev->guest_features;

    ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
    if (ret < 0) {
        error_report("Error starting vhost: %d", -ret);
        goto err_guest_notifiers;
    }

    /*
     * guest_notifier_mask/pending not used yet, so just unmask
     * everything here.  virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < fs->vhost_dev.nvqs; i++) {
        vhost_virtqueue_mask(&fs->vhost_dev, vdev, i, false);
    }

    return;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}
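
/*
 * Stop the vhost-user back-end and return the notifiers to QEMU.
 */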
static void vuf_stop(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&fs->vhost_dev, vdev, true);

    ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}
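
/*
 * Start or stop the back-end when the device status transition
 * requires it (e.g. when the guest sets or clears DRIVER_OK).
 */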
static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    bool should_start = virtio_device_should_start(vdev, status);

    if (vhost_dev_is_started(&fs->vhost_dev) == should_start) {
        return;
    }

    if (should_start) {
        vuf_start(vdev);
    } else {
        vuf_stop(vdev);
    }
}

static uint64_t vuf_get_features(VirtIODevice *vdev,
                                 uint64_t features,
                                 Error **errp)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    return vhost_get_features(&fs->vhost_dev, user_feature_bits, features);
}

static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /*
     * Not normally called; it's the daemon that handles the queue;
     * however virtio's cleanup path can call this.
     */
}

static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                    bool mask)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) identifies the config interrupt.
     * The config interrupt is not handled by the vhost device, so
     * there is nothing to mask here.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }

    vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
}

static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) identifies the config interrupt.
     * The config interrupt is not handled by the vhost device, so
     * report it as not pending.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }

    return vhost_virtqueue_pending(&fs->vhost_dev, idx);
}
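
/*
 * Realize: validate the device properties, create the virtqueues and
 * initialise the vhost-user connection to the back-end daemon.
 */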
static void vuf_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserFS *fs = VHOST_USER_FS(dev);
    unsigned int i;
    size_t len;
    int ret;

    if (!fs->conf.chardev.chr) {
        error_setg(errp, "missing chardev");
        return;
    }

    if (!fs->conf.tag) {
        error_setg(errp, "missing tag property");
        return;
    }
    len = strlen(fs->conf.tag);
    if (len == 0) {
        error_setg(errp, "tag property cannot be empty");
        return;
    }
    if (len > sizeof_field(struct virtio_fs_config, tag)) {
        error_setg(errp, "tag property must be %zu bytes or less",
                   sizeof_field(struct virtio_fs_config, tag));
        return;
    }

    if (fs->conf.num_request_queues == 0) {
        error_setg(errp, "num-request-queues property must be larger than 0");
        return;
    }

    if (!is_power_of_2(fs->conf.queue_size)) {
        error_setg(errp, "queue-size property must be a power of 2");
        return;
    }

    if (fs->conf.queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "queue-size property must be %u or smaller",
                   VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!vhost_user_init(&fs->vhost_user, &fs->conf.chardev, errp)) {
        return;
    }

    virtio_init(vdev, VIRTIO_ID_FS, sizeof(struct virtio_fs_config));

    /* Hiprio queue */
    fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);

    /* Request queues */
    fs->req_vqs = g_new(VirtQueue *, fs->conf.num_request_queues);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        fs->req_vqs[i] = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);
    }

    /* 1 high prio queue, plus the number configured */
    fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues;
    fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs);
    ret = vhost_dev_init(&fs->vhost_dev, &fs->vhost_user,
                         VHOST_BACKEND_TYPE_USER, 0, errp);
    if (ret < 0) {
        goto err_virtio;
    }

    return;

err_virtio:
    vhost_user_cleanup(&fs->vhost_user);
    virtio_delete_queue(fs->hiprio_vq);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        virtio_delete_queue(fs->req_vqs[i]);
    }
    g_free(fs->req_vqs);
    virtio_cleanup(vdev);
    g_free(fs->vhost_dev.vqs);
    return;
}

static void vuf_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserFS *fs = VHOST_USER_FS(dev);
    struct vhost_virtqueue *vhost_vqs = fs->vhost_dev.vqs;
    int i;

    /* This will stop vhost backend if appropriate. */
    vuf_set_status(vdev, 0);

    vhost_dev_cleanup(&fs->vhost_dev);
    vhost_user_cleanup(&fs->vhost_user);

    virtio_delete_queue(fs->hiprio_vq);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        virtio_delete_queue(fs->req_vqs[i]);
    }
    g_free(fs->req_vqs);
    virtio_cleanup(vdev);
    g_free(vhost_vqs);
}

static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    return &fs->vhost_dev;
}

/**
 * Fetch the internal state from virtiofsd and save it to `f`.
 */
static int vuf_save_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    Error *local_error = NULL;
    int ret;

    ret = vhost_save_backend_state(&fs->vhost_dev, f, &local_error);
    if (ret < 0) {
        error_reportf_err(local_error,
                          "Error saving back-end state of %s device %s "
                          "(tag: \"%s\"): ",
                          vdev->name, vdev->parent_obj.canonical_path,
                          fs->conf.tag ?: "<none>");
        return ret;
    }

    return 0;
}

/**
 * Load virtiofsd's internal state from `f` and send it over to virtiofsd.
 */
static int vuf_load_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    Error *local_error = NULL;
    int ret;

    ret = vhost_load_backend_state(&fs->vhost_dev, f, &local_error);
    if (ret < 0) {
        error_reportf_err(local_error,
                          "Error loading back-end state of %s device %s "
                          "(tag: \"%s\"): ",
                          vdev->name, vdev->parent_obj.canonical_path,
                          fs->conf.tag ?: "<none>");
        return ret;
    }

    return 0;
}

static bool vuf_is_internal_migration(void *opaque)
{
    /* TODO: Return false when an external migration is requested */
    return true;
}

static int vuf_check_migration_support(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    if (!vhost_supports_device_state(&fs->vhost_dev)) {
        error_report("Back-end of %s device %s (tag: \"%s\") does not support "
                     "migration through qemu",
                     vdev->name, vdev->parent_obj.canonical_path,
                     fs->conf.tag ?: "<none>");
        return -ENOTSUP;
    }

    return 0;
}
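
/*
 * The back-end (virtiofsd) state is migrated in an optional subsection,
 * defined below; it is only included for internal (stateful) migration,
 * see vuf_is_internal_migration().
 */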
static const VMStateDescription vuf_backend_vmstate;

static const VMStateDescription vuf_vmstate = {
    .name = "vhost-user-fs",
    .version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vuf_backend_vmstate,
        NULL,
    }
};

static const VMStateDescription vuf_backend_vmstate = {
    .name = "vhost-user-fs-backend",
    .version_id = 0,
    .needed = vuf_is_internal_migration,
    .pre_load = vuf_check_migration_support,
    .pre_save = vuf_check_migration_support,
    .fields = (VMStateField[]) {
        {
            .name = "back-end",
            .info = &(const VMStateInfo) {
                .name = "virtio-fs back-end state",
                .get = vuf_load_state,
                .put = vuf_save_state,
            },
        },
        VMSTATE_END_OF_LIST()
    },
};

static Property vuf_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserFS, conf.chardev),
    DEFINE_PROP_STRING("tag", VHostUserFS, conf.tag),
    DEFINE_PROP_UINT16("num-request-queues", VHostUserFS,
                       conf.num_request_queues, 1),
    DEFINE_PROP_UINT16("queue-size", VHostUserFS, conf.queue_size, 128),
    DEFINE_PROP_END_OF_LIST(),
};

static void vuf_instance_init(Object *obj)
{
    VHostUserFS *fs = VHOST_USER_FS(obj);

    device_add_bootindex_property(obj, &fs->bootindex, "bootindex",
                                  "/filesystem@0", DEVICE(obj));
}

static void vuf_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vuf_properties);
    dc->vmsd = &vuf_vmstate;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vuf_device_realize;
    vdc->unrealize = vuf_device_unrealize;
    vdc->get_features = vuf_get_features;
    vdc->get_config = vuf_get_config;
    vdc->set_status = vuf_set_status;
    vdc->guest_notifier_mask = vuf_guest_notifier_mask;
    vdc->guest_notifier_pending = vuf_guest_notifier_pending;
    vdc->get_vhost = vuf_get_vhost;
}

static const TypeInfo vuf_info = {
    .name = TYPE_VHOST_USER_FS,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserFS),
    .instance_init = vuf_instance_init,
    .class_init = vuf_class_init,
};

static void vuf_register_types(void)
{
    type_register_static(&vuf_info);
}

type_init(vuf_register_types)