a9f98bb5eb
This patch lets vhost support multiqueue. The idea is simple: launch multiple vhost threads and let each thread process a subset of the device's virtqueues. After this change, each emulated device can have multiple vhost threads as its backend.

To do this, a virtqueue index was introduced to record the first virtqueue that will be handled by a given vhost_net device. Based on this index and nvqs, vhost can calculate the relative index it needs to set up the vhost_net device.

Since a virtio-net device may now be backed by many vhost/vhost_net devices, the setup of guest notifiers was moved out of the starting/stopping of an individual vhost thread. vhost_net_{start|stop}() were renamed to vhost_net_{start|stop}_one(), and new vhost_net_{start|stop}() functions were introduced to configure the guest notifiers and start/stop all vhost/vhost_net devices.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
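To make the split of work concrete, here is a minimal, self-contained sketch of the idea. It is not the actual QEMU code: the demo_* types and functions are illustrative stubs. It assumes two virtqueues (rx and tx) per network queue pair, so backend i handles the virtqueues starting at vq_index = i * 2, and a single wrapper starts every backend in turn, mirroring the role of the new vhost_net_start().

#include <stdio.h>

/* Illustrative stand-ins for the real vhost structures; not QEMU code. */
struct demo_vhost_dev {
    int vq_index;   /* first virtqueue handled by this backend */
    int nvqs;       /* number of virtqueues it handles (2 for net: rx + tx) */
    int started;
};

static int demo_vhost_start_one(struct demo_vhost_dev *dev)
{
    dev->started = 1;
    printf("backend handles virtqueues %d..%d\n",
           dev->vq_index, dev->vq_index + dev->nvqs - 1);
    return 0;
}

/* Wrapper in the spirit of the new vhost_net_start(): guest notifiers would
 * be configured once for the whole device here, then each backend is started
 * with its own slice of the virtqueues. */
static int demo_vhost_start_all(struct demo_vhost_dev *devs, int total_queues)
{
    for (int i = 0; i < total_queues; i++) {
        devs[i].vq_index = i * 2;   /* relative index of its first virtqueue */
        devs[i].nvqs = 2;
        if (demo_vhost_start_one(&devs[i]) < 0) {
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct demo_vhost_dev devs[4] = {{ 0 }};
    return demo_vhost_start_all(devs, 4);
}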
69 lines · 2.0 KiB · C
#ifndef VHOST_H
#define VHOST_H

#include "hw/hw.h"
#include "hw/virtio.h"
#include "exec/memory.h"

/* Generic structures common for any vhost based device. */
struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long used_phys;
    unsigned used_size;
    void *ring;
    unsigned long long ring_phys;
    unsigned ring_size;
    EventNotifier masked_notifier;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
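
/*
 * With these definitions the dirty log is a bitmap of vhost_log_chunk_t
 * words, one bit per VHOST_LOG_PAGE (4 KiB) of guest memory, so each word
 * covers VHOST_LOG_CHUNK bytes of guest memory (256 KiB on a 64-bit host,
 * where sizeof(vhost_log_chunk_t) == 8).
 */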

struct vhost_memory;
struct vhost_dev {
    MemoryListener memory_listener;
    int control;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    struct vhost_virtqueue *vqs;
    int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    unsigned long long features;
    unsigned long long acked_features;
    unsigned long long backend_features;
    bool started;
    bool log_enabled;
    vhost_log_chunk_t *log;
    unsigned long long log_size;
    bool force;
};

int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force);
void vhost_dev_cleanup(struct vhost_dev *hdev);
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/* Test and clear masked event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/* Mask/unmask events from this vq.
 */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);
#endif
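
For context, a hedged usage sketch of the mask/pending API declared above. The hook itself (demo_guest_notifier_mask) and the re-notification policy are assumptions, not code taken from QEMU; virtio_get_queue(), virtio_queue_get_guest_notifier() and event_notifier_set() are existing QEMU helpers.

/* Assumes this header and "hw/virtio.h" are included. */
static void demo_guest_notifier_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
                                     int queue, bool mask)
{
    /* Mask or unmask vhost's events for this virtqueue. */
    vhost_virtqueue_mask(hdev, vdev, queue, mask);

    /* Per the header comment, test-and-clear the pending status only after
     * unmasking, so an event raised while the queue was masked is not lost:
     * re-signal the guest notifier if something arrived in the meantime. */
    if (!mask && vhost_virtqueue_pending(hdev, queue)) {
        event_notifier_set(virtio_queue_get_guest_notifier(
                               virtio_get_queue(vdev, queue)));
    }
}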