Revert "vhost-user: add multi queue support"

This reverts commit 830d70db69.

The interface isn't fully backwards-compatible, which is bad.
Let's redo this properly after 2.4.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author: Michael S. Tsirkin <mst@redhat.com>
Date:   2015-07-15 13:47:31 +03:00
Parent: 75d663611e
Commit: d345ed2da3

6 changed files with 18 additions and 49 deletions

View File

@@ -127,11 +127,6 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
-Multi queue support
--------------------
-The protocol supports multiple queues by setting all index fields in the sent
-messages to a properly calculated value.
-
 Message types
 -------------

View File

@@ -160,7 +160,6 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
-    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type);
@@ -287,7 +286,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          &file);
+                                          NULL);
             assert(r >= 0);
         }
     }

View File

@@ -210,12 +210,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;
 
     case VHOST_SET_OWNER:
-        break;
-
     case VHOST_RESET_OWNER:
-        memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
-        msg.size = sizeof(m.state);
         break;
 
     case VHOST_SET_MEM_TABLE:
@@ -258,20 +253,17 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
-        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -279,7 +271,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -321,7 +313,6 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
             error_report("Received bad msg size.");
             return -1;
         }
-        msg.state.index -= dev->vq_index;
         memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
         break;
     default:

View File

@@ -120,39 +120,35 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up", s->nc.info_str);
+        error_report("chardev \"%s\" went up", s->chr->label);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down", s->nc.info_str);
+        error_report("chardev \"%s\" went down", s->chr->label);
         break;
     }
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr,
-                               uint32_t queues)
+                               const char *name, CharDriverState *chr)
 {
     NetClientState *nc;
     VhostUserState *s;
-    int i;
 
-    for (i = 0; i < queues; i++) {
-        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
-                 i, chr->label);
+    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
+             chr->label);
 
-        s = DO_UPCAST(VhostUserState, nc, nc);
+    s = DO_UPCAST(VhostUserState, nc, nc);
 
-        /* We don't provide a receive callback */
-        s->nc.receive_disabled = 1;
-        s->chr = chr;
-        s->nc.queue_index = i;
+    /* We don't provide a receive callback */
+    s->nc.receive_disabled = 1;
+    s->chr = chr;
 
-        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
-    }
+    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
 
     return 0;
 }
@@ -230,7 +226,6 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer, Error **errp)
 {
-    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
@@ -248,12 +243,6 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         return -1;
     }
 
-    /* number of queues for multiqueue */
-    if (vhost_user_opts->has_queues) {
-        queues = vhost_user_opts->queues;
-    } else {
-        queues = 1;
-    }
-
-    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
+    return net_vhost_user_init(peer, "vhost_user", name, chr);
 }

View File

@@ -2466,16 +2466,12 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
-# @queues: #optional number of queues to be created for multiqueue vhost-user
-#          (default: 1) (Since 2.4)
-#
 # Since 2.1
 ##
 { 'struct': 'NetdevVhostUserOptions',
   'data': {
     'chardev': 'str',
-    '*vhostforce': 'bool',
-    '*queues': 'uint32' } }
+    '*vhostforce': 'bool' } }
 
 ##
 # @NetClientOptions

View File

@@ -1963,14 +1963,13 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
-be created for multiqueue vhost-user.
+@var{vhostforce}.
 
 Example:
 @example