Merge remote branch 'mst/for_anthony' into staging

Anthony Liguori 2010-10-11 15:37:11 -05:00
commit a2d3f69530
8 changed files with 168 additions and 88 deletions

hw/eepro100.c

@@ -219,7 +219,8 @@ typedef enum {
typedef struct {
PCIDevice dev;
uint8_t mult[8]; /* multicast mask array */
/* Hash register (multicast mask array, multiple individual addresses). */
uint8_t mult[8];
int mmio_index;
NICState *nic;
NICConf conf;
@@ -599,7 +600,7 @@ static void nic_reset(void *opaque)
{
EEPRO100State *s = opaque;
TRACE(OTHER, logout("%p\n", s));
/* TODO: Clearing of multicast table for selective reset, too? */
/* TODO: Clearing of hash register for selective reset, too? */
memset(&s->mult[0], 0, sizeof(s->mult));
nic_selective_reset(s);
}
@@ -851,7 +852,14 @@ static void action_command(EEPRO100State *s)
case CmdConfigure:
cpu_physical_memory_read(s->cb_address + 8, &s->configuration[0],
sizeof(s->configuration));
TRACE(OTHER, logout("configuration: %s\n", nic_dump(&s->configuration[0], 16)));
TRACE(OTHER, logout("configuration: %s\n",
nic_dump(&s->configuration[0], 16)));
TRACE(OTHER, logout("configuration: %s\n",
nic_dump(&s->configuration[16],
ARRAY_SIZE(s->configuration) - 16)));
if (s->configuration[20] & BIT(6)) {
TRACE(OTHER, logout("Multiple IA bit\n"));
}
break;
case CmdMulticastList:
set_multicast_list(s);
@@ -1647,12 +1655,6 @@ static ssize_t nic_receive(VLANClientState *nc, const uint8_t * buf, size_t size
static const uint8_t broadcast_macaddr[6] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* TODO: check multiple IA bit. */
if (s->configuration[20] & BIT(6)) {
missing("Multiple IA bit");
return -1;
}
if (s->configuration[8] & 0x80) {
/* CSMA is disabled. */
logout("%p received while CSMA is disabled\n", s);
@@ -1702,6 +1704,16 @@ static ssize_t nic_receive(VLANClientState *nc, const uint8_t * buf, size_t size
/* Promiscuous: receive all. */
TRACE(RXTX, logout("%p received frame in promiscuous mode, len=%zu\n", s, size));
rfd_status |= 0x0004;
} else if (s->configuration[20] & BIT(6)) {
/* Multiple IA bit set. */
unsigned mcast_idx = compute_mcast_idx(buf);
assert(mcast_idx < 64);
if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) {
TRACE(RXTX, logout("%p accepted, multiple IA bit set\n", s));
} else {
TRACE(RXTX, logout("%p frame ignored, multiple IA bit set\n", s));
return -1;
}
} else {
TRACE(RXTX, logout("%p received frame, ignored, len=%zu,%s\n", s, size,
nic_dump(buf, size)));
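
The new receive branch above honours the Multiple IA bit by hashing the destination MAC into one of 64 bins and testing the matching bit of the 8-byte hash register (s->mult), instead of rejecting such frames outright. Below is a minimal standalone sketch of that bit test; toy_mcast_idx() is only a stand-in for QEMU's compute_mcast_idx(), which derives the index from the Ethernet CRC.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Stand-in for compute_mcast_idx(): fold the MAC into a 6-bit bin number. */
static unsigned toy_mcast_idx(const uint8_t *mac)
{
    unsigned h = 0;
    for (int i = 0; i < 6; i++) {
        h = (h * 31) ^ mac[i];
    }
    return h & 0x3f;                      /* 64 possible bins */
}

static int hash_register_accepts(const uint8_t mult[8], const uint8_t *mac)
{
    unsigned idx = toy_mcast_idx(mac);
    assert(idx < 64);
    /* Each bin maps to one bit of the 8-byte hash register. */
    return (mult[idx >> 3] >> (idx & 7)) & 1;
}

int main(void)
{
    uint8_t mult[8] = { 0 };
    const uint8_t mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
    unsigned idx = toy_mcast_idx(mac);

    mult[idx >> 3] |= 1 << (idx & 7);     /* program the bin for this MAC */
    printf("accept: %d\n", hash_register_accepts(mult, mac));
    return 0;
}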

hw/vhost.c

@@ -454,11 +454,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
};
struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
if (!vdev->binding->set_guest_notifier) {
fprintf(stderr, "binding does not support guest notifiers\n");
return -ENOSYS;
}
if (!vdev->binding->set_host_notifier) {
fprintf(stderr, "binding does not support host notifiers\n");
return -ENOSYS;
@@ -511,12 +506,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
r = -errno;
goto fail_alloc;
}
r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, true);
if (r < 0) {
fprintf(stderr, "Error binding guest notifier: %d\n", -r);
goto fail_guest_notifier;
}
r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
if (r < 0) {
fprintf(stderr, "Error binding host notifier: %d\n", -r);
@@ -526,12 +515,14 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
if (r) {
r = -errno;
goto fail_kick;
}
file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
if (r) {
r = -errno;
goto fail_call;
}
@@ -541,8 +532,6 @@ fail_call:
fail_kick:
vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
fail_guest_notifier:
fail_alloc:
cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
0, 0);
@@ -568,13 +557,6 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
.index = idx,
};
int r;
r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
if (r < 0) {
fprintf(stderr, "vhost VQ %d guest cleanup failed: %d\n", idx, r);
fflush(stderr);
}
assert (r >= 0);
r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
if (r < 0) {
fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
@@ -647,15 +629,26 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int i, r;
if (!vdev->binding->set_guest_notifiers) {
fprintf(stderr, "binding does not support guest notifiers\n");
r = -ENOSYS;
goto fail;
}
r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
if (r < 0) {
fprintf(stderr, "Error binding guest notifier: %d\n", -r);
goto fail_notifiers;
}
r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
goto fail;
goto fail_features;
}
r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
if (r < 0) {
r = -errno;
goto fail;
goto fail_mem;
}
for (i = 0; i < hdev->nvqs; ++i) {
r = vhost_virtqueue_init(hdev,
@@ -675,13 +668,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
(uint64_t)(unsigned long)hdev->log);
if (r < 0) {
r = -errno;
goto fail_vq;
goto fail_log;
}
}
hdev->started = true;
return 0;
fail_log:
fail_vq:
while (--i >= 0) {
vhost_virtqueue_cleanup(hdev,
@@ -689,13 +683,18 @@ fail_vq:
hdev->vqs + i,
i);
}
fail_mem:
fail_features:
vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
fail_notifiers:
fail:
return r;
}
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int i;
int i, r;
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_cleanup(hdev,
vdev,
@@ -704,6 +703,13 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
}
vhost_client_sync_dirty_bitmap(&hdev->client, 0,
(target_phys_addr_t)~0x0ull);
r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
if (r < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
fflush(stderr);
}
assert (r >= 0);
hdev->started = false;
qemu_free(hdev->log);
hdev->log_size = 0;
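
The vhost_dev_start() hunks above replace the single catch-all fail label with a ladder (fail_log, fail_vq, fail_mem, fail_features, fail_notifiers) so that each failure undoes exactly the steps already completed, including the newly added set_guest_notifiers() call. A compressed sketch of that unwind pattern follows; every helper here is a stand-in, not the real binding or ioctl call.

#include <stdio.h>

static int bind_guest_notifiers(int on) { printf("guest notifiers: %d\n", on); return 0; }
static int set_features(void)  { return 0; }
static int set_mem_table(void) { return -1; }   /* simulate a failure so the unwind runs */
static int init_vqs(void)      { return 0; }
static void cleanup_vqs(void)  { printf("cleanup vqs\n"); }

static int dev_start(void)
{
    int r;

    r = bind_guest_notifiers(1);
    if (r < 0) {
        goto fail_notifiers;
    }
    r = set_features();
    if (r < 0) {
        goto fail_features;
    }
    r = set_mem_table();
    if (r < 0) {
        goto fail_mem;
    }
    r = init_vqs();
    if (r < 0) {
        goto fail_vq;
    }
    return 0;

fail_vq:
    cleanup_vqs();                      /* stands in for the while (--i >= 0) loop */
fail_mem:
fail_features:
    bind_guest_notifiers(0);            /* undo the first step */
fail_notifiers:
    return r;
}

int main(void)
{
    printf("dev_start returned %d\n", dev_start());
    return 0;
}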

hw/virtio-net.c

@@ -54,6 +54,7 @@ typedef struct VirtIONet
uint8_t nouni;
uint8_t nobcast;
uint8_t vhost_started;
bool vm_running;
VMChangeStateEntry *vmstate;
struct {
int in_use;
@@ -98,6 +99,38 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
}
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = to_virtio_net(vdev);
if (!n->nic->nc.peer) {
return;
}
if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
return;
}
if (!tap_get_vhost_net(n->nic->nc.peer)) {
return;
}
if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
(n->status & VIRTIO_NET_S_LINK_UP) &&
n->vm_running)) {
return;
}
if (!n->vhost_started) {
int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
if (r < 0) {
fprintf(stderr, "unable to start vhost net: %d: "
"falling back on userspace virtio\n", -r);
} else {
n->vhost_started = 1;
}
} else {
vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
n->vhost_started = 0;
}
}
static void virtio_net_set_link_status(VLANClientState *nc)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -110,6 +143,8 @@ static void virtio_net_set_link_status(VLANClientState *nc)
if (n->status != old_status)
virtio_notify_config(&n->vdev);
virtio_net_set_status(&n->vdev, n->vdev.status);
}
static void virtio_net_reset(VirtIODevice *vdev)
@@ -123,10 +158,6 @@ static void virtio_net_reset(VirtIODevice *vdev)
n->nomulti = 0;
n->nouni = 0;
n->nobcast = 0;
if (n->vhost_started) {
vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
n->vhost_started = 0;
}
/* Flush any MAC and VLAN filter table state */
n->mac_table.in_use = 0;
@@ -783,12 +814,9 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
{
VirtIONet *n = opaque;
if (n->vhost_started) {
/* TODO: should we really stop the backend?
* If we don't, it might keep writing to memory. */
vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
n->vhost_started = 0;
}
/* At this point, backend must be stopped, otherwise
* it might keep writing to memory. */
assert(!n->vhost_started);
virtio_save(&n->vdev, f);
qemu_put_buffer(f, n->mac, ETH_ALEN);
@@ -924,44 +952,14 @@ static NetClientInfo net_virtio_info = {
.link_status_changed = virtio_net_set_link_status,
};
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = to_virtio_net(vdev);
if (!n->nic->nc.peer) {
return;
}
if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
return;
}
if (!tap_get_vhost_net(n->nic->nc.peer)) {
return;
}
if (!!n->vhost_started == !!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return;
}
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), vdev);
if (r < 0) {
fprintf(stderr, "unable to start vhost net: %d: "
"falling back on userspace virtio\n", -r);
} else {
n->vhost_started = 1;
}
} else {
vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
n->vhost_started = 0;
}
}
static void virtio_net_vmstate_change(void *opaque, int running, int reason)
{
VirtIONet *n = opaque;
uint8_t status = running ? VIRTIO_CONFIG_S_DRIVER_OK : 0;
n->vm_running = running;
/* This is called when vm is started/stopped,
* it will start/stop vhost backend if * appropriate
* it will start/stop vhost backend if appropriate
* e.g. after migration. */
virtio_net_set_status(&n->vdev, n->vdev.status & status);
virtio_net_set_status(&n->vdev, n->vdev.status);
}
VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
@@ -1028,9 +1026,8 @@ void virtio_net_exit(VirtIODevice *vdev)
VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
qemu_del_vm_change_state_handler(n->vmstate);
if (n->vhost_started) {
vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
}
/* This will stop vhost backend if appropriate. */
virtio_net_set_status(vdev, 0);
qemu_purge_queued_packets(&n->nic->nc);
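
With the moved and extended virtio_net_set_status() above, vhost is kept running exactly when three conditions hold at once: the driver has set DRIVER_OK, the link is up, and the VM is running. A standalone sketch of that decision follows; the two constants match the virtio headers, everything else is illustration rather than the QEMU function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_CONFIG_S_DRIVER_OK 4
#define VIRTIO_NET_S_LINK_UP      1

static bool vhost_should_run(uint8_t status, uint16_t net_status, bool vm_running)
{
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
           (net_status & VIRTIO_NET_S_LINK_UP) &&
           vm_running;
}

int main(void)
{
    bool vhost_started = false;
    bool want = vhost_should_run(VIRTIO_CONFIG_S_DRIVER_OK,
                                 VIRTIO_NET_S_LINK_UP, true);

    if (vhost_started == want) {
        /* Nothing to change: the early-return case in the hunk above. */
    } else if (want) {
        printf("start vhost\n");
    } else {
        printf("stop vhost\n");
    }
    return 0;
}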

hw/virtio-pci.c

@@ -451,6 +451,33 @@ static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
return 0;
}
static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = proxy->vdev;
int r, n;
for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
if (!virtio_queue_get_num(vdev, n)) {
break;
}
r = virtio_pci_set_guest_notifier(opaque, n, assign);
if (r < 0) {
goto assign_error;
}
}
return 0;
assign_error:
/* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
while (--n >= 0) {
virtio_pci_set_guest_notifier(opaque, n, !assign);
}
return r;
}
static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
{
VirtIOPCIProxy *proxy = opaque;
@@ -488,7 +515,7 @@ static const VirtIOBindings virtio_pci_bindings = {
.load_queue = virtio_pci_load_queue,
.get_features = virtio_pci_get_features,
.set_host_notifier = virtio_pci_set_host_notifier,
.set_guest_notifier = virtio_pci_set_guest_notifier,
.set_guest_notifiers = virtio_pci_set_guest_notifiers,
};
static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
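
virtio_pci_set_guest_notifiers() above walks every populated queue and, if assignment fails partway through, rolls back by re-calling the per-queue helper with the opposite assign value. A small sketch of that rollback loop; set_one_notifier() is a stand-in that fails on purpose to show the recovery path.

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX 8

static int set_one_notifier(int n, bool assign)
{
    printf("queue %d -> %s\n", n, assign ? "assign" : "deassign");
    return n == 5 ? -1 : 0;              /* simulate a failure at queue 5 */
}

static int set_all_notifiers(bool assign)
{
    int r, n;

    for (n = 0; n < QUEUE_MAX; n++) {
        r = set_one_notifier(n, assign);
        if (r < 0) {
            goto assign_error;
        }
    }
    return 0;

assign_error:
    /* Undo queues 0 .. n-1 so the device is left in a consistent state. */
    while (--n >= 0) {
        set_one_notifier(n, !assign);
    }
    return r;
}

int main(void)
{
    printf("result: %d\n", set_all_notifiers(true));
    return 0;
}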

hw/virtio.c

@@ -458,6 +458,8 @@ void virtio_reset(void *opaque)
VirtIODevice *vdev = opaque;
int i;
virtio_set_status(vdev, 0);
if (vdev->reset)
vdev->reset(vdev);

hw/virtio.h

@@ -93,7 +93,7 @@ typedef struct {
int (*load_config)(void * opaque, QEMUFile *f);
int (*load_queue)(void * opaque, int n, QEMUFile *f);
unsigned (*get_features)(void * opaque);
int (*set_guest_notifier)(void * opaque, int n, bool assigned);
int (*set_guest_notifiers)(void * opaque, bool assigned);
int (*set_host_notifier)(void * opaque, int n, bool assigned);
} VirtIOBindings;

net.c

@@ -281,29 +281,64 @@ NICState *qemu_new_nic(NetClientInfo *info,
return nic;
}
void qemu_del_vlan_client(VLANClientState *vc)
static void qemu_cleanup_vlan_client(VLANClientState *vc)
{
if (vc->vlan) {
QTAILQ_REMOVE(&vc->vlan->clients, vc, next);
} else {
if (vc->send_queue) {
qemu_del_net_queue(vc->send_queue);
}
QTAILQ_REMOVE(&non_vlan_clients, vc, next);
if (vc->peer) {
vc->peer->peer = NULL;
}
}
if (vc->info->cleanup) {
vc->info->cleanup(vc);
}
}
static void qemu_free_vlan_client(VLANClientState *vc)
{
if (!vc->vlan) {
if (vc->send_queue) {
qemu_del_net_queue(vc->send_queue);
}
if (vc->peer) {
vc->peer->peer = NULL;
}
}
qemu_free(vc->name);
qemu_free(vc->model);
qemu_free(vc);
}
void qemu_del_vlan_client(VLANClientState *vc)
{
/* If there is a peer NIC, delete and cleanup client, but do not free. */
if (!vc->vlan && vc->peer && vc->peer->info->type == NET_CLIENT_TYPE_NIC) {
NICState *nic = DO_UPCAST(NICState, nc, vc->peer);
if (nic->peer_deleted) {
return;
}
nic->peer_deleted = true;
/* Let NIC know peer is gone. */
vc->peer->link_down = true;
if (vc->peer->info->link_status_changed) {
vc->peer->info->link_status_changed(vc->peer);
}
qemu_cleanup_vlan_client(vc);
return;
}
/* If this is a peer NIC and peer has already been deleted, free it now. */
if (!vc->vlan && vc->peer && vc->info->type == NET_CLIENT_TYPE_NIC) {
NICState *nic = DO_UPCAST(NICState, nc, vc);
if (nic->peer_deleted) {
qemu_free_vlan_client(vc->peer);
}
}
qemu_cleanup_vlan_client(vc);
qemu_free_vlan_client(vc);
}
VLANClientState *
qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
const char *client_str)
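
The net.c change above splits deletion into a cleanup half and a free half: a backend whose peer is a NIC is cleaned up immediately but only freed once the NIC itself goes away, tracked by the new peer_deleted flag. A simplified sketch of that two-phase teardown; the Client type and helpers are stand-ins for VLANClientState and the QEMU functions, and the link_status_changed notification is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Client {
    const char *name;
    struct Client *peer;
    bool is_nic;
    bool peer_deleted;       /* set on the NIC when its backend is deleted */
} Client;

static void client_cleanup(Client *c) { printf("cleanup %s\n", c->name); }
static void client_free(Client *c)    { printf("free %s\n", c->name); free(c); }

static void del_client(Client *c)
{
    /* Backend with a NIC peer: clean up now, defer the free. */
    if (!c->is_nic && c->peer && c->peer->is_nic) {
        if (c->peer->peer_deleted) {
            return;
        }
        c->peer->peer_deleted = true;
        client_cleanup(c);
        return;
    }
    /* NIC whose backend was already deleted: free that backend now. */
    if (c->is_nic && c->peer && c->peer_deleted) {
        client_free(c->peer);
    }
    client_cleanup(c);
    client_free(c);
}

int main(void)
{
    Client *nic = calloc(1, sizeof(*nic));
    Client *tap = calloc(1, sizeof(*tap));

    nic->name = "nic"; nic->is_nic = true; nic->peer = tap;
    tap->name = "tap"; tap->peer = nic;

    del_client(tap);   /* cleanup tap, mark nic->peer_deleted */
    del_client(nic);   /* free tap, then clean up and free nic */
    return 0;
}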

net.h

@@ -72,6 +72,7 @@ typedef struct NICState {
VLANClientState nc;
NICConf *conf;
void *opaque;
bool peer_deleted;
} NICState;
struct VLANState {