Net patches

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJTDJtxAAoJEJykq7OBq3PIEBAH/0Fh1TUsHpLGwtVC+HObl4HF
 19KJpaXfG3L1x5pV4qbRvsmgXDNjnm8sSAbAvINn5uKJqk371qshvgssaIyLOA/o
 VD+9lR1mwE9mwiktCfFniddoZmTsMyGxsbE4i1dZAxsGuuiJ3x2zRotpQCLjmWMJ
 OSIdy1v657tR0EnhjI19g4fNvCpfOwX3tvMt20vgL6/x9AuNjMfFeP6S/KNuxBBC
 6vflVNmN0AIc/tIbR5bql5/wcDixJcxobFXHxAmIbaaJTNtFXq0PNpo0kfLi5Zzx
 s6iVoYVXNMx1xqpegBEVLyIvtcX7SCggO8FT+bhzaOfPVL8NaSXRJMRjH22V+D0=
 =UA+s
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/net-pull-request' into staging

Net patches

# gpg: Signature made Tue 25 Feb 2014 13:32:33 GMT using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/net-pull-request:
  virtio-net: use qemu_get_queue() where possible
  vhost_net: use offload API instead of bypassing it
  net: remove implicit peer from offload API
  net: Disable netmap backend when not supported
  net: add offloading support to netmap backend
  net: make tap offloading callbacks static
  net: virtio-net and vmxnet3 use offloading API
  net: TAP uses NetClientInfo offloading callbacks
  net: extend NetClientInfo for offloading
  net: change vnet-hdr TAP prototypes
  opencores_eth: flush queue whenever can_receive can go from false to true

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2014-02-26 20:04:37 +00:00
commit bc3fbad816
10 changed files with 244 additions and 125 deletions
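
The series replaces the tap-specific offload helpers (tap_has_vnet_hdr(), tap_set_offload(), ...) with generic wrappers that dispatch through NetClientInfo callbacks, so non-TAP backends such as netmap can provide them too. Below is a minimal sketch of how a device model might drive the new API using the prototypes added to include/net/net.h in this series; the helper name and the assumption of an already-initialized NICState are hypothetical, and the header-length check mirrors the vhost_net.c hunk:

    /* Hedged sketch: "nic" is assumed to have been created with qemu_new_nic().
     * The qemu_* wrappers added in net/net.c are NULL-safe and fall back to
     * false / no-op when the backend does not implement the callback. */
    static void example_setup_offloads(NICState *nic)
    {
        NetClientState *peer = qemu_get_queue(nic)->peer;

        if (!qemu_has_vnet_hdr(peer)) {
            return; /* backend has no virtio-net header support */
        }

        qemu_using_vnet_hdr(peer, true);
        if (qemu_has_vnet_hdr_len(peer, sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            qemu_set_vnet_hdr_len(peer, sizeof(struct virtio_net_hdr_mrg_rxbuf));
        }

        /* csum, tso4, tso6, ecn, ufo */
        qemu_set_offload(peer, 1, 1, 1, 0, 0);
    }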

configure

@ -2249,13 +2249,21 @@ EOF
fi
##########################################
# netmap headers probe
# netmap support probe
# Apart from looking for netmap headers, we make sure that the host API version
# supports the netmap backend (>=11). The upper bound (15) is meant to simulate
# a minor/major version number. Minor new features will be marked with values up
# to 15, and if something happens that requires a change to the backend we will
# move above 15, submit the backend fixes and modify these two bounds.
if test "$netmap" != "no" ; then
cat > $TMPC << EOF
#include <inttypes.h>
#include <net/if.h>
#include <net/netmap.h>
#include <net/netmap_user.h>
#if (NETMAP_API < 11) || (NETMAP_API > 15)
#error
#endif
int main(void) { return 0; }
EOF
if compile_prog "" "" ; then

hw/net/vhost_net.c

@ -106,7 +106,7 @@ struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
goto fail;
}
net->nc = backend;
net->dev.backend_features = tap_has_vnet_hdr(backend) ? 0 :
net->dev.backend_features = qemu_has_vnet_hdr(backend) ? 0 :
(1 << VHOST_NET_F_VIRTIO_NET_HDR);
net->backend = r;
@ -117,8 +117,8 @@ struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
if (r < 0) {
goto fail;
}
if (!tap_has_vnet_hdr_len(backend,
sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
if (!qemu_has_vnet_hdr_len(backend,
sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
net->dev.features &= ~(1 << VIRTIO_NET_F_MRG_RXBUF);
}
if (~net->dev.features & net->dev.backend_features) {

hw/net/virtio-net.c

@ -325,11 +325,7 @@ static void peer_test_vnet_hdr(VirtIONet *n)
return;
}
if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
return;
}
n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}
static int peer_has_vnet_hdr(VirtIONet *n)
@ -342,7 +338,7 @@ static int peer_has_ufo(VirtIONet *n)
if (!peer_has_vnet_hdr(n))
return 0;
n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);
n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
return n->has_ufo;
}
@ -361,8 +357,8 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
nc = qemu_get_subqueue(n->nic, i);
if (peer_has_vnet_hdr(n) &&
tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
n->host_hdr_len = n->guest_hdr_len;
}
}
@ -463,7 +459,7 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
qemu_set_offload(qemu_get_queue(n->nic)->peer,
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
@ -1544,7 +1540,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
peer_test_vnet_hdr(n);
if (peer_has_vnet_hdr(n)) {
for (i = 0; i < n->max_queues; i++) {
tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
}
n->host_hdr_len = sizeof(struct virtio_net_hdr);
} else {

hw/net/vmxnet3.c

@ -1290,12 +1290,12 @@ static void vmxnet3_update_features(VMXNET3State *s)
s->lro_supported, rxcso_supported,
s->rx_vlan_stripping);
if (s->peer_has_vhdr) {
tap_set_offload(qemu_get_queue(s->nic)->peer,
rxcso_supported,
s->lro_supported,
s->lro_supported,
0,
0);
qemu_set_offload(qemu_get_queue(s->nic)->peer,
rxcso_supported,
s->lro_supported,
s->lro_supported,
0,
0);
}
}
@ -1883,11 +1883,9 @@ static NetClientInfo net_vmxnet3_info = {
static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
{
NetClientState *peer = qemu_get_queue(s->nic)->peer;
NetClientState *nc = qemu_get_queue(s->nic);
if ((NULL != peer) &&
(peer->info->type == NET_CLIENT_OPTIONS_KIND_TAP) &&
tap_has_vnet_hdr(peer)) {
if (qemu_has_vnet_hdr(nc->peer)) {
return true;
}
@ -1935,10 +1933,10 @@ static void vmxnet3_net_init(VMXNET3State *s)
s->lro_supported = false;
if (s->peer_has_vhdr) {
tap_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
sizeof(struct virtio_net_hdr));
tap_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
}
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);

include/net/net.h

@ -50,6 +50,12 @@ typedef void (NetCleanup) (NetClientState *);
typedef void (LinkStatusChanged)(NetClientState *);
typedef void (NetClientDestructor)(NetClientState *);
typedef RxFilterInfo *(QueryRxFilter)(NetClientState *);
typedef bool (HasUfo)(NetClientState *);
typedef bool (HasVnetHdr)(NetClientState *);
typedef bool (HasVnetHdrLen)(NetClientState *, int);
typedef void (UsingVnetHdr)(NetClientState *, bool);
typedef void (SetOffload)(NetClientState *, int, int, int, int, int);
typedef void (SetVnetHdrLen)(NetClientState *, int);
typedef struct NetClientInfo {
NetClientOptionsKind type;
@ -62,6 +68,12 @@ typedef struct NetClientInfo {
LinkStatusChanged *link_status_changed;
QueryRxFilter *query_rx_filter;
NetPoll *poll;
HasUfo *has_ufo;
HasVnetHdr *has_vnet_hdr;
HasVnetHdrLen *has_vnet_hdr_len;
UsingVnetHdr *using_vnet_hdr;
SetOffload *set_offload;
SetVnetHdrLen *set_vnet_hdr_len;
} NetClientInfo;
struct NetClientState {
@ -120,6 +132,13 @@ ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
void qemu_purge_queued_packets(NetClientState *nc);
void qemu_flush_queued_packets(NetClientState *nc);
void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
bool qemu_has_ufo(NetClientState *nc);
bool qemu_has_vnet_hdr(NetClientState *nc);
bool qemu_has_vnet_hdr_len(NetClientState *nc, int len);
void qemu_using_vnet_hdr(NetClientState *nc, bool enable);
void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
int ecn, int ufo);
void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
void qemu_macaddr_default_if_unset(MACAddr *macaddr);
int qemu_show_nic_models(const char *arg, const char *const *models);
void qemu_check_nic_model(NICInfo *nd, const char *model);
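
Backends opt in by filling the new NetClientInfo members; a callback left NULL makes the matching qemu_* wrapper in net/net.c (further down) return false or do nothing. A hedged sketch of a hypothetical backend that only advertises the query callbacks, with made-up names:

    /* Hypothetical backend: reports no UFO / vnet-header support and leaves the
     * setters NULL, so qemu_set_offload() and friends become harmless no-ops. */
    static bool examplenet_has_ufo(NetClientState *nc)
    {
        return false;
    }

    static bool examplenet_has_vnet_hdr(NetClientState *nc)
    {
        return false;
    }

    static NetClientInfo net_examplenet_info = {
        .type = NET_CLIENT_OPTIONS_KIND_TAP, /* placeholder; a real backend uses its own kind */
        .size = sizeof(NetClientState),
        .has_ufo = examplenet_has_ufo,
        .has_vnet_hdr = examplenet_has_vnet_hdr,
        /* has_vnet_hdr_len, using_vnet_hdr, set_offload and set_vnet_hdr_len are
         * intentionally NULL and handled by the wrappers. */
    };

The tap, tap-win32 and netmap diffs below wire up their callbacks in exactly this way.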

include/net/tap.h

@ -29,12 +29,6 @@
#include "qemu-common.h"
#include "qapi-types.h"
bool tap_has_ufo(NetClientState *nc);
int tap_has_vnet_hdr(NetClientState *nc);
int tap_has_vnet_hdr_len(NetClientState *nc, int len);
void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr);
void tap_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo);
void tap_set_vnet_hdr_len(NetClientState *nc, int len);
int tap_enable(NetClientState *nc);
int tap_disable(NetClientState *nc);

net/net.c

@ -378,6 +378,61 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
}
}
bool qemu_has_ufo(NetClientState *nc)
{
if (!nc || !nc->info->has_ufo) {
return false;
}
return nc->info->has_ufo(nc);
}
bool qemu_has_vnet_hdr(NetClientState *nc)
{
if (!nc || !nc->info->has_vnet_hdr) {
return false;
}
return nc->info->has_vnet_hdr(nc);
}
bool qemu_has_vnet_hdr_len(NetClientState *nc, int len)
{
if (!nc || !nc->info->has_vnet_hdr_len) {
return false;
}
return nc->info->has_vnet_hdr_len(nc, len);
}
void qemu_using_vnet_hdr(NetClientState *nc, bool enable)
{
if (!nc || !nc->info->using_vnet_hdr) {
return;
}
nc->info->using_vnet_hdr(nc, enable);
}
void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
int ecn, int ufo)
{
if (!nc || !nc->info->set_offload) {
return;
}
nc->info->set_offload(nc, csum, tso4, tso6, ecn, ufo);
}
void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
{
if (!nc || !nc->info->set_vnet_hdr_len) {
return;
}
nc->info->set_vnet_hdr_len(nc, len);
}
int qemu_can_send_packet(NetClientState *sender)
{
if (!sender->peer) {

net/netmap.c

@ -27,10 +27,13 @@
#include <net/if.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>
#define NETMAP_WITH_LIBS
#include <net/netmap.h>
#include <net/netmap_user.h>
#include "net/net.h"
#include "net/tap.h"
#include "clients.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
@ -54,33 +57,9 @@ typedef struct NetmapState {
bool read_poll;
bool write_poll;
struct iovec iov[IOV_MAX];
int vnet_hdr_len; /* Current virtio-net header length. */
} NetmapState;
#define D(format, ...) \
do { \
struct timeval __xxts; \
gettimeofday(&__xxts, NULL); \
printf("%03d.%06d %s [%d] " format "\n", \
(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
__func__, __LINE__, ##__VA_ARGS__); \
} while (0)
/* Rate limited version of "D", lps indicates how many per second */
#define RD(lps, format, ...) \
do { \
static int t0, __cnt; \
struct timeval __xxts; \
gettimeofday(&__xxts, NULL); \
if (t0 != __xxts.tv_sec) { \
t0 = __xxts.tv_sec; \
__cnt = 0; \
} \
if (__cnt++ < lps) { \
D(format, ##__VA_ARGS__); \
} \
} while (0)
#ifndef __FreeBSD__
#define pkt_copy bcopy
#else
@ -237,7 +216,7 @@ static ssize_t netmap_receive(NetClientState *nc,
return size;
}
if (ring->avail == 0) {
if (nm_ring_empty(ring)) {
/* No available slots in the netmap TX ring. */
netmap_write_poll(s, true);
return 0;
@ -250,8 +229,7 @@ static ssize_t netmap_receive(NetClientState *nc,
ring->slot[i].len = size;
ring->slot[i].flags = 0;
pkt_copy(buf, dst, size);
ring->cur = NETMAP_RING_NEXT(ring, i);
ring->avail--;
ring->cur = ring->head = nm_ring_next(ring, i);
ioctl(s->me.fd, NIOCTXSYNC, NULL);
return size;
@ -267,17 +245,15 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
uint8_t *dst;
int j;
uint32_t i;
uint32_t avail;
if (unlikely(!ring)) {
/* Drop the packet. */
return iov_size(iov, iovcnt);
}
i = ring->cur;
avail = ring->avail;
last = i = ring->cur;
if (avail < iovcnt) {
if (nm_ring_space(ring) < iovcnt) {
/* Not enough netmap slots. */
netmap_write_poll(s, true);
return 0;
@ -293,7 +269,7 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
while (iov_frag_size) {
nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);
if (unlikely(avail == 0)) {
if (unlikely(nm_ring_empty(ring))) {
/* We run out of netmap slots while splitting the
iovec fragments. */
netmap_write_poll(s, true);
@ -308,8 +284,7 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
pkt_copy(iov[j].iov_base + offset, dst, nm_frag_size);
last = i;
i = NETMAP_RING_NEXT(ring, i);
avail--;
i = nm_ring_next(ring, i);
offset += nm_frag_size;
iov_frag_size -= nm_frag_size;
@ -318,9 +293,8 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
/* The last slot must not have NS_MOREFRAG set. */
ring->slot[last].flags &= ~NS_MOREFRAG;
/* Now update ring->cur and ring->avail. */
ring->cur = i;
ring->avail = avail;
/* Now update ring->cur and ring->head. */
ring->cur = ring->head = i;
ioctl(s->me.fd, NIOCTXSYNC, NULL);
@ -343,7 +317,7 @@ static void netmap_send(void *opaque)
/* Keep sending while there are available packets into the netmap
RX ring and the forwarding path towards the peer is open. */
while (ring->avail > 0 && qemu_can_send_packet(&s->nc)) {
while (!nm_ring_empty(ring) && qemu_can_send_packet(&s->nc)) {
uint32_t i;
uint32_t idx;
bool morefrag;
@ -358,11 +332,10 @@ static void netmap_send(void *opaque)
s->iov[iovcnt].iov_len = ring->slot[i].len;
iovcnt++;
ring->cur = NETMAP_RING_NEXT(ring, i);
ring->avail--;
} while (ring->avail && morefrag);
ring->cur = ring->head = nm_ring_next(ring, i);
} while (!nm_ring_empty(ring) && morefrag);
if (unlikely(!ring->avail && morefrag)) {
if (unlikely(nm_ring_empty(ring) && morefrag)) {
RD(5, "[netmap_send] ran out of slots, with a pending"
"incomplete packet\n");
}
@ -394,6 +367,63 @@ static void netmap_cleanup(NetClientState *nc)
s->me.fd = -1;
}
/* Offloading manipulation support callbacks. */
static bool netmap_has_ufo(NetClientState *nc)
{
return true;
}
static bool netmap_has_vnet_hdr(NetClientState *nc)
{
return true;
}
static bool netmap_has_vnet_hdr_len(NetClientState *nc, int len)
{
return len == 0 || len == sizeof(struct virtio_net_hdr) ||
len == sizeof(struct virtio_net_hdr_mrg_rxbuf);
}
static void netmap_using_vnet_hdr(NetClientState *nc, bool enable)
{
}
static void netmap_set_vnet_hdr_len(NetClientState *nc, int len)
{
NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
int err;
struct nmreq req;
/* Issue a NETMAP_BDG_VNET_HDR command to change the virtio-net header
* length for the netmap adapter associated to 'me->ifname'.
*/
memset(&req, 0, sizeof(req));
pstrcpy(req.nr_name, sizeof(req.nr_name), s->me.ifname);
req.nr_version = NETMAP_API;
req.nr_cmd = NETMAP_BDG_VNET_HDR;
req.nr_arg1 = len;
err = ioctl(s->me.fd, NIOCREGIF, &req);
if (err) {
error_report("Unable to execute NETMAP_BDG_VNET_HDR on %s: %s",
s->me.ifname, strerror(errno));
} else {
/* Keep track of the current length. */
s->vnet_hdr_len = len;
}
}
static void netmap_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
int ecn, int ufo)
{
NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
/* Setting a virtio-net header length greater than zero automatically
* enables the offloads.
*/
if (!s->vnet_hdr_len) {
netmap_set_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
}
}
/* NetClientInfo methods */
static NetClientInfo net_netmap_info = {
@ -403,6 +433,12 @@ static NetClientInfo net_netmap_info = {
.receive_iov = netmap_receive_iov,
.poll = netmap_poll,
.cleanup = netmap_cleanup,
.has_ufo = netmap_has_ufo,
.has_vnet_hdr = netmap_has_vnet_hdr,
.has_vnet_hdr_len = netmap_has_vnet_hdr_len,
.using_vnet_hdr = netmap_using_vnet_hdr,
.set_offload = netmap_set_offload,
.set_vnet_hdr_len = netmap_set_vnet_hdr_len,
};
/* The exported init function
@ -428,6 +464,7 @@ int net_init_netmap(const NetClientOptions *opts,
nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
s = DO_UPCAST(NetmapState, nc, nc);
s->me = me;
s->vnet_hdr_len = 0;
netmap_read_poll(s, true); /* Initially only poll for reads. */
return 0;

net/tap-win32.c

@ -669,11 +669,60 @@ static void tap_win32_send(void *opaque)
}
}
static bool tap_has_ufo(NetClientState *nc)
{
return false;
}
static bool tap_has_vnet_hdr(NetClientState *nc)
{
return false;
}
int tap_probe_vnet_hdr_len(int fd, int len)
{
return 0;
}
void tap_fd_set_vnet_hdr_len(int fd, int len)
{
}
static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
{
}
static void tap_set_offload(NetClientState *nc, int csum, int tso4,
int tso6, int ecn, int ufo)
{
}
struct vhost_net *tap_get_vhost_net(NetClientState *nc)
{
return NULL;
}
static bool tap_has_vnet_hdr_len(NetClientState *nc, int len)
{
return false;
}
static void tap_set_vnet_hdr_len(NetClientState *nc, int len)
{
abort();
}
static NetClientInfo net_tap_win32_info = {
.type = NET_CLIENT_OPTIONS_KIND_TAP,
.size = sizeof(TAPState),
.receive = tap_receive,
.cleanup = tap_cleanup,
.has_ufo = tap_has_ufo,
.has_vnet_hdr = tap_has_vnet_hdr,
.has_vnet_hdr_len = tap_has_vnet_hdr_len,
.using_vnet_hdr = tap_using_vnet_hdr,
.set_offload = tap_set_offload,
.set_vnet_hdr_len = tap_set_vnet_hdr_len,
};
static int tap_win32_init(NetClientState *peer, const char *model,
@ -722,49 +771,6 @@ int net_init_tap(const NetClientOptions *opts, const char *name,
return 0;
}
bool tap_has_ufo(NetClientState *nc)
{
return false;
}
int tap_has_vnet_hdr(NetClientState *nc)
{
return 0;
}
int tap_probe_vnet_hdr_len(int fd, int len)
{
return 0;
}
void tap_fd_set_vnet_hdr_len(int fd, int len)
{
}
void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
{
}
void tap_set_offload(NetClientState *nc, int csum, int tso4,
int tso6, int ecn, int ufo)
{
}
struct vhost_net *tap_get_vhost_net(NetClientState *nc)
{
return NULL;
}
int tap_has_vnet_hdr_len(NetClientState *nc, int len)
{
return 0;
}
void tap_set_vnet_hdr_len(NetClientState *nc, int len)
{
abort();
}
int tap_enable(NetClientState *nc)
{
abort();

net/tap.c

@ -210,7 +210,7 @@ static void tap_send(void *opaque)
} while (size > 0 && qemu_can_send_packet(&s->nc));
}
bool tap_has_ufo(NetClientState *nc)
static bool tap_has_ufo(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@ -219,7 +219,7 @@ bool tap_has_ufo(NetClientState *nc)
return s->has_ufo;
}
int tap_has_vnet_hdr(NetClientState *nc)
static bool tap_has_vnet_hdr(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@ -228,16 +228,16 @@ int tap_has_vnet_hdr(NetClientState *nc)
return !!s->host_vnet_hdr_len;
}
int tap_has_vnet_hdr_len(NetClientState *nc, int len)
static bool tap_has_vnet_hdr_len(NetClientState *nc, int len)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
return tap_probe_vnet_hdr_len(s->fd, len);
return !!tap_probe_vnet_hdr_len(s->fd, len);
}
void tap_set_vnet_hdr_len(NetClientState *nc, int len)
static void tap_set_vnet_hdr_len(NetClientState *nc, int len)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@ -249,7 +249,7 @@ void tap_set_vnet_hdr_len(NetClientState *nc, int len)
s->host_vnet_hdr_len = len;
}
void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@ -259,7 +259,7 @@ void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
s->using_vnet_hdr = using_vnet_hdr;
}
void tap_set_offload(NetClientState *nc, int csum, int tso4,
static void tap_set_offload(NetClientState *nc, int csum, int tso4,
int tso6, int ecn, int ufo)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@ -314,6 +314,12 @@ static NetClientInfo net_tap_info = {
.receive_iov = tap_receive_iov,
.poll = tap_poll,
.cleanup = tap_cleanup,
.has_ufo = tap_has_ufo,
.has_vnet_hdr = tap_has_vnet_hdr,
.has_vnet_hdr_len = tap_has_vnet_hdr_len,
.using_vnet_hdr = tap_using_vnet_hdr,
.set_offload = tap_set_offload,
.set_vnet_hdr_len = tap_set_vnet_hdr_len,
};
static TAPState *net_tap_fd_init(NetClientState *peer,