Merge remote-tracking branch 'stefanha/net' into staging

# By Dmitry Fleytman (5) and others
# Via Stefan Hajnoczi
* stefanha/net:
  net: increase buffer size to accommodate Jumbo frame pkts
  VMXNET3 device implementation
  Packet abstraction for VMWARE network devices
  Common definitions for VMWARE devices
  net: iovec checksum calculator
  Checksum-related utility functions
  net: use socket_set_nodelay() for -netdev socket
This commit is contained in:
Anthony Liguori 2013-03-25 13:14:26 -05:00
commit dcadaa9b40
21 changed files with 5198 additions and 17 deletions

View File

@ -13,6 +13,7 @@ CONFIG_LSI_SCSI_PCI=y
CONFIG_MEGASAS_SCSI_PCI=y
CONFIG_RTL8139_PCI=y
CONFIG_E1000_PCI=y
CONFIG_VMXNET3_PCI=y
CONFIG_IDE_CORE=y
CONFIG_IDE_QDEV=y
CONFIG_IDE_PCI=y

View File

@ -119,6 +119,8 @@ common-obj-$(CONFIG_PCNET_PCI) += pcnet-pci.o
common-obj-$(CONFIG_PCNET_COMMON) += pcnet.o
common-obj-$(CONFIG_E1000_PCI) += e1000.o
common-obj-$(CONFIG_RTL8139_PCI) += rtl8139.o
common-obj-$(CONFIG_VMXNET3_PCI) += vmxnet_tx_pkt.o vmxnet_rx_pkt.o
common-obj-$(CONFIG_VMXNET3_PCI) += vmxnet3.o
common-obj-$(CONFIG_SMC91C111) += smc91c111.o
common-obj-$(CONFIG_LAN9118) += lan9118.o

View File

@ -60,6 +60,7 @@
#define PCI_DEVICE_ID_VMWARE_NET 0x0720
#define PCI_DEVICE_ID_VMWARE_SCSI 0x0730
#define PCI_DEVICE_ID_VMWARE_IDE 0x1729
#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
/* Intel (0x8086) */
#define PCI_DEVICE_ID_INTEL_82551IT 0x1209

143
hw/vmware_utils.h Normal file
View File

@ -0,0 +1,143 @@
/*
* QEMU VMWARE paravirtual devices - auxiliary code
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef VMWARE_UTILS_H
#define VMWARE_UTILS_H
#include "qemu/range.h"
#ifndef VMW_SHPRN
#define VMW_SHPRN(fmt, ...) do {} while (0)
#endif
/*
* Shared memory access functions with byte swap support
* Each function contains printout for reverse-engineering needs
*
*/
/* Read @len bytes of guest physical memory at @addr into @buf. */
static inline void
vmw_shmem_read(hwaddr addr, void *buf, int len)
{
    VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
    cpu_physical_memory_read(addr, buf, len);
}

/* Write @len bytes from @buf to guest physical memory at @addr. */
static inline void
vmw_shmem_write(hwaddr addr, void *buf, int len)
{
    VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
    cpu_physical_memory_write(addr, buf, len);
}

/*
 * Read or write @len bytes at @addr depending on @is_write
 * (non-zero means write), mirroring cpu_physical_memory_rw().
 */
static inline void
vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
{
    VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
              addr, len, buf, is_write);
    cpu_physical_memory_rw(addr, buf, len, is_write);
}
/*
 * Fill @len bytes of guest physical memory starting at @addr with @val.
 *
 * Fix: the parameter was declared with the non-standard type `uint8`;
 * every other accessor in this header uses the <stdint.h> fixed-width
 * types, so use uint8_t here as well.
 */
static inline void
vmw_shmem_set(hwaddr addr, uint8_t val, int len)
{
    int i;
    VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);

    /* Byte-at-a-time writes; @val lives on the stack and is copied each
     * iteration, so no temporary fill buffer is needed. */
    for (i = 0; i < len; i++) {
        cpu_physical_memory_write(addr + i, &val, 1);
    }
}
/* Load one byte from guest memory; result widened to uint32_t. */
static inline uint32_t
vmw_shmem_ld8(hwaddr addr)
{
    uint8_t res = ldub_phys(addr);
    VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
    return res;
}

/* Store one byte to guest memory. */
static inline void
vmw_shmem_st8(hwaddr addr, uint8_t value)
{
    VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
    stb_phys(addr, value);
}

/* Load a little-endian 16-bit value; result widened to uint32_t. */
static inline uint32_t
vmw_shmem_ld16(hwaddr addr)
{
    uint16_t res = lduw_le_phys(addr);
    VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
    return res;
}

/* Store a 16-bit value in little-endian byte order. */
static inline void
vmw_shmem_st16(hwaddr addr, uint16_t value)
{
    VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
    stw_le_phys(addr, value);
}

/* Load a little-endian 32-bit value. */
static inline uint32_t
vmw_shmem_ld32(hwaddr addr)
{
    uint32_t res = ldl_le_phys(addr);
    VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
    return res;
}

/* Store a 32-bit value in little-endian byte order. */
static inline void
vmw_shmem_st32(hwaddr addr, uint32_t value)
{
    VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
    stl_le_phys(addr, value);
}

/* Load a little-endian 64-bit value. */
static inline uint64_t
vmw_shmem_ld64(hwaddr addr)
{
    uint64_t res = ldq_le_phys(addr);
    VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
    return res;
}

/* Store a 64-bit value in little-endian byte order. */
static inline void
vmw_shmem_st64(hwaddr addr, uint64_t value)
{
    VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
    stq_le_phys(addr, value);
}
/* Macros for simplification of operations on array-style registers */

/*
 * Whether <addr> lies inside of array-style register defined by <base>,
 * number of elements (<cnt>) and element size (<regsize>)
 *
 * Fix: parenthesize the macro arguments in the expansion so that
 * expression arguments (e.g. "n + 1" for cnt) group correctly, matching
 * the hygiene of VMW_MULTIREG_IDX_BY_ADDR below.
 */
#define VMW_IS_MULTIREG_ADDR(addr, base, cnt, regsize)                 \
    range_covers_byte((base), (cnt) * (regsize), (addr))

/*
 * Returns index of given register (<addr>) in array-style register defined by
 * <base> and element size (<regsize>)
 *
 */
#define VMW_MULTIREG_IDX_BY_ADDR(addr, base, regsize)                  \
    (((addr) - (base)) / (regsize))

#endif

2461
hw/vmxnet3.c Normal file

File diff suppressed because it is too large Load Diff

760
hw/vmxnet3.h Normal file
View File

@ -0,0 +1,760 @@
/*
* QEMU VMWARE VMXNET3 paravirtual NIC interface definitions
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2.
* See the COPYING file in the top-level directory.
*
*/
#ifndef _QEMU_VMXNET3_H
#define _QEMU_VMXNET3_H
#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
/*
* VMWARE headers we got from Linux kernel do not fully comply QEMU coding
* standards in sense of types and defines used.
* Since we didn't want to change VMWARE code, following set of typedefs
* and defines needed to compile these headers with QEMU introduced.
*/
#define u64 uint64_t
#define u32 uint32_t
#define u16 uint16_t
#define u8 uint8_t
#define __le16 uint16_t
#define __le32 uint32_t
#define __le64 uint64_t
#define __packed QEMU_PACKED
#if defined(HOST_WORDS_BIGENDIAN)
#define const_cpu_to_le64(x) bswap_64(x)
#define __BIG_ENDIAN_BITFIELD
#else
#define const_cpu_to_le64(x) (x)
#endif
/*
* Following is an interface definition for
* VMXNET3 device as provided by VMWARE
* See original copyright from Linux kernel v3.2.8
* header file drivers/net/vmxnet3/vmxnet3_defs.h below.
*/
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
struct UPT1_TxStats {
u64 TSOPktsTxOK; /* TSO pkts post-segmentation */
u64 TSOBytesTxOK;
u64 ucastPktsTxOK;
u64 ucastBytesTxOK;
u64 mcastPktsTxOK;
u64 mcastBytesTxOK;
u64 bcastPktsTxOK;
u64 bcastBytesTxOK;
u64 pktsTxError;
u64 pktsTxDiscard;
};
struct UPT1_RxStats {
u64 LROPktsRxOK; /* LRO pkts */
u64 LROBytesRxOK; /* bytes from LRO pkts */
/* the following counters are for pkts from the wire, i.e., pre-LRO */
u64 ucastPktsRxOK;
u64 ucastBytesRxOK;
u64 mcastPktsRxOK;
u64 mcastBytesRxOK;
u64 bcastPktsRxOK;
u64 bcastBytesRxOK;
u64 pktsRxOutOfBuf;
u64 pktsRxError;
};
/* interrupt moderation level */
/* interrupt moderation level (VMware-derived code; comment typo fixed only) */
enum {
    UPT1_IML_NONE = 0, /* no interrupt moderation */
    UPT1_IML_HIGHEST = 7, /* least intr generated */
    UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */
};
/* values for UPT1_RSSConf.hashFunc */
enum {
UPT1_RSS_HASH_TYPE_NONE = 0x0,
UPT1_RSS_HASH_TYPE_IPV4 = 0x01,
UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02,
UPT1_RSS_HASH_TYPE_IPV6 = 0x04,
UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08,
};
enum {
UPT1_RSS_HASH_FUNC_NONE = 0x0,
UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01,
};
#define UPT1_RSS_MAX_KEY_SIZE 40
#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
struct UPT1_RSSConf {
u16 hashType;
u16 hashFunc;
u16 hashKeySize;
u16 indTableSize;
u8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
};
/* features */
enum {
UPT1_F_RXCSUM = const_cpu_to_le64(0x0001), /* rx csum verification */
UPT1_F_RSS = const_cpu_to_le64(0x0002),
UPT1_F_RXVLAN = const_cpu_to_le64(0x0004), /* VLAN tag stripping */
UPT1_F_LRO = const_cpu_to_le64(0x0008),
};
/* all registers are 32 bit wide */
/* BAR 1 */
enum {
VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */
VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */
VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
VMXNET3_REG_CMD = 0x20, /* Command */
VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
VMXNET3_REG_MACH = 0x30, /* MAC Address High */
VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
};
/* BAR 0 */
enum {
VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */
VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
};
#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
#define VMXNET3_REG_ALIGN_MASK 0x7
/* I/O Mapped access to registers */
#define VMXNET3_IO_TYPE_PT 0
#define VMXNET3_IO_TYPE_VD 1
#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
enum {
VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, /* 0xCAFE0000 */
VMXNET3_CMD_QUIESCE_DEV, /* 0xCAFE0001 */
VMXNET3_CMD_RESET_DEV, /* 0xCAFE0002 */
VMXNET3_CMD_UPDATE_RX_MODE, /* 0xCAFE0003 */
VMXNET3_CMD_UPDATE_MAC_FILTERS, /* 0xCAFE0004 */
VMXNET3_CMD_UPDATE_VLAN_FILTERS, /* 0xCAFE0005 */
VMXNET3_CMD_UPDATE_RSSIDT, /* 0xCAFE0006 */
VMXNET3_CMD_UPDATE_IML, /* 0xCAFE0007 */
VMXNET3_CMD_UPDATE_PMCFG, /* 0xCAFE0008 */
VMXNET3_CMD_UPDATE_FEATURE, /* 0xCAFE0009 */
VMXNET3_CMD_LOAD_PLUGIN, /* 0xCAFE000A */
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, /* 0xF00D0000 */
VMXNET3_CMD_GET_STATS, /* 0xF00D0001 */
VMXNET3_CMD_GET_LINK, /* 0xF00D0002 */
VMXNET3_CMD_GET_PERM_MAC_LO, /* 0xF00D0003 */
VMXNET3_CMD_GET_PERM_MAC_HI, /* 0xF00D0004 */
VMXNET3_CMD_GET_DID_LO, /* 0xF00D0005 */
VMXNET3_CMD_GET_DID_HI, /* 0xF00D0006 */
VMXNET3_CMD_GET_DEV_EXTRA_INFO, /* 0xF00D0007 */
VMXNET3_CMD_GET_CONF_INTR /* 0xF00D0008 */
};
/*
* Little Endian layout of bitfields -
* Byte 0 : 7.....len.....0
* Byte 1 : rsvd gen 13.len.8
* Byte 2 : 5.msscof.0 ext1 dtype
* Byte 3 : 13...msscof...6
*
* Big Endian layout of bitfields -
* Byte 0: 13...msscof...6
* Byte 1 : 5.msscof.0 ext1 dtype
* Byte 2 : rsvd gen 13.len.8
* Byte 3 : 7.....len.....0
*
* Thus, le32_to_cpu on the dword will allow the big endian driver to read
* the bit fields correctly. And cpu_to_le32 will convert bitfields
* bit fields written by big endian driver to format required by device.
*/
struct Vmxnet3_TxDesc {
__le64 addr;
#ifdef __BIG_ENDIAN_BITFIELD
u32 msscof:14; /* MSS, checksum offset, flags */
u32 ext1:1;
u32 dtype:1; /* descriptor type */
u32 rsvd:1;
u32 gen:1; /* generation bit */
u32 len:14;
#else
u32 len:14;
u32 gen:1; /* generation bit */
u32 rsvd:1;
u32 dtype:1; /* descriptor type */
u32 ext1:1;
u32 msscof:14; /* MSS, checksum offset, flags */
#endif /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD
u32 tci:16; /* Tag to Insert */
u32 ti:1; /* VLAN Tag Insertion */
u32 ext2:1;
u32 cq:1; /* completion request */
u32 eop:1; /* End Of Packet */
u32 om:2; /* offload mode */
u32 hlen:10; /* header len */
#else
u32 hlen:10; /* header len */
u32 om:2; /* offload mode */
u32 eop:1; /* End Of Packet */
u32 cq:1; /* completion request */
u32 ext2:1;
u32 ti:1; /* VLAN Tag Insertion */
u32 tci:16; /* Tag to Insert */
#endif /* __BIG_ENDIAN_BITFIELD */
};
/* TxDesc.OM values */
#define VMXNET3_OM_NONE 0
#define VMXNET3_OM_CSUM 2
#define VMXNET3_OM_TSO 3
/* fields in TxDesc we access w/o using bit fields */
#define VMXNET3_TXD_EOP_SHIFT 12
#define VMXNET3_TXD_CQ_SHIFT 13
#define VMXNET3_TXD_GEN_SHIFT 14
#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
#define VMXNET3_HDR_COPY_SIZE 128
struct Vmxnet3_TxDataDesc {
u8 data[VMXNET3_HDR_COPY_SIZE];
};
#define VMXNET3_TCD_GEN_SHIFT 31
#define VMXNET3_TCD_GEN_SIZE 1
#define VMXNET3_TCD_TXIDX_SHIFT 0
#define VMXNET3_TCD_TXIDX_SIZE 12
#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
struct Vmxnet3_TxCompDesc {
u32 txdIdx:12; /* Index of the EOP TxDesc */
u32 ext1:20;
__le32 ext2;
__le32 ext3;
u32 rsvd:24;
u32 type:7; /* completion type */
u32 gen:1; /* generation bit */
};
struct Vmxnet3_RxDesc {
__le64 addr;
#ifdef __BIG_ENDIAN_BITFIELD
u32 gen:1; /* Generation bit */
u32 rsvd:15;
u32 dtype:1; /* Descriptor type */
u32 btype:1; /* Buffer Type */
u32 len:14;
#else
u32 len:14;
u32 btype:1; /* Buffer Type */
u32 dtype:1; /* Descriptor type */
u32 rsvd:15;
u32 gen:1; /* Generation bit */
#endif
u32 ext1;
};
/* values of RXD.BTYPE */
#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
/* fields in RxDesc we access w/o using bit fields */
#define VMXNET3_RXD_BTYPE_SHIFT 14
#define VMXNET3_RXD_GEN_SHIFT 31
struct Vmxnet3_RxCompDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 ext2:1;
u32 cnc:1; /* Checksum Not Calculated */
u32 rssType:4; /* RSS hash type used */
u32 rqID:10; /* rx queue/ring ID */
u32 sop:1; /* Start of Packet */
u32 eop:1; /* End of Packet */
u32 ext1:2;
u32 rxdIdx:12; /* Index of the RxDesc */
#else
u32 rxdIdx:12; /* Index of the RxDesc */
u32 ext1:2;
u32 eop:1; /* End of Packet */
u32 sop:1; /* Start of Packet */
u32 rqID:10; /* rx queue/ring ID */
u32 rssType:4; /* RSS hash type used */
u32 cnc:1; /* Checksum Not Calculated */
u32 ext2:1;
#endif /* __BIG_ENDIAN_BITFIELD */
__le32 rssHash; /* RSS hash value */
#ifdef __BIG_ENDIAN_BITFIELD
u32 tci:16; /* Tag stripped */
u32 ts:1; /* Tag is stripped */
u32 err:1; /* Error */
u32 len:14; /* data length */
#else
u32 len:14; /* data length */
u32 err:1; /* Error */
u32 ts:1; /* Tag is stripped */
u32 tci:16; /* Tag stripped */
#endif /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD
u32 gen:1; /* generation bit */
u32 type:7; /* completion type */
u32 fcs:1; /* Frame CRC correct */
u32 frg:1; /* IP Fragment */
u32 v4:1; /* IPv4 */
u32 v6:1; /* IPv6 */
u32 ipc:1; /* IP Checksum Correct */
u32 tcp:1; /* TCP packet */
u32 udp:1; /* UDP packet */
u32 tuc:1; /* TCP/UDP Checksum Correct */
u32 csum:16;
#else
u32 csum:16;
u32 tuc:1; /* TCP/UDP Checksum Correct */
u32 udp:1; /* UDP packet */
u32 tcp:1; /* TCP packet */
u32 ipc:1; /* IP Checksum Correct */
u32 v6:1; /* IPv6 */
u32 v4:1; /* IPv4 */
u32 frg:1; /* IP Fragment */
u32 fcs:1; /* Frame CRC correct */
u32 type:7; /* completion type */
u32 gen:1; /* generation bit */
#endif /* __BIG_ENDIAN_BITFIELD */
};
/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
#define VMXNET3_RCD_TUC_SHIFT 16
#define VMXNET3_RCD_IPC_SHIFT 19
/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
#define VMXNET3_RCD_TYPE_SHIFT 56
#define VMXNET3_RCD_GEN_SHIFT 63
/* csum OK for TCP/UDP pkts over IP */
#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
1 << VMXNET3_RCD_IPC_SHIFT)
#define VMXNET3_TXD_GEN_SIZE 1
#define VMXNET3_TXD_EOP_SIZE 1
/* value of RxCompDesc.rssType */
enum {
VMXNET3_RCD_RSS_TYPE_NONE = 0,
VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
};
/* a union for accessing all cmd/completion descriptors */
union Vmxnet3_GenericDesc {
__le64 qword[2];
__le32 dword[4];
__le16 word[8];
struct Vmxnet3_TxDesc txd;
struct Vmxnet3_RxDesc rxd;
struct Vmxnet3_TxCompDesc tcd;
struct Vmxnet3_RxCompDesc rcd;
};
#define VMXNET3_INIT_GEN 1
/* Max size of a single tx buffer */
#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
/* # of tx desc needed for a tx buffer size */
#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
VMXNET3_MAX_TX_BUF_SIZE)
/* max # of tx descs for a non-tso pkt */
#define VMXNET3_MAX_TXD_PER_PKT 16
/* Max size of a single rx buffer */
#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
/* Minimum size of a type 0 buffer */
#define VMXNET3_MIN_T0_BUF_SIZE 128
#define VMXNET3_MAX_CSUM_OFFSET 1024
/* Ring base address alignment */
#define VMXNET3_RING_BA_ALIGN 512
#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
/* Ring size must be a multiple of 32 */
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
/* a list of reasons for queue stop */
enum {
VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */
VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */
VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */
VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */
VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */
VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */
VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */
VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */
};
/* completion descriptor types */
#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
enum {
VMXNET3_GOS_BITS_UNK = 0, /* unknown */
VMXNET3_GOS_BITS_32 = 1,
VMXNET3_GOS_BITS_64 = 2,
};
#define VMXNET3_GOS_TYPE_UNK 0 /* unknown */
#define VMXNET3_GOS_TYPE_LINUX 1
#define VMXNET3_GOS_TYPE_WIN 2
#define VMXNET3_GOS_TYPE_SOLARIS 3
#define VMXNET3_GOS_TYPE_FREEBSD 4
#define VMXNET3_GOS_TYPE_PXE 5
struct Vmxnet3_GOSInfo {
#ifdef __BIG_ENDIAN_BITFIELD
u32 gosMisc:10; /* other info about gos */
u32 gosVer:16; /* gos version */
u32 gosType:4; /* which guest */
u32 gosBits:2; /* 32-bit or 64-bit? */
#else
u32 gosBits:2; /* 32-bit or 64-bit? */
u32 gosType:4; /* which guest */
u32 gosVer:16; /* gos version */
u32 gosMisc:10; /* other info about gos */
#endif /* __BIG_ENDIAN_BITFIELD */
};
struct Vmxnet3_DriverInfo {
__le32 version;
struct Vmxnet3_GOSInfo gos;
__le32 vmxnet3RevSpt;
__le32 uptVerSpt;
};
#define VMXNET3_REV1_MAGIC 0xbabefee1
/*
* QueueDescPA must be 128 bytes aligned. It points to an array of
* Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
* The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
* Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
*/
#define VMXNET3_QUEUE_DESC_ALIGN 128
struct Vmxnet3_MiscConf {
struct Vmxnet3_DriverInfo driverInfo;
__le64 uptFeatures;
__le64 ddPA; /* driver data PA */
__le64 queueDescPA; /* queue descriptor table PA */
__le32 ddLen; /* driver data len */
__le32 queueDescLen; /* queue desc. table len in bytes */
__le32 mtu;
__le16 maxNumRxSG;
u8 numTxQueues;
u8 numRxQueues;
__le32 reserved[4];
};
struct Vmxnet3_TxQueueConf {
__le64 txRingBasePA;
__le64 dataRingBasePA;
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
__le64 reserved;
__le32 txRingSize; /* # of tx desc */
__le32 dataRingSize; /* # of data desc */
__le32 compRingSize; /* # of comp desc */
__le32 ddLen; /* size of driver data */
u8 intrIdx;
u8 _pad[7];
};
struct Vmxnet3_RxQueueConf {
__le64 rxRingBasePA[2];
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
__le64 reserved;
__le32 rxRingSize[2]; /* # of rx desc */
__le32 compRingSize; /* # of rx comp desc */
__le32 ddLen; /* size of driver data */
u8 intrIdx;
u8 _pad[7];
};
enum vmxnet3_intr_mask_mode {
VMXNET3_IMM_AUTO = 0,
VMXNET3_IMM_ACTIVE = 1,
VMXNET3_IMM_LAZY = 2
};
enum vmxnet3_intr_type {
VMXNET3_IT_AUTO = 0,
VMXNET3_IT_INTX = 1,
VMXNET3_IT_MSI = 2,
VMXNET3_IT_MSIX = 3
};
#define VMXNET3_MAX_TX_QUEUES 8
#define VMXNET3_MAX_RX_QUEUES 16
/* addition 1 for events */
#define VMXNET3_MAX_INTRS 25
/* value of intrCtrl */
#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
struct Vmxnet3_IntrConf {
bool autoMask;
u8 numIntrs; /* # of interrupts */
u8 eventIntrIdx;
u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
* each intr */
__le32 intrCtrl;
__le32 reserved[2];
};
/* one bit per VLAN ID, the size is in the units of u32 */
#define VMXNET3_VFT_SIZE (4096/(sizeof(uint32_t)*8))
struct Vmxnet3_QueueStatus {
bool stopped;
u8 _pad[3];
__le32 error;
};
struct Vmxnet3_TxQueueCtrl {
__le32 txNumDeferred;
__le32 txThreshold;
__le64 reserved;
};
struct Vmxnet3_RxQueueCtrl {
bool updateRxProd;
u8 _pad[7];
__le64 reserved;
};
enum {
VMXNET3_RXM_UCAST = 0x01, /* unicast only */
VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */
VMXNET3_RXM_BCAST = 0x04, /* broadcast only */
VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */
VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */
};
struct Vmxnet3_RxFilterConf {
__le32 rxMode; /* VMXNET3_RXM_xxx */
__le16 mfTableLen; /* size of the multicast filter table */
__le16 _pad1;
__le64 mfTablePA; /* PA of the multicast filters table */
__le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
};
#define VMXNET3_PM_MAX_FILTERS 6
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
* filters */
struct Vmxnet3_PM_PktFilter {
u8 maskSize;
u8 patternSize;
u8 mask[VMXNET3_PM_MAX_MASK_SIZE];
u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
u8 pad[6];
};
struct Vmxnet3_PMConf {
__le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
u8 numFilters;
u8 pad[5];
struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
};
struct Vmxnet3_VariableLenConfDesc {
__le32 confVer;
__le32 confLen;
__le64 confPA;
};
struct Vmxnet3_TxQueueDesc {
struct Vmxnet3_TxQueueCtrl ctrl;
struct Vmxnet3_TxQueueConf conf;
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_TxStats stats;
u8 _pad[88]; /* 128 aligned */
};
struct Vmxnet3_RxQueueDesc {
    struct Vmxnet3_RxQueueCtrl ctrl;
    struct Vmxnet3_RxQueueConf conf;
    /* Driver read after a GET command */
    struct Vmxnet3_QueueStatus status;
    struct UPT1_RxStats stats;
    u8 __pad[88]; /* 128 aligned */
};
struct Vmxnet3_DSDevRead {
/* read-only region for device, read by dev in response to a SET cmd */
struct Vmxnet3_MiscConf misc;
struct Vmxnet3_IntrConf intrConf;
struct Vmxnet3_RxFilterConf rxFilterConf;
struct Vmxnet3_VariableLenConfDesc rssConfDesc;
struct Vmxnet3_VariableLenConfDesc pmConfDesc;
struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
};
/* All structures in DriverShared are padded to multiples of 8 bytes */
struct Vmxnet3_DriverShared {
__le32 magic;
/* make devRead start at 64bit boundaries */
__le32 pad;
struct Vmxnet3_DSDevRead devRead;
__le32 ecr;
__le32 reserved[5];
};
#define VMXNET3_ECR_RQERR (1 << 0)
#define VMXNET3_ECR_TQERR (1 << 1)
#define VMXNET3_ECR_LINK (1 << 2)
#define VMXNET3_ECR_DIC (1 << 3)
#define VMXNET3_ECR_DEBUG (1 << 4)
/* flip the gen bit of a ring */
#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
/* only use this if moving the idx won't affect the gen bit */
#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
do {\
(idx)++;\
if (unlikely((idx) == (ring_size))) {\
(idx) = 0;\
} \
} while (0)
#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
(vfTable[vid >> 5] |= (1 << (vid & 31)))
#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
(vfTable[vid >> 5] &= ~(1 << (vid & 31)))
#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
#define VMXNET3_MAX_MTU 9000
#define VMXNET3_MIN_MTU 60
#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
#define VMXNET3_LINK_DOWN 0
#undef u64
#undef u32
#undef u16
#undef u8
#undef __le16
#undef __le32
#undef __le64
#undef __packed
#undef const_cpu_to_le64
#if defined(HOST_WORDS_BIGENDIAN)
#undef __BIG_ENDIAN_BITFIELD
#endif
#endif

115
hw/vmxnet_debug.h Normal file
View File

@ -0,0 +1,115 @@
/*
* QEMU VMWARE VMXNET* paravirtual NICs - debugging facilities
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef _QEMU_VMXNET_DEBUG_H
#define _QEMU_VMXNET_DEBUG_H
#define VMXNET_DEVICE_NAME "vmxnet3"
/* #define VMXNET_DEBUG_CB */
#define VMXNET_DEBUG_WARNINGS
#define VMXNET_DEBUG_ERRORS
/* #define VMXNET_DEBUG_INTERRUPTS */
/* #define VMXNET_DEBUG_CONFIG */
/* #define VMXNET_DEBUG_RINGS */
/* #define VMXNET_DEBUG_PACKETS */
/* #define VMXNET_DEBUG_SHMEM_ACCESS */
#ifdef VMXNET_DEBUG_SHMEM_ACCESS
#define VMW_SHPRN(fmt, ...) \
do { \
printf("[%s][SH][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_SHPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_CB
#define VMW_CBPRN(fmt, ...) \
do { \
printf("[%s][CB][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_CBPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_PACKETS
#define VMW_PKPRN(fmt, ...) \
do { \
printf("[%s][PK][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_PKPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_WARNINGS
#define VMW_WRPRN(fmt, ...) \
do { \
printf("[%s][WR][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_WRPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_ERRORS
#define VMW_ERPRN(fmt, ...) \
do { \
printf("[%s][ER][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_ERPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_INTERRUPTS
#define VMW_IRPRN(fmt, ...) \
do { \
printf("[%s][IR][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_IRPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_CONFIG
#define VMW_CFPRN(fmt, ...) \
do { \
printf("[%s][CF][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_CFPRN(fmt, ...) do {} while (0)
#endif
#ifdef VMXNET_DEBUG_RINGS
#define VMW_RIPRN(fmt, ...) \
do { \
printf("[%s][RI][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
## __VA_ARGS__); \
} while (0)
#else
#define VMW_RIPRN(fmt, ...) do {} while (0)
#endif
/* printf format and argument helpers for dumping a 6-byte MAC address */
#define VMXNET_MF "%02X:%02X:%02X:%02X:%02X:%02X"
#define VMXNET_MA(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]

#endif /* _QEMU_VMXNET_DEBUG_H */

187
hw/vmxnet_rx_pkt.c Normal file
View File

@ -0,0 +1,187 @@
/*
* QEMU VMWARE VMXNET* paravirtual NICs - RX packets abstractions
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "vmxnet_rx_pkt.h"
#include "net/eth.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "net/checksum.h"
#include "net/tap.h"
/*
* RX packet may contain up to 2 fragments - rebuilt eth header
* in case of VLAN tag stripping
* and payload received from QEMU - in any case
*/
#define VMXNET_MAX_RX_PACKET_FRAGMENTS (2)
struct VmxnetRxPkt {
struct virtio_net_hdr virt_hdr;
uint8_t ehdr_buf[ETH_MAX_L2_HDR_LEN];
struct iovec vec[VMXNET_MAX_RX_PACKET_FRAGMENTS];
uint16_t vec_len;
uint32_t tot_len;
uint16_t tci;
bool vlan_stripped;
bool has_virt_hdr;
eth_pkt_types_e packet_type;
/* Analysis results */
bool isip4;
bool isip6;
bool isudp;
bool istcp;
};
/*
 * Allocate and zero-initialize an RX packet context, storing it in *pkt.
 * The caller releases it with vmxnet_rx_pkt_uninit().
 */
void vmxnet_rx_pkt_init(struct VmxnetRxPkt **pkt, bool has_virt_hdr)
{
    *pkt = g_malloc0(sizeof **pkt);
    (*pkt)->has_virt_hdr = has_virt_hdr;
}
/* Free an RX packet context allocated by vmxnet_rx_pkt_init(). */
void vmxnet_rx_pkt_uninit(struct VmxnetRxPkt *pkt)
{
    g_free(pkt);
}

/* Return a pointer to the context's stored virtio header. */
struct virtio_net_hdr *vmxnet_rx_pkt_get_vhdr(struct VmxnetRxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}
/*
 * Attach a received frame to the RX packet context without copying the
 * payload, optionally stripping a VLAN tag.  When a tag is stripped, the
 * context holds two iovec fragments: the rebuilt L2 header in ehdr_buf
 * and the remainder of the original buffer; otherwise a single fragment
 * referencing @data directly.  Also records protocol analysis results.
 *
 * NOTE(review): @data must stay valid for the lifetime of the attachment;
 * presumably ploff returned by eth_strip_vlan() is the payload offset in
 * the original frame and tci the extracted tag -- confirm against net/eth.
 */
void vmxnet_rx_pkt_attach_data(struct VmxnetRxPkt *pkt, const void *data,
                               size_t len, bool strip_vlan)
{
    uint16_t tci = 0;
    uint16_t ploff;
    assert(pkt);
    pkt->vlan_stripped = false;

    if (strip_vlan) {
        pkt->vlan_stripped = eth_strip_vlan(data, pkt->ehdr_buf, &ploff, &tci);
    }

    if (pkt->vlan_stripped) {
        /* fragment 0: rebuilt header, minus the removed vlan_header */
        pkt->vec[0].iov_base = pkt->ehdr_buf;
        pkt->vec[0].iov_len = ploff - sizeof(struct vlan_header);
        /* fragment 1: payload, taken from the original buffer */
        pkt->vec[1].iov_base = (uint8_t *) data + ploff;
        pkt->vec[1].iov_len = len - ploff;
        pkt->vec_len = 2;
        /* total = payload plus the plain (untagged) ethernet header */
        pkt->tot_len = len - ploff + sizeof(struct eth_header);
    } else {
        /* single fragment referencing the caller's buffer as-is */
        pkt->vec[0].iov_base = (void *)data;
        pkt->vec[0].iov_len = len;
        pkt->vec_len = 1;
        pkt->tot_len = len;
    }

    pkt->tci = tci;

    eth_get_protocols(data, len, &pkt->isip4, &pkt->isip6,
                      &pkt->isudp, &pkt->istcp);
}
/*
 * Debug-only dump of the packet context; compiles to a no-op unless
 * VMXNET_RX_PKT_DEBUG is defined.
 *
 * Fix: the debug branch re-declared the parameter as
 * "VmxnetRxPkt *pkt = (VmxnetRxPkt *)pkt;" -- a self-initialized shadow
 * of @pkt using a nonexistent typedef (the type is "struct VmxnetRxPkt").
 * With debugging enabled this failed to compile and, even if the type had
 * resolved, would have read an indeterminate pointer.  The parameter is
 * already the right type, so the declaration is simply removed.
 */
void vmxnet_rx_pkt_dump(struct VmxnetRxPkt *pkt)
{
#ifdef VMXNET_RX_PKT_DEBUG
    assert(pkt);

    printf("RX PKT: tot_len: %d, vlan_stripped: %d, vlan_tag: %d\n",
           pkt->tot_len, pkt->vlan_stripped, pkt->tci);
#endif
}
/* Record the L2 packet type (unicast/multicast/broadcast) for this packet. */
void vmxnet_rx_pkt_set_packet_type(struct VmxnetRxPkt *pkt,
                                   eth_pkt_types_e packet_type)
{
    assert(pkt);

    pkt->packet_type = packet_type;
}

/* Return the L2 packet type previously set on this packet. */
eth_pkt_types_e vmxnet_rx_pkt_get_packet_type(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

/* Return the total attached data length, in bytes. */
size_t vmxnet_rx_pkt_get_total_len(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tot_len;
}

/* Copy out the protocol flags recorded by vmxnet_rx_pkt_attach_data(). */
void vmxnet_rx_pkt_get_protocols(struct VmxnetRxPkt *pkt,
                                 bool *isip4, bool *isip6,
                                 bool *isudp, bool *istcp)
{
    assert(pkt);

    *isip4 = pkt->isip4;
    *isip6 = pkt->isip6;
    *isudp = pkt->isudp;
    *istcp = pkt->istcp;
}

/* Return the packet's fragment vector (vec_len entries are valid). */
struct iovec *vmxnet_rx_pkt_get_iovec(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec;
}
void vmxnet_rx_pkt_set_vhdr(struct VmxnetRxPkt *pkt,
struct virtio_net_hdr *vhdr)
{
assert(pkt);
memcpy(&pkt->virt_hdr, vhdr, sizeof pkt->virt_hdr);
}
/* True if a VLAN tag was stripped during the last attach_data(). */
bool vmxnet_rx_pkt_is_vlan_stripped(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vlan_stripped;
}

/* True if this context carries a virtio header (set at init time). */
bool vmxnet_rx_pkt_has_virt_hdr(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->has_virt_hdr;
}

/* Number of valid fragments in the iovec (1, or 2 after VLAN stripping). */
uint16_t vmxnet_rx_pkt_get_num_frags(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->vec_len;
}

/* VLAN TCI extracted during attach; 0 if no tag was stripped. */
uint16_t vmxnet_rx_pkt_get_vlan_tag(struct VmxnetRxPkt *pkt)
{
    assert(pkt);

    return pkt->tci;
}

174
hw/vmxnet_rx_pkt.h Normal file
View File

@ -0,0 +1,174 @@
/*
* QEMU VMWARE VMXNET* paravirtual NICs - RX packets abstraction
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef VMXNET_RX_PKT_H
#define VMXNET_RX_PKT_H
#include "stdint.h"
#include "stdbool.h"
#include "net/eth.h"
/* defines to enable packet dump functions */
/*#define VMXNET_RX_PKT_DEBUG*/
struct VmxnetRxPkt;
/**
* Clean all rx packet resources
*
* @pkt: packet
*
*/
void vmxnet_rx_pkt_uninit(struct VmxnetRxPkt *pkt);
/**
* Init function for rx packet functionality
*
* @pkt: packet pointer
* @has_virt_hdr: device uses virtio header
*
*/
void vmxnet_rx_pkt_init(struct VmxnetRxPkt **pkt, bool has_virt_hdr);
/**
* returns total length of data attached to rx context
*
* @pkt: packet
*
 * Return: total length of the attached packet data
*
*/
size_t vmxnet_rx_pkt_get_total_len(struct VmxnetRxPkt *pkt);
/**
* fetches packet analysis results
*
* @pkt: packet
* @isip4: whether the packet given is IPv4
* @isip6: whether the packet given is IPv6
* @isudp: whether the packet given is UDP
* @istcp: whether the packet given is TCP
*
*/
void vmxnet_rx_pkt_get_protocols(struct VmxnetRxPkt *pkt,
bool *isip4, bool *isip6,
bool *isudp, bool *istcp);
/**
* returns virtio header stored in rx context
*
* @pkt: packet
* @ret: virtio header
*
*/
struct virtio_net_hdr *vmxnet_rx_pkt_get_vhdr(struct VmxnetRxPkt *pkt);
/**
* returns packet type
*
* @pkt: packet
* @ret: packet type
*
*/
eth_pkt_types_e vmxnet_rx_pkt_get_packet_type(struct VmxnetRxPkt *pkt);
/**
* returns vlan tag
*
* @pkt: packet
* @ret: VLAN tag
*
*/
uint16_t vmxnet_rx_pkt_get_vlan_tag(struct VmxnetRxPkt *pkt);
/**
* tells whether vlan was stripped from the packet
*
* @pkt: packet
* @ret: VLAN stripped sign
*
*/
bool vmxnet_rx_pkt_is_vlan_stripped(struct VmxnetRxPkt *pkt);
/**
* notifies caller if the packet has virtio header
*
* @pkt: packet
 * @ret: true if packet has virtio header, false otherwise
*
*/
bool vmxnet_rx_pkt_has_virt_hdr(struct VmxnetRxPkt *pkt);
/**
* returns number of frags attached to the packet
*
* @pkt: packet
* @ret: number of frags
*
*/
uint16_t vmxnet_rx_pkt_get_num_frags(struct VmxnetRxPkt *pkt);
/**
* attach data to rx packet
*
* @pkt: packet
* @data: pointer to the data buffer
* @len: data length
* @strip_vlan: should the module strip vlan from data
*
*/
void vmxnet_rx_pkt_attach_data(struct VmxnetRxPkt *pkt, const void *data,
size_t len, bool strip_vlan);
/**
* returns io vector that holds the attached data
*
* @pkt: packet
* @ret: pointer to IOVec
*
*/
struct iovec *vmxnet_rx_pkt_get_iovec(struct VmxnetRxPkt *pkt);
/**
* prints rx packet data if debug is enabled
*
* @pkt: packet
*
*/
void vmxnet_rx_pkt_dump(struct VmxnetRxPkt *pkt);
/**
* copy passed vhdr data to packet context
*
* @pkt: packet
* @vhdr: VHDR buffer
*
*/
void vmxnet_rx_pkt_set_vhdr(struct VmxnetRxPkt *pkt,
struct virtio_net_hdr *vhdr);
/**
* save packet type in packet context
*
* @pkt: packet
* @packet_type: the packet type
*
*/
void vmxnet_rx_pkt_set_packet_type(struct VmxnetRxPkt *pkt,
eth_pkt_types_e packet_type);
#endif

567
hw/vmxnet_tx_pkt.c Normal file
View File

@ -0,0 +1,567 @@
/*
* QEMU VMWARE VMXNET* paravirtual NICs - TX packets abstractions
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "vmxnet_tx_pkt.h"
#include "net/eth.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "exec/cpu-common.h"
enum {
VMXNET_TX_PKT_VHDR_FRAG = 0,
VMXNET_TX_PKT_L2HDR_FRAG,
VMXNET_TX_PKT_L3HDR_FRAG,
VMXNET_TX_PKT_PL_START_FRAG
};
/* TX packet private context */
struct VmxnetTxPkt {
    struct virtio_net_hdr virt_hdr;     /* header sent ahead of the data
                                           when has_virt_hdr is set */
    bool has_virt_hdr;                  /* peer accepts a virtio-net header */

    struct iovec *raw;                  /* mapped guest-physical fragments */
    uint32_t raw_frags;                 /* used entries in 'raw' */
    uint32_t max_raw_frags;             /* capacity of 'raw' */

    struct iovec *vec;                  /* layout: [vhdr][l2][l3][payload...] */

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN]; /* copied-out L2 header bytes */

    uint32_t payload_len;               /* data bytes after the l2+l3 headers */
    uint32_t payload_frags;             /* used payload entries in 'vec' */
    uint32_t max_payload_frags;         /* payload capacity of 'vec' */

    uint16_t hdr_len;                   /* combined l2+l3 header length */
    eth_pkt_types_e packet_type;        /* ucast/mcast/bcast classification */
    uint8_t l4proto;                    /* L4 protocol (IP_PROTO_TCP/UDP) */
};
/*
 * Allocate and set up a tx packet context.
 *
 * @pkt:          receives the new context
 * @max_frags:    maximum number of payload fragments
 * @has_virt_hdr: whether the peer consumes a virtio-net header
 */
void vmxnet_tx_pkt_init(struct VmxnetTxPkt **pkt, uint32_t max_frags,
    bool has_virt_hdr)
{
    struct VmxnetTxPkt *new_pkt = g_malloc0(sizeof(*new_pkt));

    new_pkt->max_payload_frags = max_frags;
    new_pkt->max_raw_frags = max_frags;
    new_pkt->has_virt_hdr = has_virt_hdr;

    /* payload entries follow the reserved vhdr/l2/l3 slots */
    new_pkt->vec = g_malloc(sizeof(*new_pkt->vec) *
        (max_frags + VMXNET_TX_PKT_PL_START_FRAG));
    new_pkt->raw = g_malloc(sizeof(*new_pkt->raw) * max_frags);

    /* a zero-length vhdr slot keeps the iovec indexes stable when unused */
    new_pkt->vec[VMXNET_TX_PKT_VHDR_FRAG].iov_base = &new_pkt->virt_hdr;
    new_pkt->vec[VMXNET_TX_PKT_VHDR_FRAG].iov_len =
        has_virt_hdr ? sizeof(new_pkt->virt_hdr) : 0;

    new_pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_base = &new_pkt->l2_hdr;
    new_pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base = NULL;
    new_pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len = 0;

    *pkt = new_pkt;
}
/* Release all resources owned by the tx context; NULL is tolerated. */
void vmxnet_tx_pkt_uninit(struct VmxnetTxPkt *pkt)
{
    if (!pkt) {
        return;
    }

    g_free(pkt->vec);
    g_free(pkt->raw);
    g_free(pkt);
}
/*
 * Patch up the IPv4 header of a GSO packet: set the total length, compute
 * the IP header checksum, and seed the L4 checksum field with the pseudo-
 * header sum (the backend completes the L4 checksum per segment).
 */
void vmxnet_tx_pkt_update_ip_checksums(struct VmxnetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t ph_raw_csum;
    assert(pkt);
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    struct ip_header *ip_hdr;

    /* Only the GSO flavours that carry an IPv4 header need fixing up */
    if (VIRTIO_NET_HDR_GSO_TCPV4 != gso_type &&
        VIRTIO_NET_HDR_GSO_UDP != gso_type) {
        return;
    }

    ip_hdr = pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base;

    /* an IP datagram cannot exceed 64K; leave oversized packets alone */
    if (pkt->payload_len + pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len);

    /* Calculate IP header checksum */
    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);

    /* Calculate IP pseudo header checksum */
    ph_raw_csum = eth_calc_pseudo_hdr_csum(ip_hdr, pkt->payload_len);
    csum = cpu_to_be16(~net_checksum_finish(ph_raw_csum));
    /* store it at csum_offset inside the payload (start of the L4 header) */
    iov_from_buf(&pkt->vec[VMXNET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
        pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}
/* Cache the combined L2+L3 header size after header parsing. */
static void vmxnet_tx_pkt_calculate_hdr_len(struct VmxnetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_len
                 + pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len;
}
/*
 * Pull the L2 and L3 headers out of the raw guest fragments into the
 * dedicated header iovec slots, record the L4 protocol and classify the
 * packet.  Returns false (leaving the corresponding iov_len at 0) when
 * the raw data is too short or the headers are malformed.
 */
static bool vmxnet_tx_pkt_parse_headers(struct VmxnetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG];

    /* copy out the largest possible L2 header, then size it precisely */
    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < ETH_MAX_L2_HDR_LEN) {
        l2_hdr->iov_len = 0;
        return false;
    } else {
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr->iov_base, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        l3_hdr->iov_base = g_malloc(ETH_MAX_IP4_HDR_LEN);

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        /*
         * FIX: a guest-controlled IHL smaller than the fixed IPv4 header
         * made "l3_hdr->iov_len - sizeof(struct ip_header)" below underflow
         * to a huge size_t, letting iov_to_buf() write far past the
         * ETH_MAX_IP4_HDR_LEN (60-byte) buffer (CVE-2016-6835).  Reject
         * such headers.  IP_HDR_GET_LEN is bounded above by 60 ==
         * ETH_MAX_IP4_HDR_LEN, so no upper-bound check is needed.
         */
        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = ((struct ip_header *) l3_hdr->iov_base)->ip_p;

        /* copy optional IPv4 header data */
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                l2_hdr->iov_len + sizeof(struct ip_header),
                                l3_hdr->iov_base + sizeof(struct ip_header),
                                l3_hdr->iov_len - sizeof(struct ip_header));
        if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }
        break;

    case ETH_P_IPV6:
        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &pkt->l4proto, &full_ip6hdr_len)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_base = g_malloc(full_ip6hdr_len);

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;

    default:
        l3_hdr->iov_len = 0;
        break;
    }

    vmxnet_tx_pkt_calculate_hdr_len(pkt);
    pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    return true;
}
/* Re-point the payload iovec entries at the raw data past the headers. */
static bool vmxnet_tx_pkt_rebuild_payload(struct VmxnetTxPkt *pkt)
{
    size_t payload_bytes;

    payload_bytes = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;

    pkt->payload_frags = iov_copy(&pkt->vec[VMXNET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, payload_bytes);

    if (pkt->payload_frags == (uint32_t) -1) {
        /* iov_copy could not fit the data into max_payload_frags entries */
        return false;
    }

    pkt->payload_len = payload_bytes;
    return true;
}
/* Analyze the attached raw data: parse headers, then set up the payload. */
bool vmxnet_tx_pkt_parse(struct VmxnetTxPkt *pkt)
{
    if (!vmxnet_tx_pkt_parse_headers(pkt)) {
        return false;
    }

    return vmxnet_tx_pkt_rebuild_payload(pkt);
}
/* Direct access to the packet's virtio-net header. */
struct virtio_net_hdr *vmxnet_tx_pkt_get_vhdr(struct VmxnetTxPkt *pkt)
{
    assert(pkt != NULL);

    return &pkt->virt_hdr;
}
/* Derive the VIRTIO_NET_HDR_GSO_* value for this packet, or GSO_NONE
 * when TSO is disabled. */
static uint8_t vmxnet_tx_pkt_get_gso_type(struct VmxnetTxPkt *pkt,
    bool tso_enable)
{
    uint16_t l3_proto;

    if (!tso_enable) {
        return VIRTIO_NET_HDR_GSO_NONE;
    }

    l3_proto = eth_get_l3_proto(pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_base,
        pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_len);

    return eth_get_gso_type(l3_proto,
        pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base, pkt->l4proto);
}
/*
 * Fill in the virtio-net header from the parsed packet state.
 *
 * @tso_enable:  allow GSO types other than NONE
 * @csum_enable: request L4 checksum offload (must be set when tso_enable is)
 * @gso_size:    MSS for segmentation; aligned down to the 8-byte IP
 *               fragmentation unit
 */
void vmxnet_tx_pkt_build_vheader(struct VmxnetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    assert(pkt);

    /* csum has to be enabled if tso is. */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = vmxnet_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = IP_FRAG_ALIGN_SIZE(gso_size);
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        /* TCP header length is variable: read th_off (32-bit words)
         * from the start of the payload */
        iov_to_buf(&pkt->vec[VMXNET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                   0, &l4hdr, sizeof(l4hdr));
        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = IP_FRAG_ALIGN_SIZE(gso_size);
        break;

    default:
        /* vmxnet_tx_pkt_get_gso_type() only produces the values above */
        assert(false);
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }
}
/*
 * Tag the packet with @vlan: re-tags an existing VLAN header in place,
 * or inserts a fresh one and grows the cached L2 header length.
 */
void vmxnet_tx_pkt_setup_vlan_header(struct VmxnetTxPkt *pkt, uint16_t vlan)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers(pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}
/*
 * Map a guest-physical data fragment and append it to the raw iovec.
 * Returns false when the region could not be (fully) mapped; a partially
 * mapped entry is still recorded so reset() can unmap it later.
 */
bool vmxnet_tx_pkt_add_raw_fragment(struct VmxnetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);
    assert(pkt->max_raw_frags > pkt->raw_frags);

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    /* read-only mapping; cpu_physical_memory_map may shorten mapped_len */
    ventry->iov_base = cpu_physical_memory_map(pa, &mapped_len, false);
    ventry->iov_len = mapped_len;
    /* count the entry only if the mapping succeeded at all */
    pkt->raw_frags += !!ventry->iov_base;

    if ((ventry->iov_base == NULL) || (len != mapped_len)) {
        return false;
    }

    return true;
}
/* Destination class determined during header parsing. */
eth_pkt_types_e vmxnet_tx_pkt_get_packet_type(struct VmxnetTxPkt *pkt)
{
    assert(pkt != NULL);

    return pkt->packet_type;
}
/* Complete on-wire length: headers plus payload. */
size_t vmxnet_tx_pkt_get_total_len(struct VmxnetTxPkt *pkt)
{
    assert(pkt != NULL);

    return (size_t)pkt->hdr_len + pkt->payload_len;
}
/* Print a summary of the packet (no-op unless VMXNET_TX_PKT_DEBUG). */
void vmxnet_tx_pkt_dump(struct VmxnetTxPkt *pkt)
{
#ifdef VMXNET_TX_PKT_DEBUG
    assert(pkt);

    /* FIX: iov_len is size_t — %lu is wrong on LLP64/32-bit hosts, use %zu */
    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %zu, "
        "l3hdr_len: %zu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}
/*
 * Return the context to a pristine state between packets: drop the L3
 * header buffer, zero header/payload iovec lengths and unmap all guest
 * memory fragments.
 */
void vmxnet_tx_pkt_reset(struct VmxnetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    /* the L3 slot owns a g_malloc'ed buffer (see parse_headers) */
    g_free(pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base);
    pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base = NULL;

    assert(pkt->vec);

    for (i = VMXNET_TX_PKT_L2HDR_FRAG;
        i < pkt->payload_frags + VMXNET_TX_PKT_PL_START_FRAG; i++) {
        pkt->vec[i].iov_len = 0;
    }
    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    assert(pkt->raw);
    for (i = 0; i < pkt->raw_frags; i++) {
        assert(pkt->raw[i].iov_base);
        /* mapped read-only in add_raw_fragment, so is_write == false */
        cpu_physical_memory_unmap(pkt->raw[i].iov_base, pkt->raw[i].iov_len,
                                  false, pkt->raw[i].iov_len);
        pkt->raw[i].iov_len = 0;
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->packet_type = 0;
    pkt->l4proto = 0;
}
/*
 * Compute the L4 (TCP/UDP) checksum in software for peers that do not
 * accept a virtio header carrying VIRTIO_NET_HDR_F_NEEDS_CSUM.
 */
static void vmxnet_tx_pkt_do_sw_csum(struct VmxnetTxPkt *pkt)
{
    struct iovec *iov = &pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG];
    uint32_t csum_cntr;
    uint16_t csum = 0;
    /* num of iovec without vhdr */
    uint32_t iov_len = pkt->payload_frags + VMXNET_TX_PKT_PL_START_FRAG - 1;
    uint16_t csl;
    struct ip_header *iphdr;
    /* csum_start/csum_offset are relative to the start of the L2 header */
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;

    /* Put zero to checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csl = pkt->payload_len;

    /* data checksum */
    csum_cntr =
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl);
    /* add pseudo header to csum */
    iphdr = pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base;
    csum_cntr += eth_calc_pseudo_hdr_csum(iphdr, csl);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}
/* Slot positions inside the sg list built for software fragmentation:
 * the shared L2/L3 headers come first, payload chunks follow. */
enum {
    VMXNET_TX_PKT_FRAGMENT_L2_HDR_POS = 0,
    VMXNET_TX_PKT_FRAGMENT_L3_HDR_POS,
    VMXNET_TX_PKT_FRAGMENT_HEADER_NUM
};
#define VMXNET_MAX_FRAG_SG_LIST (64)
/*
 * Gather up to gso_size bytes of payload from pkt->vec (starting at the
 * cursor *src_idx / *src_offset) into dst[] after the reserved header
 * slots.  Advances the cursor and returns the number of bytes collected.
 */
static size_t vmxnet_tx_pkt_fetch_fragment(struct VmxnetTxPkt *pkt,
    int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    *dst_idx = VMXNET_TX_PKT_FRAGMENT_HEADER_NUM;

    while (fetched < pkt->virt_hdr.gso_size) {

        /* no more place in fragment iov */
        if (*dst_idx == VMXNET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in iovec */
        if (*src_idx == (pkt->payload_frags + VMXNET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            pkt->virt_hdr.gso_size - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        /* current source element exhausted: move on to the next one */
        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}
/*
 * Software IPv4 fragmentation fallback: split the payload into
 * gso_size-byte chunks, patching the shared IP header (fragment offset,
 * MF flag, length, checksum) before sending each fragment.
 */
static bool vmxnet_tx_pkt_do_sw_fragmentation(struct VmxnetTxPkt *pkt,
    NetClientState *nc)
{
    struct iovec fragment[VMXNET_MAX_FRAG_SG_LIST];
    size_t fragment_len = 0;
    bool more_frags = false;

    /* some pointers for shorter code */
    void *l2_iov_base, *l3_iov_base;
    size_t l2_iov_len, l3_iov_len;
    int src_idx = VMXNET_TX_PKT_PL_START_FRAG, dst_idx;
    size_t src_offset = 0;
    size_t fragment_offset = 0;

    l2_iov_base = pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_base;
    l2_iov_len = pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG].iov_len;
    l3_iov_base = pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base;
    l3_iov_len = pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len;

    /* Copy headers */
    fragment[VMXNET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
    fragment[VMXNET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
    fragment[VMXNET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
    fragment[VMXNET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;


    /* Put as much data as possible and send */
    do {
        fragment_len = vmxnet_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
            fragment, &dst_idx);

        more_frags = (fragment_offset + fragment_len < pkt->payload_len);

        /* the header buffers are shared between fragments, so each
         * iteration rewrites them for the fragment about to go out */
        eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
            l3_iov_len, fragment_len, fragment_offset, more_frags);

        eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);

        qemu_sendv_packet(nc, fragment, dst_idx);

        fragment_offset += fragment_len;

    } while (more_frags);

    return true;
}
/*
 * Hand the assembled packet to the network layer.  When the peer cannot
 * consume a virtio header, checksum and segmentation are emulated in
 * software here.  Returns false when an oversized (>64K) IP datagram had
 * to be dropped.
 */
bool vmxnet_tx_pkt_send(struct VmxnetTxPkt *pkt, NetClientState *nc)
{
    assert(pkt);

    /* peer cannot offload the checksum: compute it ourselves */
    if (!pkt->has_virt_hdr &&
        pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        vmxnet_tx_pkt_do_sw_csum(pkt);
    }

    /*
     * Since underlying infrastructure does not support IP datagrams longer
     * than 64K we should drop such packets and don't even try to send
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    /* no GSO needed (or the peer handles it): send the iovec as-is */
    if (pkt->has_virt_hdr ||
        pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        qemu_sendv_packet(nc, pkt->vec,
            pkt->payload_frags + VMXNET_TX_PKT_PL_START_FRAG);
        return true;
    }

    return vmxnet_tx_pkt_do_sw_fragmentation(pkt, nc);
}

148
hw/vmxnet_tx_pkt.h Normal file
View File

@ -0,0 +1,148 @@
/*
* QEMU VMWARE VMXNET* paravirtual NICs - TX packets abstraction
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef VMXNET_TX_PKT_H
#define VMXNET_TX_PKT_H
#include "stdint.h"
#include "stdbool.h"
#include "net/eth.h"
#include "exec/hwaddr.h"
/* define to enable packet dump functions */
/*#define VMXNET_TX_PKT_DEBUG*/
struct VmxnetTxPkt;
/**
* Init function for tx packet functionality
*
* @pkt: packet pointer
* @max_frags: max tx ip fragments
* @has_virt_hdr: device uses virtio header.
*/
void vmxnet_tx_pkt_init(struct VmxnetTxPkt **pkt, uint32_t max_frags,
bool has_virt_hdr);
/**
* Clean all tx packet resources.
*
* @pkt: packet.
*/
void vmxnet_tx_pkt_uninit(struct VmxnetTxPkt *pkt);
/**
* get virtio header
*
* @pkt: packet
* @ret: virtio header
*/
struct virtio_net_hdr *vmxnet_tx_pkt_get_vhdr(struct VmxnetTxPkt *pkt);
/**
* build virtio header (will be stored in module context)
*
* @pkt: packet
* @tso_enable: TSO enabled
* @csum_enable: CSO enabled
* @gso_size: MSS size for TSO
*
*/
void vmxnet_tx_pkt_build_vheader(struct VmxnetTxPkt *pkt, bool tso_enable,
bool csum_enable, uint32_t gso_size);
/**
* updates vlan tag, and adds vlan header in case it is missing
*
* @pkt: packet
* @vlan: VLAN tag
*
*/
void vmxnet_tx_pkt_setup_vlan_header(struct VmxnetTxPkt *pkt, uint16_t vlan);
/**
* populate data fragment into pkt context.
*
* @pkt: packet
* @pa: physical address of fragment
* @len: length of fragment
*
*/
bool vmxnet_tx_pkt_add_raw_fragment(struct VmxnetTxPkt *pkt, hwaddr pa,
size_t len);
/**
* fix ip header fields and calculate checksums needed.
*
* @pkt: packet
*
*/
void vmxnet_tx_pkt_update_ip_checksums(struct VmxnetTxPkt *pkt);
/**
* get length of all populated data.
*
* @pkt: packet
* @ret: total data length
*
*/
size_t vmxnet_tx_pkt_get_total_len(struct VmxnetTxPkt *pkt);
/**
* get packet type
*
* @pkt: packet
* @ret: packet type
*
*/
eth_pkt_types_e vmxnet_tx_pkt_get_packet_type(struct VmxnetTxPkt *pkt);
/**
* prints packet data if debug is enabled
*
* @pkt: packet
*
*/
void vmxnet_tx_pkt_dump(struct VmxnetTxPkt *pkt);
/**
* reset tx packet private context (needed to be called between packets)
*
* @pkt: packet
*
*/
void vmxnet_tx_pkt_reset(struct VmxnetTxPkt *pkt);
/**
* Send packet to qemu. handles sw offloads if vhdr is not supported.
*
* @pkt: packet
* @nc: NetClientState
* @ret: operation result
*
*/
bool vmxnet_tx_pkt_send(struct VmxnetTxPkt *pkt, NetClientState *nc);
/**
* parse raw packet data and analyze offload requirements.
*
* @pkt: packet
*
*/
bool vmxnet_tx_pkt_parse(struct VmxnetTxPkt *pkt);
#endif

View File

@ -20,10 +20,34 @@
#include <stdint.h>
uint32_t net_checksum_add(int len, uint8_t *buf);
uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq);
uint16_t net_checksum_finish(uint32_t sum);
uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto,
uint8_t *addrs, uint8_t *buf);
void net_checksum_calculate(uint8_t *data, int length);
/* One-shot checksum over a flat buffer (byte-parity sequence starts at 0). */
static inline uint32_t
net_checksum_add(int len, uint8_t *buf)
{
    return net_checksum_add_cont(len, buf, 0);
}
/* Fold a flat buffer straight down to a final 16-bit Internet checksum. */
static inline uint16_t
net_raw_checksum(uint8_t *data, int length)
{
    return net_checksum_finish(net_checksum_add(length, data));
}
/**
* net_checksum_add_iov: scatter-gather vector checksumming
*
* @iov: input scatter-gather array
* @iov_cnt: number of array elements
* @iov_off: starting iov offset for checksumming
* @size: length of data to be checksummed
*/
uint32_t net_checksum_add_iov(const struct iovec *iov,
const unsigned int iov_cnt,
uint32_t iov_off, uint32_t size);
#endif /* QEMU_NET_CHECKSUM_H */

347
include/net/eth.h Normal file
View File

@ -0,0 +1,347 @@
/*
* QEMU network structures definitions and helper functions
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Portions developed by Free Software Foundation, Inc
* Copyright (C) 1991-1997, 2001, 2003, 2006 Free Software Foundation, Inc.
* See netinet/ip6.h and netinet/in.h (GNU C Library)
*
* Portions developed by Igor Kovalenko
* Copyright (c) 2006 Igor Kovalenko
* See hw/rtl8139.c (QEMU)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef QEMU_ETH_H
#define QEMU_ETH_H
#include <sys/types.h>
#include <string.h>
#include "qemu/bswap.h"
#include "qemu/iov.h"
#define ETH_ALEN 6
struct eth_header {
uint8_t h_dest[ETH_ALEN]; /* destination eth addr */
uint8_t h_source[ETH_ALEN]; /* source ether addr */
uint16_t h_proto; /* packet type ID field */
};
struct vlan_header {
uint16_t h_tci; /* priority and VLAN ID */
uint16_t h_proto; /* encapsulated protocol */
};
struct ip_header {
uint8_t ip_ver_len; /* version and header length */
uint8_t ip_tos; /* type of service */
uint16_t ip_len; /* total length */
uint16_t ip_id; /* identification */
uint16_t ip_off; /* fragment offset field */
uint8_t ip_ttl; /* time to live */
uint8_t ip_p; /* protocol */
uint16_t ip_sum; /* checksum */
uint32_t ip_src, ip_dst; /* source and destination address */
};
typedef struct tcp_header {
uint16_t th_sport; /* source port */
uint16_t th_dport; /* destination port */
uint32_t th_seq; /* sequence number */
uint32_t th_ack; /* acknowledgment number */
uint16_t th_offset_flags; /* data offset, reserved 6 bits, */
/* TCP protocol flags */
uint16_t th_win; /* window */
uint16_t th_sum; /* checksum */
uint16_t th_urp; /* urgent pointer */
} tcp_header;
typedef struct udp_header {
uint16_t uh_sport; /* source port */
uint16_t uh_dport; /* destination port */
uint16_t uh_ulen; /* udp length */
uint16_t uh_sum; /* udp checksum */
} udp_header;
typedef struct ip_pseudo_header {
uint32_t ip_src;
uint32_t ip_dst;
uint8_t zeros;
uint8_t ip_proto;
uint16_t ip_payload;
} ip_pseudo_header;
/* IPv6 address */
struct in6_addr {
union {
uint8_t __u6_addr8[16];
} __in6_u;
};
struct ip6_header {
union {
struct ip6_hdrctl {
uint32_t ip6_un1_flow; /* 4 bits version, 8 bits TC,
20 bits flow-ID */
uint16_t ip6_un1_plen; /* payload length */
uint8_t ip6_un1_nxt; /* next header */
uint8_t ip6_un1_hlim; /* hop limit */
} ip6_un1;
uint8_t ip6_un2_vfc; /* 4 bits version, top 4 bits tclass */
struct ip6_ecn_access {
uint8_t ip6_un3_vfc; /* 4 bits version, top 4 bits tclass */
uint8_t ip6_un3_ecn; /* 2 bits ECN, top 6 bits payload length */
} ip6_un3;
} ip6_ctlun;
struct in6_addr ip6_src; /* source address */
struct in6_addr ip6_dst; /* destination address */
};
struct ip6_ext_hdr {
uint8_t ip6r_nxt; /* next header */
uint8_t ip6r_len; /* length in units of 8 octets */
};
struct udp_hdr {
uint16_t uh_sport; /* source port */
uint16_t uh_dport; /* destination port */
uint16_t uh_ulen; /* udp length */
uint16_t uh_sum; /* udp checksum */
};
struct tcp_hdr {
u_short th_sport; /* source port */
u_short th_dport; /* destination port */
uint32_t th_seq; /* sequence number */
uint32_t th_ack; /* acknowledgment number */
#ifdef HOST_WORDS_BIGENDIAN
u_char th_off : 4, /* data offset */
th_x2:4; /* (unused) */
#else
u_char th_x2 : 4, /* (unused) */
th_off:4; /* data offset */
#endif
#define TH_ELN 0x1 /* explicit loss notification */
#define TH_ECN 0x2 /* explicit congestion notification */
#define TH_FS 0x4 /* fast start */
u_char th_flags;
#define TH_FIN 0x01
#define TH_SYN 0x02
#define TH_RST 0x04
#define TH_PUSH 0x08
#define TH_ACK 0x10
#define TH_URG 0x20
u_short th_win; /* window */
u_short th_sum; /* checksum */
u_short th_urp; /* urgent pointer */
};
#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt
#define ip6_ecn_acc ip6_ctlun.ip6_un3.ip6_un3_ecn
#define PKT_GET_ETH_HDR(p) \
((struct eth_header *)(p))
#define PKT_GET_VLAN_HDR(p) \
((struct vlan_header *) (((uint8_t *)(p)) + sizeof(struct eth_header)))
#define PKT_GET_DVLAN_HDR(p) \
(PKT_GET_VLAN_HDR(p) + 1)
#define PKT_GET_IP_HDR(p) \
((struct ip_header *)(((uint8_t *)(p)) + eth_get_l2_hdr_length(p)))
#define IP_HDR_GET_LEN(p) \
((((struct ip_header *)p)->ip_ver_len & 0x0F) << 2)
#define PKT_GET_IP_HDR_LEN(p) \
(IP_HDR_GET_LEN(PKT_GET_IP_HDR(p)))
#define PKT_GET_IP6_HDR(p) \
((struct ip6_header *) (((uint8_t *)(p)) + eth_get_l2_hdr_length(p)))
#define IP_HEADER_VERSION(ip) \
((ip->ip_ver_len >> 4)&0xf)
#define ETH_P_IP (0x0800)
#define ETH_P_IPV6 (0x86dd)
#define ETH_P_VLAN (0x8100)
#define ETH_P_DVLAN (0x88a8)
#define VLAN_VID_MASK 0x0fff
#define IP_HEADER_VERSION_4 (4)
#define IP_HEADER_VERSION_6 (6)
#define IP_PROTO_TCP (6)
#define IP_PROTO_UDP (17)
#define IPTOS_ECN_MASK 0x03
#define IPTOS_ECN(x) ((x) & IPTOS_ECN_MASK)
#define IPTOS_ECN_CE 0x03
#define IP6_ECN_MASK 0xC0
#define IP6_ECN(x) ((x) & IP6_ECN_MASK)
#define IP6_ECN_CE 0xC0
#define IP4_DONT_FRAGMENT_FLAG (1 << 14)
#define IS_SPECIAL_VLAN_ID(x) \
(((x) == 0) || ((x) == 0xFFF))
#define ETH_MAX_L2_HDR_LEN \
(sizeof(struct eth_header) + 2 * sizeof(struct vlan_header))
#define ETH_MAX_IP4_HDR_LEN (60)
#define ETH_MAX_IP_DGRAM_LEN (0xFFFF)
#define IP_FRAG_UNIT_SIZE (8)
#define IP_FRAG_ALIGN_SIZE(x) ((x) & ~0x7)
#define IP_RF 0x8000 /* reserved fragment flag */
#define IP_DF 0x4000 /* don't fragment flag */
#define IP_MF 0x2000 /* more fragments flag */
#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
#define IP6_EXT_GRANULARITY (8) /* Size granularity for
IPv6 extension headers */
/* IP6 extension header types */
#define IP6_HOP_BY_HOP (0)
#define IP6_ROUTING (43)
#define IP6_FRAGMENT (44)
#define IP6_ESP (50)
#define IP6_AUTHENTICATION (51)
#define IP6_NONE (59)
#define IP6_DESTINATON (60)
#define IP6_MOBILITY (135)
/* True when the destination MAC has the group (multicast) bit set. */
static inline int is_multicast_ether_addr(const uint8_t *addr)
{
    return (addr[0] & 0x01) != 0;
}
/* True only for the all-ones broadcast address ff:ff:ff:ff:ff:ff. */
static inline int is_broadcast_ether_addr(const uint8_t *addr)
{
    uint8_t acc = 0xff;
    int i;

    for (i = 0; i < 6; i++) {
        acc &= addr[i];
    }
    return acc == 0xff;
}
/* Unicast means the group (multicast) bit of the first octet is clear. */
static inline int is_unicast_ether_addr(const uint8_t *addr)
{
    return (addr[0] & 0x01) == 0;
}
/* Destination classification of an Ethernet frame.
 * NOTE(review): 0xAABBCC00 appears to be an arbitrary magic base,
 * presumably to make the values recognizable in dumps — confirm. */
typedef enum {
    ETH_PKT_UCAST = 0xAABBCC00,
    ETH_PKT_BCAST,
    ETH_PKT_MCAST
} eth_pkt_types_e;
/* Classify a frame by its destination MAC.  Broadcast is a subset of
 * multicast, so it must be tested first. */
static inline eth_pkt_types_e
get_eth_packet_type(const struct eth_header *ehdr)
{
    if (is_broadcast_ether_addr(ehdr->h_dest)) {
        return ETH_PKT_BCAST;
    }

    return is_multicast_ether_addr(ehdr->h_dest) ? ETH_PKT_MCAST
                                                 : ETH_PKT_UCAST;
}
/*
 * Size of the L2 header, accounting for single and stacked (802.1ad)
 * VLAN tags.
 */
static inline uint32_t
eth_get_l2_hdr_length(const void *p)
{
    uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto);
    struct vlan_header *hvlan = PKT_GET_VLAN_HDR(p);
    switch (proto) {
    case ETH_P_VLAN:
        return sizeof(struct eth_header) + sizeof(struct vlan_header);
    case ETH_P_DVLAN:
        /* FIX: h_proto is network byte order on the wire; it must be
         * byte-swapped before comparing with the host-order ETH_P_VLAN
         * constant (the unswapped compare never matched on little-endian
         * hosts, under-counting double-tagged headers by 4 bytes). */
        if (be16_to_cpu(hvlan->h_proto) == ETH_P_VLAN) {
            return sizeof(struct eth_header) + 2 * sizeof(struct vlan_header);
        } else {
            return sizeof(struct eth_header) + sizeof(struct vlan_header);
        }
    default:
        return sizeof(struct eth_header);
    }
}
/* TCI (priority + VLAN ID) of a tagged frame, or 0 when untagged. */
static inline uint16_t
eth_get_pkt_tci(const void *p)
{
    uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto);

    if (proto == ETH_P_VLAN || proto == ETH_P_DVLAN) {
        return be16_to_cpu(PKT_GET_VLAN_HDR(p)->h_tci);
    }

    return 0;
}
/*
 * If the frame at 'p' carries a VLAN (or stacked) tag, rebuild an
 * untagged Ethernet header into new_ehdr_buf, report where the payload
 * now starts and the outer tag's TCI.  For a double tag the inner VLAN
 * header is preserved after the rebuilt Ethernet header.
 * Returns false (outputs untouched) when the frame is not tagged.
 */
static inline bool
eth_strip_vlan(const void *p, uint8_t *new_ehdr_buf,
               uint16_t *payload_offset, uint16_t *tci)
{
    uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto);
    struct vlan_header *hvlan = PKT_GET_VLAN_HDR(p);
    struct eth_header *new_ehdr = (struct eth_header *) new_ehdr_buf;

    switch (proto) {
    case ETH_P_VLAN:
    case ETH_P_DVLAN:
        memcpy(new_ehdr->h_source, PKT_GET_ETH_HDR(p)->h_source, ETH_ALEN);
        memcpy(new_ehdr->h_dest, PKT_GET_ETH_HDR(p)->h_dest, ETH_ALEN);
        /* the encapsulated protocol becomes the new EtherType */
        new_ehdr->h_proto = hvlan->h_proto;
        *tci = be16_to_cpu(hvlan->h_tci);
        *payload_offset =
            sizeof(struct eth_header) + sizeof(struct vlan_header);
        if (be16_to_cpu(new_ehdr->h_proto) == ETH_P_VLAN) {
            /* double tag: carry the inner VLAN header over */
            memcpy(PKT_GET_VLAN_HDR(new_ehdr),
                   PKT_GET_DVLAN_HDR(p),
                   sizeof(struct vlan_header));
            *payload_offset += sizeof(struct vlan_header);
        }
        return true;
    default:
        return false;
    }
}
/*
 * EtherType of the payload: the last two bytes of the (possibly
 * VLAN-extended) L2 header, converted from network byte order.
 * NOTE(review): the uint16_t* cast assumes 2-byte alignment of the
 * header buffer — holds for the even-sized Ethernet/VLAN headers used
 * here, but worth confirming for new callers.
 */
static inline uint16_t
eth_get_l3_proto(const void *l2hdr, size_t l2hdr_len)
{
    uint8_t *proto_ptr = (uint8_t *) l2hdr + l2hdr_len - sizeof(uint16_t);
    return be16_to_cpup((uint16_t *)proto_ptr);
}
void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
bool *is_new);
uint8_t eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto);
void eth_get_protocols(const uint8_t *headers,
uint32_t hdr_length,
bool *isip4, bool *isip6,
bool *isudp, bool *istcp);
void eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
void *l3hdr, size_t l3hdr_len,
size_t l3payload_len,
size_t frag_offset, bool more_frags);
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len);
uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl);
bool
eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
size_t ip6hdr_off, uint8_t *l4proto,
size_t *full_hdr_len);
#endif

View File

@ -11,6 +11,11 @@
#define MAX_QUEUE_NUM 1024
/* Maximum GSO packet size (64k) plus plenty of room for
* the ethernet and virtio_net headers
*/
#define NET_BUFSIZE (4096 + 65536)
struct MACAddr {
uint8_t a[6];
};

View File

@ -1,6 +1,7 @@
common-obj-y = net.o queue.o checksum.o util.o hub.o
common-obj-y += socket.o
common-obj-y += dump.o
common-obj-y += eth.o
common-obj-$(CONFIG_POSIX) += tap.o
common-obj-$(CONFIG_LINUX) += tap-linux.o
common-obj-$(CONFIG_WIN32) += tap-win32.o

View File

@ -15,21 +15,23 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu-common.h"
#include "net/checksum.h"
#define PROTO_TCP 6
#define PROTO_UDP 17
uint32_t net_checksum_add(int len, uint8_t *buf)
uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq)
{
uint32_t sum = 0;
int i;
for (i = 0; i < len; i++) {
if (i & 1)
sum += (uint32_t)buf[i];
else
sum += (uint32_t)buf[i] << 8;
for (i = seq; i < seq + len; i++) {
if (i & 1) {
sum += (uint32_t)buf[i - seq];
} else {
sum += (uint32_t)buf[i - seq] << 8;
}
}
return sum;
}
@ -83,3 +85,31 @@ void net_checksum_calculate(uint8_t *data, int length)
data[14+hlen+csum_offset] = csum >> 8;
data[14+hlen+csum_offset+1] = csum & 0xff;
}
/*
 * Checksum 'size' bytes of a scatter-gather list starting 'iov_off'
 * bytes into it.  The running 'seq' offset keeps the odd/even byte
 * placement consistent across chunk boundaries.
 * (FIX: dropped the local 'buf_off', which was computed but never read.)
 */
uint32_t
net_checksum_add_iov(const struct iovec *iov, const unsigned int iov_cnt,
                     uint32_t iov_off, uint32_t size)
{
    size_t iovec_off;
    unsigned int i;
    uint32_t res = 0;
    uint32_t seq = 0;

    iovec_off = 0;
    for (i = 0; i < iov_cnt && size; i++) {
        /* skip elements that end before the requested start offset */
        if (iov_off < (iovec_off + iov[i].iov_len)) {
            size_t len = MIN((iovec_off + iov[i].iov_len) - iov_off, size);
            void *chunk_buf = iov[i].iov_base + (iov_off - iovec_off);

            res += net_checksum_add_cont(len, chunk_buf, seq);
            seq += len;

            iov_off += len;
            size -= len;
        }
        iovec_off += iov[i].iov_len;
    }
    return res;
}

217
net/eth.c Normal file
View File

@ -0,0 +1,217 @@
/*
* QEMU network structures definitions and helper functions
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "net/eth.h"
#include "net/checksum.h"
#include "qemu-common.h"
#include "net/tap.h"
/*
 * Tag a frame with an 802.1Q VLAN header located right behind the
 * Ethernet header. If the frame is already VLAN-tagged (single or
 * double tag), only the TCI is rewritten; otherwise the original
 * ethertype is pushed into the new VLAN header and the Ethernet
 * ethertype becomes ETH_P_VLAN.
 *
 * @ehdr:     Ethernet header of the frame (modified in place)
 * @vlan_tag: TCI value to store (host order; converted to big endian)
 * @is_new:   out: true if a VLAN header was inserted, false if reused
 */
void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
    bool *is_new)
{
    struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);
    uint16_t proto = be16_to_cpu(ehdr->h_proto);

    if (proto == ETH_P_VLAN || proto == ETH_P_DVLAN) {
        /* Frame already carries a VLAN header - just refresh the TCI */
        *is_new = false;
    } else {
        /* Shift the original ethertype into the freshly created tag */
        vhdr->h_proto = ehdr->h_proto;
        ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
        *is_new = true;
    }
    vhdr->h_tci = cpu_to_be16(vlan_tag);
}
/*
 * Map an L3/L4 protocol pair to the matching VIRTIO_NET_HDR_GSO_* type,
 * OR-ing in VIRTIO_NET_HDR_GSO_ECN when the IP header signals congestion
 * (CE codepoint).
 *
 * @l3_proto: ethertype of the L3 header (ETH_P_IP or ETH_P_IPV6)
 * @l3_hdr:   pointer to the raw L3 header bytes
 * @l4proto:  L4 protocol number (IP_PROTO_TCP / IP_PROTO_UDP)
 *
 * Aborts via assert() on combinations with no GSO offload equivalent
 * (e.g. UDP over IPv6 here, or unknown protocols).
 */
uint8_t
eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
{
    uint8_t ecn_state = 0;

    if (l3_proto == ETH_P_IP) {
        struct ip_header *iphdr = (struct ip_header *) l3_hdr;

        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
                ecn_state = VIRTIO_NET_HDR_GSO_ECN;
            }
            switch (l4proto) {
            case IP_PROTO_TCP:
                return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
            case IP_PROTO_UDP:
                return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
            default:
                break;
            }
        }
    } else if (l3_proto == ETH_P_IPV6) {
        struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;

        if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
            ecn_state = VIRTIO_NET_HDR_GSO_ECN;
        }
        if (l4proto == IP_PROTO_TCP) {
            return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
        }
    }

    /* Unsupported offload combination */
    assert(false);
    return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
}
/*
 * Classify a packet's headers: detect IPv4/IPv6 at L3 and TCP/UDP at L4.
 *
 * @headers:    contiguous buffer holding at least the L2+L3 headers
 * @hdr_length: number of valid bytes in @headers (asserted to cover
 *              the L2 header, and the full IPv4 header when IPv4)
 * @isip4/@isip6/@isudp/@istcp: out flags, all reset to false first
 *
 * For IPv6 the extension-header chain is walked via eth_parse_ipv6_hdr();
 * on parse failure only *isip6 is set.
 */
void eth_get_protocols(const uint8_t *headers,
                       uint32_t hdr_length,
                       bool *isip4, bool *isip6,
                       bool *isudp, bool *istcp)
{
    size_t l2hdr_len = eth_get_l2_hdr_length(headers);
    int proto;

    assert(hdr_length >= eth_get_l2_hdr_length(headers));

    *isip4 = false;
    *isip6 = false;
    *isudp = false;
    *istcp = false;

    proto = eth_get_l3_proto(headers, l2hdr_len);
    switch (proto) {
    case ETH_P_IP: {
        struct ip_header *iphdr;

        *isip4 = true;
        assert(hdr_length >=
               eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));
        iphdr = PKT_GET_IP_HDR(headers);
        if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
            *istcp = (iphdr->ip_p == IP_PROTO_TCP);
            *isudp = (iphdr->ip_p == IP_PROTO_UDP);
        }
        break;
    }
    case ETH_P_IPV6: {
        uint8_t l4proto;
        size_t full_ip6hdr_len;
        struct iovec hdr_vec = {
            .iov_base = (void *) headers,
            .iov_len = hdr_length,
        };

        *isip6 = true;
        if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
                               &l4proto, &full_ip6hdr_len)) {
            *istcp = (l4proto == IP_PROTO_TCP);
            *isudp = (l4proto == IP_PROTO_UDP);
        }
        break;
    }
    default:
        break;
    }
}
/*
 * Patch an IPv4 header for one fragment of a larger datagram: set the
 * fragment offset, the MF (more fragments) flag and the total length.
 * Frames whose L3 protocol is not IPv4 are left untouched.
 *
 * @l2hdr/@l2hdr_len:   L2 header, used only to test the ethertype
 * @l3hdr/@l3hdr_len:   IPv4 header to patch, and its length
 * @l3payload_len:      payload bytes carried by this fragment
 * @frag_offset:        byte offset of this fragment (must be a multiple
 *                      of IP_FRAG_UNIT_SIZE and fit in IP_OFFMASK units)
 * @more_frags:         true for every fragment except the last
 */
void
eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
    void *l3hdr, size_t l3hdr_len,
    size_t l3payload_len,
    size_t frag_offset, bool more_frags)
{
    struct ip_header *iphdr;
    uint16_t frag_off_units;
    uint16_t flags_kept;
    uint16_t new_ip_off;

    if (eth_get_l3_proto(l2hdr, l2hdr_len) != ETH_P_IP) {
        return;
    }

    iphdr = (struct ip_header *) l3hdr;
    frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;

    assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
    assert((frag_off_units & ~IP_OFFMASK) == 0);

    /* Preserve DF and other flag bits, drop old offset and MF */
    flags_kept = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK | IP_MF);
    new_ip_off = frag_off_units | flags_kept | (more_frags ? IP_MF : 0);
    iphdr->ip_off = cpu_to_be16(new_ip_off);
    iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
}
/*
 * Recompute the IPv4 header checksum in place.
 *
 * @l3hdr:     IPv4 header (checksum field overwritten)
 * @l3hdr_len: header length in bytes, including options
 */
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
    struct ip_header *iphdr = l3hdr;

    /* The checksum field must be zero while the sum is taken */
    iphdr->ip_sum = 0;
    iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
}
uint32_t
eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
{
struct ip_pseudo_header ipph;
ipph.ip_src = iphdr->ip_src;
ipph.ip_dst = iphdr->ip_dst;
ipph.ip_payload = cpu_to_be16(csl);
ipph.ip_proto = iphdr->ip_p;
ipph.zeros = 0;
return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
}
/*
 * Return true if @hdr_type is an IPv6 extension-header "next header"
 * value (as opposed to a final L4 protocol number).
 */
static bool
eth_is_ip6_extension_header_type(uint8_t hdr_type)
{
    return hdr_type == IP6_HOP_BY_HOP ||
           hdr_type == IP6_ROUTING ||
           hdr_type == IP6_FRAGMENT ||
           hdr_type == IP6_ESP ||
           hdr_type == IP6_AUTHENTICATION ||
           hdr_type == IP6_DESTINATON ||
           hdr_type == IP6_MOBILITY;
}
/*
 * Walk an IPv6 header and its extension-header chain inside a
 * scatter-gather packet.
 *
 * @pkt/@pkt_frags:  iovec describing the packet
 * @ip6hdr_off:      byte offset of the IPv6 header within the packet
 * @l4proto:         out: final (non-extension) next-header value
 * @full_hdr_len:    out: total length of the IPv6 header plus all
 *                   extension headers
 *
 * Returns false if the packet is too short to hold the fixed header or
 * any extension header in the chain; in that case the out parameters
 * must not be trusted.
 */
bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
                        size_t ip6hdr_off, uint8_t *l4proto,
                        size_t *full_hdr_len)
{
    struct ip6_header ip6_hdr;
    struct ip6_ext_hdr ext_hdr;
    size_t bytes_read;

    bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
                            &ip6_hdr, sizeof(ip6_hdr));
    if (bytes_read < sizeof(ip6_hdr)) {
        return false;
    }

    *full_hdr_len = sizeof(struct ip6_header);

    if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
        *l4proto = ip6_hdr.ip6_nxt;
        return true;
    }

    do {
        bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
                                &ext_hdr, sizeof(ext_hdr));
        /* Bail out on truncated packets instead of consuming the stale
         * (or uninitialized) ext_hdr contents left from the short read. */
        if (bytes_read < sizeof(ext_hdr)) {
            return false;
        }
        /* ip6r_len counts 8-octet units, excluding the first one */
        *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
    } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));

    *l4proto = ext_hdr.ip6r_nxt;
    return true;
}

View File

@ -497,7 +497,7 @@ ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size)
static ssize_t nc_sendv_compat(NetClientState *nc, const struct iovec *iov,
int iovcnt)
{
uint8_t buffer[4096];
uint8_t buffer[NET_BUFSIZE];
size_t offset;
offset = iov_to_buf(iov, iovcnt, 0, buffer, sizeof(buffer));

View File

@ -40,7 +40,7 @@ typedef struct NetSocketState {
unsigned int index;
unsigned int packet_len;
unsigned int send_index; /* number of bytes sent (only SOCK_STREAM) */
uint8_t buf[4096];
uint8_t buf[NET_BUFSIZE];
struct sockaddr_in dgram_dst; /* contains inet host and port destination iff connectionless (SOCK_DGRAM) */
IOHandler *send_fn; /* differs between SOCK_STREAM/SOCK_DGRAM */
bool read_poll; /* waiting to receive data? */
@ -146,7 +146,7 @@ static void net_socket_send(void *opaque)
NetSocketState *s = opaque;
int size, err;
unsigned l;
uint8_t buf1[4096];
uint8_t buf1[NET_BUFSIZE];
const uint8_t *buf;
size = qemu_recv(s->fd, buf1, sizeof(buf1), 0);
@ -438,6 +438,9 @@ static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
s->fd = fd;
s->listen_fd = -1;
/* Disable Nagle algorithm on TCP sockets to reduce latency */
socket_set_nodelay(fd);
if (is_connected) {
net_socket_connect(s);
} else {

View File

@ -44,17 +44,12 @@
#include "hw/vhost_net.h"
/* Maximum GSO packet size (64k) plus plenty of room for
* the ethernet and virtio_net headers
*/
#define TAP_BUFSIZE (4096 + 65536)
typedef struct TAPState {
NetClientState nc;
int fd;
char down_script[1024];
char down_script_arg[128];
uint8_t buf[TAP_BUFSIZE];
uint8_t buf[NET_BUFSIZE];
bool read_poll;
bool write_poll;
bool using_vnet_hdr;

View File

@ -39,7 +39,7 @@ typedef struct VDEState {
static void vde_to_qemu(void *opaque)
{
VDEState *s = opaque;
uint8_t buf[4096];
uint8_t buf[NET_BUFSIZE];
int size;
size = vde_recv(s->vde, (char *)buf, sizeof(buf), 0);