-----BEGIN PGP SIGNATURE-----

Version: GnuPG v1
 
 iQEcBAABAgAGBQJe62kmAAoJEO8Ells5jWIRZfUH/2bPZrhG4QEKNWbm1LXzam+0
 4dzG3A7vYTKWjfbpzcWtUAELO+4SiUe/IU3gYMiyeWNDKjwm5hX/FMCFjnR1IZXl
 wQ7cvr/7TIsxt9HyrjIkh03PkJBGpCD3uO0DkGd1siDmKLOFNRt0uLsmSvA7Ydvo
 2hH/tc/plYoQAxPSbXBmIqg9hRrks/QAw2kfPba7Adhtzg5x2XrUrP+UOW8NmWcL
 xSo02ExPUSdzPX6I4Enwm1c1KiytlQ77LvazpI2NBlejsI4nqa0Y1WJW7WJ4RMGo
 E1kWDiKt69MoT1SgH7UJnF/ISyUuldksD4fuual5UOysCpwpbAIBKh6/Yod6k0M=
 =3+ix
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/jasowang/tags/net-pull-request' into staging

# gpg: Signature made Thu 18 Jun 2020 14:16:22 BST
# gpg:                using RSA key EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 215D 46F4 8246 689E C77F  3562 EF04 965B 398D 6211

* remotes/jasowang/tags/net-pull-request: (33 commits)
  net: Drop the NetLegacy structure, always use Netdev instead
  net: Drop the legacy "name" parameter from the -net option
  hw/net/e1000e: Do not abort() on invalid PSRCTL register value
  colo-compare: Fix memory leak in packet_enqueue()
  net/colo-compare.c: Correct ordering in complete and finalize
  net/colo-compare.c: Check that colo-compare is active
  net/colo-compare.c: Only hexdump packets if tracing is enabled
  net/colo-compare.c: Fix deadlock in compare_chr_send
  chardev/char.c: Use qemu_co_sleep_ns if in coroutine
  net/colo-compare.c: Create event_bh with the right AioContext
  net: use peer when purging queue in qemu_flush_or_purge_queue_packets()
  net: cadence_gem: Fix RX address filtering
  net: cadence_gem: TX_LAST bit should be set by guest
  net: cadence_gem: Update the reset value for interrupt mask register
  net: cadnece_gem: Update irq_read_clear field of designcfg_debug1 reg
  net: cadence_gem: Add support for jumbo frames
  net: cadence_gem: Fix up code style
  net: cadence_gem: Move tx/rx packet buffert to CadenceGEMState
  net: cadence_gem: Set ISR according to queue in use
  net: cadence_gem: Define access permission for interrupt registers
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2020-06-18 16:52:09 +01:00
commit 292ef18a38
19 changed files with 909 additions and 453 deletions

View File

@ -38,6 +38,7 @@
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/id.h"
#include "qemu/coroutine.h"
#include "chardev/char-mux.h"
@ -119,7 +120,11 @@ static int qemu_chr_write_buffer(Chardev *s,
retry:
res = cc->chr_write(s, buf + *offset, len - *offset);
if (res < 0 && errno == EAGAIN && write_all) {
g_usleep(100);
if (qemu_in_coroutine()) {
qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
} else {
g_usleep(100);
}
goto retry;
}

View File

@ -47,12 +47,6 @@ The 'file' driver for drives is no longer appropriate for character or host
devices and will only accept regular files (S_IFREG). The correct driver
for these file types is 'host_cdrom' or 'host_device' as appropriate.
``-net ...,name=``\ *name* (since 3.1)
''''''''''''''''''''''''''''''''''''''
The ``name`` parameter of the ``-net`` option is a synonym
for the ``id`` parameter, which should now be used instead.
``-smp`` (invalid topologies) (since 3.1)
'''''''''''''''''''''''''''''''''''''''''
@ -441,6 +435,15 @@ What follows is a record of recently removed, formerly deprecated
features that serves as a record for users who have encountered
trouble after a recent upgrade.
System emulator command line arguments
--------------------------------------
``-net ...,name=``\ *name* (removed in 5.1)
'''''''''''''''''''''''''''''''''''''''''''
The ``name`` parameter of the ``-net`` option was a synonym
for the ``id`` parameter, which should now be used instead.
QEMU Machine Protocol (QMP) commands
------------------------------------

View File

@ -41,6 +41,7 @@ obj-$(CONFIG_MILKYMIST) += milkymist-minimac2.o
obj-$(CONFIG_PSERIES) += spapr_llan.o
obj-$(CONFIG_XILINX_ETHLITE) += xilinx_ethlite.o
common-obj-$(CONFIG_VIRTIO_NET) += net_rx_pkt.o
obj-$(CONFIG_VIRTIO_NET) += virtio-net.o
common-obj-$(call land,$(CONFIG_VIRTIO_NET),$(CONFIG_VHOST_NET)) += vhost_net.o
common-obj-$(call lnot,$(call land,$(CONFIG_VIRTIO_NET),$(CONFIG_VHOST_NET))) += vhost_net-stub.o

View File

@ -34,120 +34,123 @@
#include "qemu/module.h"
#include "sysemu/dma.h"
#include "net/checksum.h"
#include "net/eth.h"
#ifdef CADENCE_GEM_ERR_DEBUG
#define DB_PRINT(...) do { \
fprintf(stderr, ": %s: ", __func__); \
fprintf(stderr, ## __VA_ARGS__); \
} while (0)
#else
#define DB_PRINT(...)
#endif
#define CADENCE_GEM_ERR_DEBUG 0
#define DB_PRINT(...) do {\
if (CADENCE_GEM_ERR_DEBUG) { \
qemu_log(": %s: ", __func__); \
qemu_log(__VA_ARGS__); \
} \
} while (0)
#define GEM_NWCTRL (0x00000000/4) /* Network Control reg */
#define GEM_NWCFG (0x00000004/4) /* Network Config reg */
#define GEM_NWSTATUS (0x00000008/4) /* Network Status reg */
#define GEM_USERIO (0x0000000C/4) /* User IO reg */
#define GEM_DMACFG (0x00000010/4) /* DMA Control reg */
#define GEM_TXSTATUS (0x00000014/4) /* TX Status reg */
#define GEM_RXQBASE (0x00000018/4) /* RX Q Base address reg */
#define GEM_TXQBASE (0x0000001C/4) /* TX Q Base address reg */
#define GEM_RXSTATUS (0x00000020/4) /* RX Status reg */
#define GEM_ISR (0x00000024/4) /* Interrupt Status reg */
#define GEM_IER (0x00000028/4) /* Interrupt Enable reg */
#define GEM_IDR (0x0000002C/4) /* Interrupt Disable reg */
#define GEM_IMR (0x00000030/4) /* Interrupt Mask reg */
#define GEM_PHYMNTNC (0x00000034/4) /* Phy Maintenance reg */
#define GEM_RXPAUSE (0x00000038/4) /* RX Pause Time reg */
#define GEM_TXPAUSE (0x0000003C/4) /* TX Pause Time reg */
#define GEM_TXPARTIALSF (0x00000040/4) /* TX Partial Store and Forward */
#define GEM_RXPARTIALSF (0x00000044/4) /* RX Partial Store and Forward */
#define GEM_HASHLO (0x00000080/4) /* Hash Low address reg */
#define GEM_HASHHI (0x00000084/4) /* Hash High address reg */
#define GEM_SPADDR1LO (0x00000088/4) /* Specific addr 1 low reg */
#define GEM_SPADDR1HI (0x0000008C/4) /* Specific addr 1 high reg */
#define GEM_SPADDR2LO (0x00000090/4) /* Specific addr 2 low reg */
#define GEM_SPADDR2HI (0x00000094/4) /* Specific addr 2 high reg */
#define GEM_SPADDR3LO (0x00000098/4) /* Specific addr 3 low reg */
#define GEM_SPADDR3HI (0x0000009C/4) /* Specific addr 3 high reg */
#define GEM_SPADDR4LO (0x000000A0/4) /* Specific addr 4 low reg */
#define GEM_SPADDR4HI (0x000000A4/4) /* Specific addr 4 high reg */
#define GEM_TIDMATCH1 (0x000000A8/4) /* Type ID1 Match reg */
#define GEM_TIDMATCH2 (0x000000AC/4) /* Type ID2 Match reg */
#define GEM_TIDMATCH3 (0x000000B0/4) /* Type ID3 Match reg */
#define GEM_TIDMATCH4 (0x000000B4/4) /* Type ID4 Match reg */
#define GEM_WOLAN (0x000000B8/4) /* Wake on LAN reg */
#define GEM_IPGSTRETCH (0x000000BC/4) /* IPG Stretch reg */
#define GEM_SVLAN (0x000000C0/4) /* Stacked VLAN reg */
#define GEM_MODID (0x000000FC/4) /* Module ID reg */
#define GEM_OCTTXLO (0x00000100/4) /* Octects transmitted Low reg */
#define GEM_OCTTXHI (0x00000104/4) /* Octects transmitted High reg */
#define GEM_TXCNT (0x00000108/4) /* Error-free Frames transmitted */
#define GEM_TXBCNT (0x0000010C/4) /* Error-free Broadcast Frames */
#define GEM_TXMCNT (0x00000110/4) /* Error-free Multicast Frame */
#define GEM_TXPAUSECNT (0x00000114/4) /* Pause Frames Transmitted */
#define GEM_TX64CNT (0x00000118/4) /* Error-free 64 TX */
#define GEM_TX65CNT (0x0000011C/4) /* Error-free 65-127 TX */
#define GEM_TX128CNT (0x00000120/4) /* Error-free 128-255 TX */
#define GEM_TX256CNT (0x00000124/4) /* Error-free 256-511 */
#define GEM_TX512CNT (0x00000128/4) /* Error-free 512-1023 TX */
#define GEM_TX1024CNT (0x0000012C/4) /* Error-free 1024-1518 TX */
#define GEM_TX1519CNT (0x00000130/4) /* Error-free larger than 1519 TX */
#define GEM_TXURUNCNT (0x00000134/4) /* TX under run error counter */
#define GEM_SINGLECOLLCNT (0x00000138/4) /* Single Collision Frames */
#define GEM_MULTCOLLCNT (0x0000013C/4) /* Multiple Collision Frames */
#define GEM_EXCESSCOLLCNT (0x00000140/4) /* Excessive Collision Frames */
#define GEM_LATECOLLCNT (0x00000144/4) /* Late Collision Frames */
#define GEM_DEFERTXCNT (0x00000148/4) /* Deferred Transmission Frames */
#define GEM_CSENSECNT (0x0000014C/4) /* Carrier Sense Error Counter */
#define GEM_OCTRXLO (0x00000150/4) /* Octects Received register Low */
#define GEM_OCTRXHI (0x00000154/4) /* Octects Received register High */
#define GEM_RXCNT (0x00000158/4) /* Error-free Frames Received */
#define GEM_RXBROADCNT (0x0000015C/4) /* Error-free Broadcast Frames RX */
#define GEM_RXMULTICNT (0x00000160/4) /* Error-free Multicast Frames RX */
#define GEM_RXPAUSECNT (0x00000164/4) /* Pause Frames Received Counter */
#define GEM_RX64CNT (0x00000168/4) /* Error-free 64 byte Frames RX */
#define GEM_RX65CNT (0x0000016C/4) /* Error-free 65-127B Frames RX */
#define GEM_RX128CNT (0x00000170/4) /* Error-free 128-255B Frames RX */
#define GEM_RX256CNT (0x00000174/4) /* Error-free 256-512B Frames RX */
#define GEM_RX512CNT (0x00000178/4) /* Error-free 512-1023B Frames RX */
#define GEM_RX1024CNT (0x0000017C/4) /* Error-free 1024-1518B Frames RX */
#define GEM_RX1519CNT (0x00000180/4) /* Error-free 1519-max Frames RX */
#define GEM_RXUNDERCNT (0x00000184/4) /* Undersize Frames Received */
#define GEM_RXOVERCNT (0x00000188/4) /* Oversize Frames Received */
#define GEM_RXJABCNT (0x0000018C/4) /* Jabbers Received Counter */
#define GEM_RXFCSCNT (0x00000190/4) /* Frame Check seq. Error Counter */
#define GEM_RXLENERRCNT (0x00000194/4) /* Length Field Error Counter */
#define GEM_RXSYMERRCNT (0x00000198/4) /* Symbol Error Counter */
#define GEM_RXALIGNERRCNT (0x0000019C/4) /* Alignment Error Counter */
#define GEM_RXRSCERRCNT (0x000001A0/4) /* Receive Resource Error Counter */
#define GEM_RXORUNCNT (0x000001A4/4) /* Receive Overrun Counter */
#define GEM_RXIPCSERRCNT (0x000001A8/4) /* IP header Checksum Error Counter */
#define GEM_RXTCPCCNT (0x000001AC/4) /* TCP Checksum Error Counter */
#define GEM_RXUDPCCNT (0x000001B0/4) /* UDP Checksum Error Counter */
#define GEM_NWCTRL (0x00000000 / 4) /* Network Control reg */
#define GEM_NWCFG (0x00000004 / 4) /* Network Config reg */
#define GEM_NWSTATUS (0x00000008 / 4) /* Network Status reg */
#define GEM_USERIO (0x0000000C / 4) /* User IO reg */
#define GEM_DMACFG (0x00000010 / 4) /* DMA Control reg */
#define GEM_TXSTATUS (0x00000014 / 4) /* TX Status reg */
#define GEM_RXQBASE (0x00000018 / 4) /* RX Q Base address reg */
#define GEM_TXQBASE (0x0000001C / 4) /* TX Q Base address reg */
#define GEM_RXSTATUS (0x00000020 / 4) /* RX Status reg */
#define GEM_ISR (0x00000024 / 4) /* Interrupt Status reg */
#define GEM_IER (0x00000028 / 4) /* Interrupt Enable reg */
#define GEM_IDR (0x0000002C / 4) /* Interrupt Disable reg */
#define GEM_IMR (0x00000030 / 4) /* Interrupt Mask reg */
#define GEM_PHYMNTNC (0x00000034 / 4) /* Phy Maintenance reg */
#define GEM_RXPAUSE (0x00000038 / 4) /* RX Pause Time reg */
#define GEM_TXPAUSE (0x0000003C / 4) /* TX Pause Time reg */
#define GEM_TXPARTIALSF (0x00000040 / 4) /* TX Partial Store and Forward */
#define GEM_RXPARTIALSF (0x00000044 / 4) /* RX Partial Store and Forward */
#define GEM_JUMBO_MAX_LEN (0x00000048 / 4) /* Max Jumbo Frame Size */
#define GEM_HASHLO (0x00000080 / 4) /* Hash Low address reg */
#define GEM_HASHHI (0x00000084 / 4) /* Hash High address reg */
#define GEM_SPADDR1LO (0x00000088 / 4) /* Specific addr 1 low reg */
#define GEM_SPADDR1HI (0x0000008C / 4) /* Specific addr 1 high reg */
#define GEM_SPADDR2LO (0x00000090 / 4) /* Specific addr 2 low reg */
#define GEM_SPADDR2HI (0x00000094 / 4) /* Specific addr 2 high reg */
#define GEM_SPADDR3LO (0x00000098 / 4) /* Specific addr 3 low reg */
#define GEM_SPADDR3HI (0x0000009C / 4) /* Specific addr 3 high reg */
#define GEM_SPADDR4LO (0x000000A0 / 4) /* Specific addr 4 low reg */
#define GEM_SPADDR4HI (0x000000A4 / 4) /* Specific addr 4 high reg */
#define GEM_TIDMATCH1 (0x000000A8 / 4) /* Type ID1 Match reg */
#define GEM_TIDMATCH2 (0x000000AC / 4) /* Type ID2 Match reg */
#define GEM_TIDMATCH3 (0x000000B0 / 4) /* Type ID3 Match reg */
#define GEM_TIDMATCH4 (0x000000B4 / 4) /* Type ID4 Match reg */
#define GEM_WOLAN (0x000000B8 / 4) /* Wake on LAN reg */
#define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */
#define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */
#define GEM_MODID (0x000000FC / 4) /* Module ID reg */
#define GEM_OCTTXLO (0x00000100 / 4) /* Octects transmitted Low reg */
#define GEM_OCTTXHI (0x00000104 / 4) /* Octects transmitted High reg */
#define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */
#define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */
#define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */
#define GEM_TXPAUSECNT (0x00000114 / 4) /* Pause Frames Transmitted */
#define GEM_TX64CNT (0x00000118 / 4) /* Error-free 64 TX */
#define GEM_TX65CNT (0x0000011C / 4) /* Error-free 65-127 TX */
#define GEM_TX128CNT (0x00000120 / 4) /* Error-free 128-255 TX */
#define GEM_TX256CNT (0x00000124 / 4) /* Error-free 256-511 */
#define GEM_TX512CNT (0x00000128 / 4) /* Error-free 512-1023 TX */
#define GEM_TX1024CNT (0x0000012C / 4) /* Error-free 1024-1518 TX */
#define GEM_TX1519CNT (0x00000130 / 4) /* Error-free larger than 1519 TX */
#define GEM_TXURUNCNT (0x00000134 / 4) /* TX under run error counter */
#define GEM_SINGLECOLLCNT (0x00000138 / 4) /* Single Collision Frames */
#define GEM_MULTCOLLCNT (0x0000013C / 4) /* Multiple Collision Frames */
#define GEM_EXCESSCOLLCNT (0x00000140 / 4) /* Excessive Collision Frames */
#define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */
#define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */
#define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */
#define GEM_OCTRXLO (0x00000150 / 4) /* Octects Received register Low */
#define GEM_OCTRXHI (0x00000154 / 4) /* Octects Received register High */
#define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */
#define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */
#define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */
#define GEM_RXPAUSECNT (0x00000164 / 4) /* Pause Frames Received Counter */
#define GEM_RX64CNT (0x00000168 / 4) /* Error-free 64 byte Frames RX */
#define GEM_RX65CNT (0x0000016C / 4) /* Error-free 65-127B Frames RX */
#define GEM_RX128CNT (0x00000170 / 4) /* Error-free 128-255B Frames RX */
#define GEM_RX256CNT (0x00000174 / 4) /* Error-free 256-512B Frames RX */
#define GEM_RX512CNT (0x00000178 / 4) /* Error-free 512-1023B Frames RX */
#define GEM_RX1024CNT (0x0000017C / 4) /* Error-free 1024-1518B Frames RX */
#define GEM_RX1519CNT (0x00000180 / 4) /* Error-free 1519-max Frames RX */
#define GEM_RXUNDERCNT (0x00000184 / 4) /* Undersize Frames Received */
#define GEM_RXOVERCNT (0x00000188 / 4) /* Oversize Frames Received */
#define GEM_RXJABCNT (0x0000018C / 4) /* Jabbers Received Counter */
#define GEM_RXFCSCNT (0x00000190 / 4) /* Frame Check seq. Error Counter */
#define GEM_RXLENERRCNT (0x00000194 / 4) /* Length Field Error Counter */
#define GEM_RXSYMERRCNT (0x00000198 / 4) /* Symbol Error Counter */
#define GEM_RXALIGNERRCNT (0x0000019C / 4) /* Alignment Error Counter */
#define GEM_RXRSCERRCNT (0x000001A0 / 4) /* Receive Resource Error Counter */
#define GEM_RXORUNCNT (0x000001A4 / 4) /* Receive Overrun Counter */
#define GEM_RXIPCSERRCNT (0x000001A8 / 4) /* IP header Checksum Err Counter */
#define GEM_RXTCPCCNT (0x000001AC / 4) /* TCP Checksum Error Counter */
#define GEM_RXUDPCCNT (0x000001B0 / 4) /* UDP Checksum Error Counter */
#define GEM_1588S (0x000001D0/4) /* 1588 Timer Seconds */
#define GEM_1588NS (0x000001D4/4) /* 1588 Timer Nanoseconds */
#define GEM_1588ADJ (0x000001D8/4) /* 1588 Timer Adjust */
#define GEM_1588INC (0x000001DC/4) /* 1588 Timer Increment */
#define GEM_PTPETXS (0x000001E0/4) /* PTP Event Frame Transmitted (s) */
#define GEM_PTPETXNS (0x000001E4/4) /* PTP Event Frame Transmitted (ns) */
#define GEM_PTPERXS (0x000001E8/4) /* PTP Event Frame Received (s) */
#define GEM_PTPERXNS (0x000001EC/4) /* PTP Event Frame Received (ns) */
#define GEM_PTPPTXS (0x000001E0/4) /* PTP Peer Frame Transmitted (s) */
#define GEM_PTPPTXNS (0x000001E4/4) /* PTP Peer Frame Transmitted (ns) */
#define GEM_PTPPRXS (0x000001E8/4) /* PTP Peer Frame Received (s) */
#define GEM_PTPPRXNS (0x000001EC/4) /* PTP Peer Frame Received (ns) */
#define GEM_1588S (0x000001D0 / 4) /* 1588 Timer Seconds */
#define GEM_1588NS (0x000001D4 / 4) /* 1588 Timer Nanoseconds */
#define GEM_1588ADJ (0x000001D8 / 4) /* 1588 Timer Adjust */
#define GEM_1588INC (0x000001DC / 4) /* 1588 Timer Increment */
#define GEM_PTPETXS (0x000001E0 / 4) /* PTP Event Frame Transmitted (s) */
#define GEM_PTPETXNS (0x000001E4 / 4) /*
* PTP Event Frame Transmitted (ns)
*/
#define GEM_PTPERXS (0x000001E8 / 4) /* PTP Event Frame Received (s) */
#define GEM_PTPERXNS (0x000001EC / 4) /* PTP Event Frame Received (ns) */
#define GEM_PTPPTXS (0x000001E0 / 4) /* PTP Peer Frame Transmitted (s) */
#define GEM_PTPPTXNS (0x000001E4 / 4) /* PTP Peer Frame Transmitted (ns) */
#define GEM_PTPPRXS (0x000001E8 / 4) /* PTP Peer Frame Received (s) */
#define GEM_PTPPRXNS (0x000001EC / 4) /* PTP Peer Frame Received (ns) */
/* Design Configuration Registers */
#define GEM_DESCONF (0x00000280/4)
#define GEM_DESCONF2 (0x00000284/4)
#define GEM_DESCONF3 (0x00000288/4)
#define GEM_DESCONF4 (0x0000028C/4)
#define GEM_DESCONF5 (0x00000290/4)
#define GEM_DESCONF6 (0x00000294/4)
#define GEM_DESCONF (0x00000280 / 4)
#define GEM_DESCONF2 (0x00000284 / 4)
#define GEM_DESCONF3 (0x00000288 / 4)
#define GEM_DESCONF4 (0x0000028C / 4)
#define GEM_DESCONF5 (0x00000290 / 4)
#define GEM_DESCONF6 (0x00000294 / 4)
#define GEM_DESCONF6_64B_MASK (1U << 23)
#define GEM_DESCONF7 (0x00000298/4)
#define GEM_DESCONF7 (0x00000298 / 4)
#define GEM_INT_Q1_STATUS (0x00000400 / 4)
#define GEM_INT_Q1_MASK (0x00000640 / 4)
@ -211,10 +214,12 @@
#define GEM_NWCFG_LERR_DISC 0x00010000 /* Discard RX frames with len err */
#define GEM_NWCFG_BUFF_OFST_M 0x0000C000 /* Receive buffer offset mask */
#define GEM_NWCFG_BUFF_OFST_S 14 /* Receive buffer offset shift */
#define GEM_NWCFG_RCV_1538 0x00000100 /* Receive 1538 bytes frame */
#define GEM_NWCFG_UCAST_HASH 0x00000080 /* accept unicast if hash match */
#define GEM_NWCFG_MCAST_HASH 0x00000040 /* accept multicast if hash match */
#define GEM_NWCFG_BCAST_REJ 0x00000020 /* Reject broadcast packets */
#define GEM_NWCFG_PROMISC 0x00000010 /* Accept all packets */
#define GEM_NWCFG_JUMBO_FRAME 0x00000008 /* Jumbo Frames enable */
#define GEM_DMACFG_ADDR_64B (1U << 30)
#define GEM_DMACFG_TX_BD_EXT (1U << 29)
@ -232,6 +237,7 @@
/* GEM_ISR GEM_IER GEM_IDR GEM_IMR */
#define GEM_INT_TXCMPL 0x00000080 /* Transmit Complete */
#define GEM_INT_AMBA_ERR 0x00000040
#define GEM_INT_TXUSED 0x00000008
#define GEM_INT_RXUSED 0x00000004
#define GEM_INT_RXCMPL 0x00000002
@ -345,11 +351,6 @@ static inline unsigned tx_desc_get_last(uint32_t *desc)
return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
}
static inline void tx_desc_set_last(uint32_t *desc)
{
desc[1] |= DESC_1_TX_LAST;
}
static inline unsigned tx_desc_get_length(uint32_t *desc)
{
return desc[1] & DESC_1_LENGTH;
@ -452,6 +453,34 @@ static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx)
/* The broadcast MAC address: 0xFFFFFFFFFFFF */
static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/*
 * gem_get_max_buf_len:
 * @s: the GEM device state
 * @tx: true for the transmit path, false for the receive path
 *
 * Return the maximum frame length in bytes currently allowed by the
 * device configuration.  When jumbo frames are enabled in GEM_NWCFG,
 * the limit is the guest-programmed GEM_JUMBO_MAX_LEN register, capped
 * at the board-set "jumbo-max-len" property value (a guest writing a
 * larger value is reported as a guest error).  Otherwise TX is limited
 * to the standard 1518 bytes, and RX to 1538 or 1518 depending on the
 * GEM_NWCFG_RCV_1538 configuration bit.
 */
static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx)
{
uint32_t size;
if (s->regs[GEM_NWCFG] & GEM_NWCFG_JUMBO_FRAME) {
size = s->regs[GEM_JUMBO_MAX_LEN];
if (size > s->jumbo_max_len) {
/* Guest programmed a jumbo size beyond the device limit: clamp it. */
size = s->jumbo_max_len;
qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be"
" greater than 0x%" PRIx32 "\n", s->jumbo_max_len);
}
} else if (tx) {
size = 1518;
} else {
size = s->regs[GEM_NWCFG] & GEM_NWCFG_RCV_1538 ? 1538 : 1518;
}
return size;
}
/*
 * gem_set_isr:
 * @s: the GEM device state
 * @q: priority queue index (0 = the main queue)
 * @flag: GEM_INT_* bit(s) to raise
 *
 * Latch @flag into the interrupt status register belonging to queue @q,
 * masking out any bits currently disabled in the corresponding interrupt
 * mask register.  Queue 0 uses GEM_ISR/GEM_IMR; queues >= 1 use the
 * per-queue GEM_INT_Q1_STATUS/GEM_INT_Q1_MASK register banks (indexed
 * by q - 1).  Callers are expected to follow up with
 * gem_update_int_status() to propagate the change to the IRQ lines.
 */
static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag)
{
if (q == 0) {
s->regs[GEM_ISR] |= flag & ~(s->regs[GEM_IMR]);
} else {
s->regs[GEM_INT_Q1_STATUS + q - 1] |= flag &
~(s->regs[GEM_INT_Q1_MASK + q - 1]);
}
}
/*
* gem_init_register_masks:
* One time initialization.
@ -459,6 +488,7 @@ static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
*/
static void gem_init_register_masks(CadenceGEMState *s)
{
unsigned int i;
/* Mask of register bits which are read only */
memset(&s->regs_ro[0], 0, sizeof(s->regs_ro));
s->regs_ro[GEM_NWCTRL] = 0xFFF80000;
@ -471,10 +501,19 @@ static void gem_init_register_masks(CadenceGEMState *s)
s->regs_ro[GEM_ISR] = 0xFFFFFFFF;
s->regs_ro[GEM_IMR] = 0xFFFFFFFF;
s->regs_ro[GEM_MODID] = 0xFFFFFFFF;
for (i = 0; i < s->num_priority_queues; i++) {
s->regs_ro[GEM_INT_Q1_STATUS + i] = 0xFFFFFFFF;
s->regs_ro[GEM_INT_Q1_ENABLE + i] = 0xFFFFF319;
s->regs_ro[GEM_INT_Q1_DISABLE + i] = 0xFFFFF319;
s->regs_ro[GEM_INT_Q1_MASK + i] = 0xFFFFFFFF;
}
/* Mask of register bits which are clear on read */
memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc));
s->regs_rtc[GEM_ISR] = 0xFFFFFFFF;
for (i = 0; i < s->num_priority_queues; i++) {
s->regs_rtc[GEM_INT_Q1_STATUS + i] = 0x00000CE6;
}
/* Mask of register bits which are write 1 to clear */
memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c));
@ -486,6 +525,10 @@ static void gem_init_register_masks(CadenceGEMState *s)
s->regs_wo[GEM_NWCTRL] = 0x00073E60;
s->regs_wo[GEM_IER] = 0x07FFFFFF;
s->regs_wo[GEM_IDR] = 0x07FFFFFF;
for (i = 0; i < s->num_priority_queues; i++) {
s->regs_wo[GEM_INT_Q1_ENABLE + i] = 0x00000CE6;
s->regs_wo[GEM_INT_Q1_DISABLE + i] = 0x00000CE6;
}
}
/*
@ -555,29 +598,10 @@ static void gem_update_int_status(CadenceGEMState *s)
{
int i;
if (!s->regs[GEM_ISR]) {
/* ISR isn't set, clear all the interrupts */
for (i = 0; i < s->num_priority_queues; ++i) {
qemu_set_irq(s->irq[i], 0);
}
return;
}
qemu_set_irq(s->irq[0], !!s->regs[GEM_ISR]);
/* If we get here we know s->regs[GEM_ISR] is set, so we don't need to
* check it again.
*/
if (s->num_priority_queues == 1) {
/* No priority queues, just trigger the interrupt */
DB_PRINT("asserting int.\n");
qemu_set_irq(s->irq[0], 1);
return;
}
for (i = 0; i < s->num_priority_queues; ++i) {
if (s->regs[GEM_INT_Q1_STATUS + i]) {
DB_PRINT("asserting int. (q=%d)\n", i);
qemu_set_irq(s->irq[i], 1);
}
for (i = 1; i < s->num_priority_queues; ++i) {
qemu_set_irq(s->irq[i], !!s->regs[GEM_INT_Q1_STATUS + i - 1]);
}
}
@ -679,7 +703,7 @@ static unsigned calc_mac_hash(const uint8_t *mac)
static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
{
uint8_t *gem_spaddr;
int i;
int i, is_mc;
/* Promiscuous mode? */
if (s->regs[GEM_NWCFG] & GEM_NWCFG_PROMISC) {
@ -695,22 +719,17 @@ static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
}
/* Accept packets -w- hash match? */
if ((packet[0] == 0x01 && (s->regs[GEM_NWCFG] & GEM_NWCFG_MCAST_HASH)) ||
(packet[0] != 0x01 && (s->regs[GEM_NWCFG] & GEM_NWCFG_UCAST_HASH))) {
is_mc = is_multicast_ether_addr(packet);
if ((is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_MCAST_HASH)) ||
(!is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_UCAST_HASH))) {
uint64_t buckets;
unsigned hash_index;
hash_index = calc_mac_hash(packet);
if (hash_index < 32) {
if (s->regs[GEM_HASHLO] & (1<<hash_index)) {
return packet[0] == 0x01 ? GEM_RX_MULTICAST_HASH_ACCEPT :
GEM_RX_UNICAST_HASH_ACCEPT;
}
} else {
hash_index -= 32;
if (s->regs[GEM_HASHHI] & (1<<hash_index)) {
return packet[0] == 0x01 ? GEM_RX_MULTICAST_HASH_ACCEPT :
GEM_RX_UNICAST_HASH_ACCEPT;
}
buckets = ((uint64_t)s->regs[GEM_HASHHI] << 32) | s->regs[GEM_HASHLO];
if ((buckets >> hash_index) & 1) {
return is_mc ? GEM_RX_MULTICAST_HASH_ACCEPT
: GEM_RX_UNICAST_HASH_ACCEPT;
}
}
@ -846,6 +865,35 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
return 0;
}
/*
 * gem_get_queue_base_addr:
 * @s: the GEM device state
 * @tx: true for a transmit queue, false for a receive queue
 * @q: priority queue index; must be in [0, MAX_PRIORITY_QUEUES - 1]
 *
 * Return the guest-programmed descriptor-ring base address for queue @q.
 * Queue 0 lives in the legacy GEM_TXQBASE/GEM_RXQBASE registers; queues
 * 1 and above use the GEM_TRANSMIT_Q1_PTR/GEM_RECEIVE_Q1_PTR register
 * banks (indexed by q - 1).  Any other queue index is a programming
 * error and aborts.
 */
static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q)
{
    uint32_t base_addr = 0;

    switch (q) {
    case 0:
        base_addr = s->regs[tx ? GEM_TXQBASE : GEM_RXQBASE];
        break;
    case 1 ... (MAX_PRIORITY_QUEUES - 1):
        base_addr = s->regs[(tx ? GEM_TRANSMIT_Q1_PTR :
                             GEM_RECEIVE_Q1_PTR) + q - 1];
        break;
    default:
        g_assert_not_reached();
    } /* was "};" — dropped the stray empty statement after the switch */

    return base_addr;
}
/* Convenience wrapper: descriptor-ring base address of TX queue @q. */
static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q)
{
return gem_get_queue_base_addr(s, true, q);
}
/* Convenience wrapper: descriptor-ring base address of RX queue @q. */
static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q)
{
return gem_get_queue_base_addr(s, false, q);
}
static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q)
{
hwaddr desc_addr = 0;
@ -883,7 +931,7 @@ static void gem_get_rx_desc(CadenceGEMState *s, int q)
if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr);
s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
s->regs[GEM_ISR] |= GEM_INT_RXUSED & ~(s->regs[GEM_IMR]);
gem_set_isr(s, q, GEM_INT_RXUSED);
/* Handle interrupt consequences */
gem_update_int_status(s);
}
@ -895,21 +943,18 @@ static void gem_get_rx_desc(CadenceGEMState *s, int q)
*/
static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
CadenceGEMState *s;
CadenceGEMState *s = qemu_get_nic_opaque(nc);
unsigned rxbufsize, bytes_to_copy;
unsigned rxbuf_offset;
uint8_t rxbuf[2048];
uint8_t *rxbuf_ptr;
bool first_desc = true;
int maf;
int q = 0;
s = qemu_get_nic_opaque(nc);
/* Is this destination MAC address "for us" ? */
maf = gem_mac_address_filter(s, buf);
if (maf == GEM_RX_REJECT) {
return -1;
return size; /* no, drop silently because it's not an error */
}
/* Discard packets with receive length error enabled ? */
@ -961,29 +1006,35 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
} else {
unsigned crc_val;
if (size > sizeof(rxbuf) - sizeof(crc_val)) {
size = sizeof(rxbuf) - sizeof(crc_val);
if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
size = MAX_FRAME_SIZE - sizeof(crc_val);
}
bytes_to_copy = size;
/* The application wants the FCS field, which QEMU does not provide.
* We must try and calculate one.
*/
memcpy(rxbuf, buf, size);
memset(rxbuf + size, 0, sizeof(rxbuf) - size);
rxbuf_ptr = rxbuf;
crc_val = cpu_to_le32(crc32(0, rxbuf, MAX(size, 60)));
memcpy(rxbuf + size, &crc_val, sizeof(crc_val));
memcpy(s->rx_packet, buf, size);
memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
rxbuf_ptr = s->rx_packet;
crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));
bytes_to_copy += 4;
size += 4;
}
DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size);
DB_PRINT("config bufsize: %u packet size: %zd\n", rxbufsize, size);
/* Find which queue we are targeting */
q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);
if (size > gem_get_max_buf_len(s, false)) {
qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
gem_set_isr(s, q, GEM_INT_AMBA_ERR);
return -1;
}
while (bytes_to_copy) {
hwaddr desc_addr;
@ -992,9 +1043,9 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
return -1;
}
DB_PRINT("copy %u bytes to 0x%" PRIx64 "\n",
MIN(bytes_to_copy, rxbufsize),
rx_desc_get_buffer(s, s->rx_desc[q]));
DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
MIN(bytes_to_copy, rxbufsize),
rx_desc_get_buffer(s, s->rx_desc[q]));
/* Copy packet data to emulated DMA buffer */
address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) +
@ -1044,7 +1095,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
/* Next descriptor */
if (rx_desc_get_wrap(s->rx_desc[q])) {
DB_PRINT("wrapping RX descriptor list\n");
s->rx_desc_addr[q] = s->regs[GEM_RXQBASE];
s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
} else {
DB_PRINT("incrementing RX descriptor list\n");
s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
@ -1057,7 +1108,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
gem_receive_updatestats(s, buf, size);
s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD;
s->regs[GEM_ISR] |= GEM_INT_RXCMPL & ~(s->regs[GEM_IMR]);
gem_set_isr(s, q, GEM_INT_RXCMPL);
/* Handle interrupt consequences */
gem_update_int_status(s);
@ -1119,7 +1170,6 @@ static void gem_transmit(CadenceGEMState *s)
{
uint32_t desc[DESC_MAX_NUM_WORDS];
hwaddr packet_desc_addr;
uint8_t tx_packet[2048];
uint8_t *p;
unsigned total_bytes;
int q = 0;
@ -1135,7 +1185,7 @@ static void gem_transmit(CadenceGEMState *s)
* Packets scattered across multiple descriptors are gathered to this
* one contiguous buffer first.
*/
p = tx_packet;
p = s->tx_packet;
total_bytes = 0;
for (q = s->num_priority_queues - 1; q >= 0; q--) {
@ -1160,17 +1210,18 @@ static void gem_transmit(CadenceGEMState *s)
*/
if ((tx_desc_get_buffer(s, desc) == 0) ||
(tx_desc_get_length(desc) == 0)) {
DB_PRINT("Invalid TX descriptor @ 0x%x\n",
(unsigned)packet_desc_addr);
DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
packet_desc_addr);
break;
}
if (tx_desc_get_length(desc) > sizeof(tx_packet) -
(p - tx_packet)) {
DB_PRINT("TX descriptor @ 0x%" HWADDR_PRIx \
" too large: size 0x%x space 0x%zx\n",
if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
(p - s->tx_packet)) {
qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
packet_desc_addr, tx_desc_get_length(desc),
sizeof(tx_packet) - (p - tx_packet));
gem_get_max_buf_len(s, true) - (p - s->tx_packet));
gem_set_isr(s, q, GEM_INT_AMBA_ERR);
break;
}
@ -1200,7 +1251,7 @@ static void gem_transmit(CadenceGEMState *s)
sizeof(desc_first));
/* Advance the hardware current descriptor past this packet */
if (tx_desc_get_wrap(desc)) {
s->tx_desc_addr[q] = s->regs[GEM_TXQBASE];
s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
} else {
s->tx_desc_addr[q] = packet_desc_addr +
4 * gem_get_desc_len(s, false);
@ -1208,43 +1259,36 @@ static void gem_transmit(CadenceGEMState *s)
DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]);
/* Update queue interrupt status */
if (s->num_priority_queues > 1) {
s->regs[GEM_INT_Q1_STATUS + q] |=
GEM_INT_TXCMPL & ~(s->regs[GEM_INT_Q1_MASK + q]);
}
gem_set_isr(s, q, GEM_INT_TXCMPL);
/* Handle interrupt consequences */
gem_update_int_status(s);
/* Is checksum offload enabled? */
if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
net_checksum_calculate(tx_packet, total_bytes);
net_checksum_calculate(s->tx_packet, total_bytes);
}
/* Update MAC statistics */
gem_transmit_updatestats(s, tx_packet, total_bytes);
gem_transmit_updatestats(s, s->tx_packet, total_bytes);
/* Send the packet somewhere */
if (s->phy_loop || (s->regs[GEM_NWCTRL] &
GEM_NWCTRL_LOCALLOOP)) {
gem_receive(qemu_get_queue(s->nic), tx_packet,
gem_receive(qemu_get_queue(s->nic), s->tx_packet,
total_bytes);
} else {
qemu_send_packet(qemu_get_queue(s->nic), tx_packet,
qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
total_bytes);
}
/* Prepare for next packet */
p = tx_packet;
p = s->tx_packet;
total_bytes = 0;
}
/* read next descriptor */
if (tx_desc_get_wrap(desc)) {
tx_desc_set_last(desc);
if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
packet_desc_addr = s->regs[GEM_TBQPH];
@ -1252,7 +1296,7 @@ static void gem_transmit(CadenceGEMState *s)
} else {
packet_desc_addr = 0;
}
packet_desc_addr |= s->regs[GEM_TXQBASE];
packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
} else {
packet_desc_addr += 4 * gem_get_desc_len(s, false);
}
@ -1264,7 +1308,10 @@ static void gem_transmit(CadenceGEMState *s)
if (tx_desc_get_used(desc)) {
s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]);
/* IRQ TXUSED is defined only for queue 0 */
if (q == 0) {
gem_set_isr(s, 0, GEM_INT_TXUSED);
}
gem_update_int_status(s);
}
}
@ -1314,10 +1361,12 @@ static void gem_reset(DeviceState *d)
s->regs[GEM_TXPARTIALSF] = 0x000003ff;
s->regs[GEM_RXPARTIALSF] = 0x000003ff;
s->regs[GEM_MODID] = s->revision;
s->regs[GEM_DESCONF] = 0x02500111;
s->regs[GEM_DESCONF2] = 0x2ab13fff;
s->regs[GEM_DESCONF] = 0x02D00111;
s->regs[GEM_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
s->regs[GEM_DESCONF5] = 0x002f2045;
s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
s->regs[GEM_INT_Q1_MASK] = 0x00000CE6;
s->regs[GEM_JUMBO_MAX_LEN] = s->jumbo_max_len;
if (s->num_priority_queues > 1) {
queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
@ -1458,7 +1507,7 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
if (!(val & GEM_NWCTRL_TXENA)) {
/* Reset to start of Q when transmit disabled. */
for (i = 0; i < s->num_priority_queues; i++) {
s->tx_desc_addr[i] = s->regs[GEM_TXQBASE];
s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i);
}
}
if (gem_can_receive(qemu_get_queue(s->nic))) {
@ -1488,6 +1537,9 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
s->regs[GEM_IMR] &= ~val;
gem_update_int_status(s);
break;
case GEM_JUMBO_MAX_LEN:
s->regs[GEM_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK;
break;
case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE:
s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val;
gem_update_int_status(s);
@ -1582,6 +1634,12 @@ static void gem_realize(DeviceState *dev, Error **errp)
s->nic = qemu_new_nic(&net_gem_info, &s->conf,
object_get_typename(OBJECT(dev)), dev->id, s);
if (s->jumbo_max_len > MAX_FRAME_SIZE) {
error_setg(errp, "jumbo-max-len is greater than %d",
MAX_FRAME_SIZE);
return;
}
}
static void gem_init(Object *obj)
@ -1630,6 +1688,8 @@ static Property gem_properties[] = {
num_type1_screeners, 4),
DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
num_type2_screeners, 4),
DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState,
jumbo_max_len, 10240),
DEFINE_PROP_END_OF_LIST(),
};

View File

@ -34,9 +34,9 @@
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"
@ -2816,11 +2816,15 @@ e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val)
if (core->mac[RCTL] & E1000_RCTL_DTYP_MASK) {
if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) {
hw_error("e1000e: PSRCTL.BSIZE0 cannot be zero");
qemu_log_mask(LOG_GUEST_ERROR,
"e1000e: PSRCTL.BSIZE0 cannot be zero");
return;
}
if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) {
hw_error("e1000e: PSRCTL.BSIZE1 cannot be zero");
qemu_log_mask(LOG_GUEST_ERROR,
"e1000e: PSRCTL.BSIZE1 cannot be zero");
return;
}
}

View File

@ -381,6 +381,9 @@ virtio_net_announce_notify(void) ""
virtio_net_announce_timer(int round) "%d"
virtio_net_handle_announce(int round) "%d"
virtio_net_post_load_device(void)
virtio_net_rss_disable(void)
virtio_net_rss_error(const char *msg, uint32_t value) "%s, value 0x%08x"
virtio_net_rss_enable(uint32_t p1, uint16_t p2, uint8_t p3) "hashes 0x%x, table of %d, key of %d"
# tulip.c
tulip_reg_write(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64

View File

@ -171,9 +171,6 @@ static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
len = s->rx_frame_len;
}
if (s->rx_frame_len + len > sizeof(s->rx_frame)) {
return;
}
pci_dma_write(&s->dev, desc->buf_addr1, s->rx_frame +
(s->rx_frame_size - s->rx_frame_len), len);
s->rx_frame_len -= len;
@ -186,9 +183,6 @@ static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc)
len = s->rx_frame_len;
}
if (s->rx_frame_len + len > sizeof(s->rx_frame)) {
return;
}
pci_dma_write(&s->dev, desc->buf_addr2, s->rx_frame +
(s->rx_frame_size - s->rx_frame_len), len);
s->rx_frame_len -= len;
@ -584,6 +578,9 @@ static int tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
int len2 = (desc->control >> TDES1_BUF2_SIZE_SHIFT) & TDES1_BUF2_SIZE_MASK;
if (s->tx_frame_len + len1 > sizeof(s->tx_frame)) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: descriptor overflow (ofs: %u, len:%d, size:%zu)\n",
__func__, s->tx_frame_len, len1, sizeof(s->tx_frame));
return -1;
}
if (len1) {
@ -593,6 +590,9 @@ static int tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc)
}
if (s->tx_frame_len + len2 > sizeof(s->tx_frame)) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: descriptor overflow (ofs: %u, len:%d, size:%zu)\n",
__func__, s->tx_frame_len, len2, sizeof(s->tx_frame));
return -1;
}
if (len2) {

View File

@ -211,7 +211,7 @@
#define RDES0_RF BIT(11)
#define RDES0_DT_SHIFT 12
#define RDES0_DT_MASK 3
#define RDES0_LE BIT(14)
#define RDES0_DE BIT(14)
#define RDES0_ES BIT(15)
#define RDES0_FL_SHIFT 16
#define RDES0_FL_MASK 0x3fff

View File

@ -42,6 +42,7 @@
#include "trace.h"
#include "monitor/qdev.h"
#include "hw/pci/pci.h"
#include "net_rx_pkt.h"
#define VIRTIO_NET_VM_VERSION 11
@ -77,25 +78,15 @@
tso/gso/gro 'off'. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
/* temporary until standard header include it */
#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)
#define VIRTIO_NET_HDR_F_RSC_INFO 4 /* rsc_ext data in csum_ fields */
#define VIRTIO_NET_F_RSC_EXT 61
#endif
static inline __virtio16 *virtio_net_rsc_ext_num_packets(
struct virtio_net_hdr *hdr)
{
return &hdr->csum_start;
}
static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
struct virtio_net_hdr *hdr)
{
return &hdr->csum_offset;
}
#define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
static VirtIOFeature feature_sizes[] = {
{.flags = 1ULL << VIRTIO_NET_F_MAC,
@ -108,6 +99,8 @@ static VirtIOFeature feature_sizes[] = {
.end = endof(struct virtio_net_config, mtu)},
{.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
.end = endof(struct virtio_net_config, duplex)},
{.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
.end = endof(struct virtio_net_config, supported_hash_types)},
{}
};
@ -138,6 +131,12 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
memcpy(netcfg.mac, n->mac, ETH_ALEN);
virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
netcfg.duplex = n->net_conf.duplex;
netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
virtio_stl_p(vdev, &netcfg.supported_hash_types,
VIRTIO_NET_RSS_SUPPORTED_HASHES);
memcpy(config, &netcfg, n->config_size);
}
@ -561,7 +560,7 @@ static int peer_has_ufo(VirtIONet *n)
}
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
int version_1)
int version_1, int hash_report)
{
int i;
NetClientState *nc;
@ -569,7 +568,10 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
n->mergeable_rx_bufs = mergeable_rx_bufs;
if (version_1) {
n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
n->guest_hdr_len = hash_report ?
sizeof(struct virtio_net_hdr_v1_hash) :
sizeof(struct virtio_net_hdr_mrg_rxbuf);
n->rss_data.populate_hash = !!hash_report;
} else {
n->guest_hdr_len = n->mergeable_rx_bufs ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
@ -690,6 +692,8 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
@ -701,6 +705,8 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
return features;
}
virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
features = vhost_net_get_features(get_vhost_net(nc->peer), features);
vdev->backend_features = features;
@ -860,18 +866,22 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
}
virtio_net_set_multiqueue(n,
virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
virtio_has_feature(features, VIRTIO_NET_F_MQ));
virtio_net_set_mrg_rx_bufs(n,
virtio_has_feature(features,
VIRTIO_NET_F_MRG_RXBUF),
virtio_has_feature(features,
VIRTIO_F_VERSION_1));
VIRTIO_F_VERSION_1),
virtio_has_feature(features,
VIRTIO_NET_F_HASH_REPORT));
n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
@ -1136,25 +1146,165 @@ static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
}
}
static void virtio_net_disable_rss(VirtIONet *n)
{
if (n->rss_data.enabled) {
trace_virtio_net_rss_disable();
}
n->rss_data.enabled = false;
}
static uint16_t virtio_net_handle_rss(VirtIONet *n,
struct iovec *iov,
unsigned int iov_cnt,
bool do_rss)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct virtio_net_rss_config cfg;
size_t s, offset = 0, size_get;
uint16_t queues, i;
struct {
uint16_t us;
uint8_t b;
} QEMU_PACKED temp;
const char *err_msg = "";
uint32_t err_value = 0;
if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
err_msg = "RSS is not negotiated";
goto error;
}
if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
err_msg = "Hash report is not negotiated";
goto error;
}
size_get = offsetof(struct virtio_net_rss_config, indirection_table);
s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
if (s != size_get) {
err_msg = "Short command buffer";
err_value = (uint32_t)s;
goto error;
}
n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
n->rss_data.indirections_len =
virtio_lduw_p(vdev, &cfg.indirection_table_mask);
n->rss_data.indirections_len++;
if (!do_rss) {
n->rss_data.indirections_len = 1;
}
if (!is_power_of_2(n->rss_data.indirections_len)) {
err_msg = "Invalid size of indirection table";
err_value = n->rss_data.indirections_len;
goto error;
}
if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
err_msg = "Too large indirection table";
err_value = n->rss_data.indirections_len;
goto error;
}
n->rss_data.default_queue = do_rss ?
virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
if (n->rss_data.default_queue >= n->max_queues) {
err_msg = "Invalid default queue";
err_value = n->rss_data.default_queue;
goto error;
}
offset += size_get;
size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
g_free(n->rss_data.indirections_table);
n->rss_data.indirections_table = g_malloc(size_get);
if (!n->rss_data.indirections_table) {
err_msg = "Can't allocate indirections table";
err_value = n->rss_data.indirections_len;
goto error;
}
s = iov_to_buf(iov, iov_cnt, offset,
n->rss_data.indirections_table, size_get);
if (s != size_get) {
err_msg = "Short indirection table buffer";
err_value = (uint32_t)s;
goto error;
}
for (i = 0; i < n->rss_data.indirections_len; ++i) {
uint16_t val = n->rss_data.indirections_table[i];
n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
}
offset += size_get;
size_get = sizeof(temp);
s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
if (s != size_get) {
err_msg = "Can't get queues";
err_value = (uint32_t)s;
goto error;
}
queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues;
if (queues == 0 || queues > n->max_queues) {
err_msg = "Invalid number of queues";
err_value = queues;
goto error;
}
if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
err_msg = "Invalid key size";
err_value = temp.b;
goto error;
}
if (!temp.b && n->rss_data.hash_types) {
err_msg = "No key provided";
err_value = 0;
goto error;
}
if (!temp.b && !n->rss_data.hash_types) {
virtio_net_disable_rss(n);
return queues;
}
offset += size_get;
size_get = temp.b;
s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
if (s != size_get) {
err_msg = "Can get key buffer";
err_value = (uint32_t)s;
goto error;
}
n->rss_data.enabled = true;
trace_virtio_net_rss_enable(n->rss_data.hash_types,
n->rss_data.indirections_len,
temp.b);
return queues;
error:
trace_virtio_net_rss_error(err_msg, err_value);
virtio_net_disable_rss(n);
return 0;
}
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct virtio_net_ctrl_mq mq;
size_t s;
uint16_t queues;
s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
if (s != sizeof(mq)) {
virtio_net_disable_rss(n);
if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
queues = virtio_net_handle_rss(n, iov, iov_cnt, false);
return queues ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
}
if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
queues = virtio_net_handle_rss(n, iov, iov_cnt, true);
} else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
struct virtio_net_ctrl_mq mq;
size_t s;
if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
return VIRTIO_NET_ERR;
}
s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
if (s != sizeof(mq)) {
return VIRTIO_NET_ERR;
}
queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
} else {
return VIRTIO_NET_ERR;
}
if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
return VIRTIO_NET_ERR;
}
queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
queues > n->max_queues ||
@ -1387,8 +1537,107 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
return 0;
}
static uint8_t virtio_net_get_hash_type(bool isip4,
bool isip6,
bool isudp,
bool istcp,
uint32_t types)
{
if (isip4) {
if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
return NetPktRssIpV4Tcp;
}
if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
return NetPktRssIpV4Udp;
}
if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
return NetPktRssIpV4;
}
} else if (isip6) {
uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
if (istcp && (types & mask)) {
return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
}
mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
if (isudp && (types & mask)) {
return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
}
mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
if (types & mask) {
return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
NetPktRssIpV6Ex : NetPktRssIpV6;
}
}
return 0xff;
}
static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report,
uint32_t hash)
{
struct virtio_net_hdr_v1_hash *hdr = (void *)buf;
hdr->hash_value = hash;
hdr->hash_report = report;
}
static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
size_t size)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
unsigned int index = nc->queue_index, new_index = index;
struct NetRxPkt *pkt = n->rx_pkt;
uint8_t net_hash_type;
uint32_t hash;
bool isip4, isip6, isudp, istcp;
static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
VIRTIO_NET_HASH_REPORT_IPv4,
VIRTIO_NET_HASH_REPORT_TCPv4,
VIRTIO_NET_HASH_REPORT_TCPv6,
VIRTIO_NET_HASH_REPORT_IPv6,
VIRTIO_NET_HASH_REPORT_IPv6_EX,
VIRTIO_NET_HASH_REPORT_TCPv6_EX,
VIRTIO_NET_HASH_REPORT_UDPv4,
VIRTIO_NET_HASH_REPORT_UDPv6,
VIRTIO_NET_HASH_REPORT_UDPv6_EX
};
net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
size - n->host_hdr_len);
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
istcp = isudp = false;
}
if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
istcp = isudp = false;
}
net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
n->rss_data.hash_types);
if (net_hash_type > NetPktRssIpV6UdpEx) {
if (n->rss_data.populate_hash) {
virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0);
}
return n->rss_data.redirect ? n->rss_data.default_queue : -1;
}
hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
if (n->rss_data.populate_hash) {
virtio_set_packet_hash(buf, reports[net_hash_type], hash);
}
if (n->rss_data.redirect) {
new_index = hash & (n->rss_data.indirections_len - 1);
new_index = n->rss_data.indirections_table[new_index];
}
return (index == new_index) ? -1 : new_index;
}
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
size_t size)
size_t size, bool no_rss)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@ -1402,6 +1651,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
return -1;
}
if (!no_rss && n->rss_data.enabled) {
int index = virtio_net_process_rss(nc, buf, size);
if (index >= 0) {
NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
return virtio_net_receive_rcu(nc2, buf, size, true);
}
}
/* hdr_len refers to the header we supply to the guest */
if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
return 0;
@ -1452,6 +1709,11 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
}
receive_header(n, sg, elem->in_num, buf, size);
if (n->rss_data.populate_hash) {
offset = sizeof(mhdr);
iov_from_buf(sg, elem->in_num, offset,
buf + offset, n->host_hdr_len - sizeof(mhdr));
}
offset = n->host_hdr_len;
total += n->guest_hdr_len;
guest_offset = n->guest_hdr_len;
@ -1496,7 +1758,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
{
RCU_READ_LOCK_GUARD();
return virtio_net_receive_rcu(nc, buf, size);
return virtio_net_receive_rcu(nc, buf, size, false);
}
static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
@ -1539,15 +1801,15 @@ static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
VirtioNetRscSeg *seg)
{
int ret;
struct virtio_net_hdr *h;
struct virtio_net_hdr_v1 *h;
h = (struct virtio_net_hdr *)seg->buf;
h = (struct virtio_net_hdr_v1 *)seg->buf;
h->flags = 0;
h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
if (seg->is_coalesced) {
*virtio_net_rsc_ext_num_packets(h) = seg->packets;
*virtio_net_rsc_ext_num_dupacks(h) = seg->dup_ack;
h->rsc.segments = seg->packets;
h->rsc.dup_acks = seg->dup_ack;
h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
if (chain->proto == ETH_P_IP) {
h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@ -2444,7 +2706,9 @@ static int virtio_net_post_load_device(void *opaque, int version_id)
trace_virtio_net_post_load_device();
virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
virtio_vdev_has_feature(vdev,
VIRTIO_F_VERSION_1));
VIRTIO_F_VERSION_1),
virtio_vdev_has_feature(vdev,
VIRTIO_NET_F_HASH_REPORT));
/* MAC_TABLE_ENTRIES may be different from the saved image */
if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
@ -2493,6 +2757,13 @@ static int virtio_net_post_load_device(void *opaque, int version_id)
}
}
if (n->rss_data.enabled) {
trace_virtio_net_rss_enable(n->rss_data.hash_types,
n->rss_data.indirections_len,
sizeof(n->rss_data.key));
} else {
trace_virtio_net_rss_disable();
}
return 0;
}
@ -2670,6 +2941,32 @@ static const VMStateDescription vmstate_virtio_net_has_vnet = {
},
};
static bool virtio_net_rss_needed(void *opaque)
{
return VIRTIO_NET(opaque)->rss_data.enabled;
}
static const VMStateDescription vmstate_virtio_net_rss = {
.name = "virtio-net-device/rss",
.version_id = 1,
.minimum_version_id = 1,
.needed = virtio_net_rss_needed,
.fields = (VMStateField[]) {
VMSTATE_BOOL(rss_data.enabled, VirtIONet),
VMSTATE_BOOL(rss_data.redirect, VirtIONet),
VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
VIRTIO_NET_RSS_MAX_KEY_SIZE),
VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
rss_data.indirections_len, 0,
vmstate_info_uint16, uint16_t),
VMSTATE_END_OF_LIST()
},
};
static const VMStateDescription vmstate_virtio_net_device = {
.name = "virtio-net-device",
.version_id = VIRTIO_NET_VM_VERSION,
@ -2720,6 +3017,10 @@ static const VMStateDescription vmstate_virtio_net_device = {
has_ctrl_guest_offloads),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * []) {
&vmstate_virtio_net_rss,
NULL
}
};
static NetClientInfo net_virtio_info = {
@ -3063,7 +3364,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->vqs[0].tx_waiting = 0;
n->tx_burst = n->net_conf.txburst;
virtio_net_set_mrg_rx_bufs(n, 0, 0);
virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
n->promisc = 1; /* for compatibility */
n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
@ -3075,6 +3376,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
QTAILQ_INIT(&n->rsc_chains);
n->qdev = dev;
net_rx_pkt_init(&n->rx_pkt, false);
}
static void virtio_net_device_unrealize(DeviceState *dev)
@ -3111,6 +3414,8 @@ static void virtio_net_device_unrealize(DeviceState *dev)
g_free(n->vqs);
qemu_del_nic(n->nic);
virtio_net_rsc_cleanup(n);
g_free(n->rss_data.indirections_table);
net_rx_pkt_uninit(n->rx_pkt);
virtio_cleanup(vdev);
}
@ -3212,6 +3517,10 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
VIRTIO_NET_F_RSS, false),
DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
VIRTIO_NET_F_HASH_REPORT, false),
DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
VIRTIO_NET_F_RSC_EXT, false),
DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,

View File

@ -40,6 +40,9 @@
#define MAX_TYPE1_SCREENERS 16
#define MAX_TYPE2_SCREENERS 16
#define MAX_JUMBO_FRAME_SIZE_MASK 0x3FFF
#define MAX_FRAME_SIZE MAX_JUMBO_FRAME_SIZE_MASK
typedef struct CadenceGEMState {
/*< private >*/
SysBusDevice parent_obj;
@ -57,6 +60,7 @@ typedef struct CadenceGEMState {
uint8_t num_type1_screeners;
uint8_t num_type2_screeners;
uint32_t revision;
uint16_t jumbo_max_len;
/* GEM registers backing store */
uint32_t regs[CADENCE_GEM_MAXREG];
@ -80,6 +84,8 @@ typedef struct CadenceGEMState {
uint8_t can_rx_state; /* Debug only */
uint8_t tx_packet[MAX_FRAME_SIZE];
uint8_t rx_packet[MAX_FRAME_SIZE];
uint32_t rx_desc[MAX_PRIORITY_QUEUES][DESC_MAX_NUM_WORDS];
bool sar_active[4];

View File

@ -126,6 +126,20 @@ typedef struct VirtioNetRscChain {
/* Maximum packet size we can receive from tap device: header + 64k */
#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 * KiB))
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
typedef struct VirtioNetRssData {
bool enabled;
bool redirect;
bool populate_hash;
uint32_t hash_types;
uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
uint16_t indirections_len;
uint16_t *indirections_table;
uint16_t default_queue;
} VirtioNetRssData;
typedef struct VirtIONetQueue {
VirtQueue *rx_vq;
VirtQueue *tx_vq;
@ -199,6 +213,8 @@ struct VirtIONet {
bool failover;
DeviceListener primary_listener;
Notifier migration_state;
VirtioNetRssData rss_data;
struct NetRxPkt *rx_pkt;
};
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,

View File

@ -432,6 +432,16 @@ extern const VMStateInfo vmstate_info_qlist;
.offset = vmstate_offset_pointer(_state, _field, _type), \
}
#define VMSTATE_VARRAY_UINT16_ALLOC(_field, _state, _field_num, _version, _info, _type) {\
.name = (stringify(_field)), \
.version_id = (_version), \
.num_offset = vmstate_offset_value(_state, _field_num, uint16_t),\
.info = &(_info), \
.size = sizeof(_type), \
.flags = VMS_VARRAY_UINT16 | VMS_POINTER | VMS_ALLOC, \
.offset = vmstate_offset_pointer(_state, _field, _type), \
}
#define VMSTATE_VARRAY_UINT16_UNSAFE(_field, _state, _field_num, _version, _info, _type) {\
.name = (stringify(_field)), \
.version_id = (_version), \

View File

@ -32,6 +32,9 @@
#include "migration/migration.h"
#include "util.h"
#include "block/aio-wait.h"
#include "qemu/coroutine.h"
#define TYPE_COLO_COMPARE "colo-compare"
#define COLO_COMPARE(obj) \
OBJECT_CHECK(CompareState, (obj), TYPE_COLO_COMPARE)
@ -51,6 +54,8 @@ static NotifierList colo_compare_notifiers =
#define REGULAR_PACKET_CHECK_MS 3000
#define DEFAULT_TIME_OUT_MS 3000
static QemuMutex colo_compare_mutex;
static bool colo_compare_active;
static QemuMutex event_mtx;
static QemuCond event_complete_cond;
static int event_unhandled_count;
@ -77,6 +82,23 @@ static int event_unhandled_count;
* |packet | |packet + |packet | |packet +
* +--------+ +--------+ +--------+ +--------+
*/
typedef struct SendCo {
Coroutine *co;
struct CompareState *s;
CharBackend *chr;
GQueue send_list;
bool notify_remote_frame;
bool done;
int ret;
} SendCo;
typedef struct SendEntry {
uint32_t size;
uint32_t vnet_hdr_len;
uint8_t *buf;
} SendEntry;
typedef struct CompareState {
Object parent;
@ -91,6 +113,8 @@ typedef struct CompareState {
SocketReadState pri_rs;
SocketReadState sec_rs;
SocketReadState notify_rs;
SendCo out_sendco;
SendCo notify_sendco;
bool vnet_hdr;
uint32_t compare_timeout;
uint32_t expired_scan_cycle;
@ -122,12 +146,17 @@ enum {
SECONDARY_IN,
};
static const char *colo_mode[] = {
[PRIMARY_IN] = "primary",
[SECONDARY_IN] = "secondary",
};
static int compare_chr_send(CompareState *s,
const uint8_t *buf,
uint8_t *buf,
uint32_t size,
uint32_t vnet_hdr_len,
bool notify_remote_frame);
bool notify_remote_frame,
bool zero_copy);
static bool packet_matches_str(const char *str,
const uint8_t *buf,
@ -145,7 +174,7 @@ static void notify_remote_frame(CompareState *s)
char msg[] = "DO_CHECKPOINT";
int ret = 0;
ret = compare_chr_send(s, (uint8_t *)msg, strlen(msg), 0, true);
ret = compare_chr_send(s, (uint8_t *)msg, strlen(msg), 0, true, false);
if (ret < 0) {
error_report("Notify Xen COLO-frame failed");
}
@ -217,6 +246,7 @@ static int packet_enqueue(CompareState *s, int mode, Connection **con)
ConnectionKey key;
Packet *pkt = NULL;
Connection *conn;
int ret;
if (mode == PRIMARY_IN) {
pkt = packet_new(s->pri_rs.buf,
@ -245,16 +275,18 @@ static int packet_enqueue(CompareState *s, int mode, Connection **con)
}
if (mode == PRIMARY_IN) {
if (!colo_insert_packet(&conn->primary_list, pkt, &conn->pack)) {
error_report("colo compare primary queue size too big,"
"drop packet");
}
ret = colo_insert_packet(&conn->primary_list, pkt, &conn->pack);
} else {
if (!colo_insert_packet(&conn->secondary_list, pkt, &conn->sack)) {
error_report("colo compare secondary queue size too big,"
"drop packet");
}
ret = colo_insert_packet(&conn->secondary_list, pkt, &conn->sack);
}
if (!ret) {
trace_colo_compare_drop_packet(colo_mode[mode],
"queue size too big, drop packet");
packet_destroy(pkt, NULL);
pkt = NULL;
}
*con = conn;
return 0;
@ -272,12 +304,13 @@ static void colo_release_primary_pkt(CompareState *s, Packet *pkt)
pkt->data,
pkt->size,
pkt->vnet_hdr_len,
false);
false,
true);
if (ret < 0) {
error_report("colo send primary packet failed");
}
trace_colo_compare_main("packet same and release packet");
packet_destroy(pkt, NULL);
packet_destroy_partial(pkt, NULL);
}
/*
@ -459,10 +492,12 @@ sec:
g_queue_push_head(&conn->primary_list, ppkt);
g_queue_push_head(&conn->secondary_list, spkt);
qemu_hexdump((char *)ppkt->data, stderr,
"colo-compare ppkt", ppkt->size);
qemu_hexdump((char *)spkt->data, stderr,
"colo-compare spkt", spkt->size);
if (trace_event_get_state_backends(TRACE_COLO_COMPARE_MISCOMPARE)) {
qemu_hexdump((char *)ppkt->data, stderr,
"colo-compare ppkt", ppkt->size);
qemu_hexdump((char *)spkt->data, stderr,
"colo-compare spkt", spkt->size);
}
colo_compare_inconsistency_notify(s);
}
@ -699,65 +734,115 @@ static void colo_compare_connection(void *opaque, void *user_data)
}
}
static void coroutine_fn _compare_chr_send(void *opaque)
{
SendCo *sendco = opaque;
CompareState *s = sendco->s;
int ret = 0;
while (!g_queue_is_empty(&sendco->send_list)) {
SendEntry *entry = g_queue_pop_tail(&sendco->send_list);
uint32_t len = htonl(entry->size);
ret = qemu_chr_fe_write_all(sendco->chr, (uint8_t *)&len, sizeof(len));
if (ret != sizeof(len)) {
g_free(entry->buf);
g_slice_free(SendEntry, entry);
goto err;
}
if (!sendco->notify_remote_frame && s->vnet_hdr) {
/*
* We send vnet header len make other module(like filter-redirector)
* know how to parse net packet correctly.
*/
len = htonl(entry->vnet_hdr_len);
ret = qemu_chr_fe_write_all(sendco->chr,
(uint8_t *)&len,
sizeof(len));
if (ret != sizeof(len)) {
g_free(entry->buf);
g_slice_free(SendEntry, entry);
goto err;
}
}
ret = qemu_chr_fe_write_all(sendco->chr,
(uint8_t *)entry->buf,
entry->size);
if (ret != entry->size) {
g_free(entry->buf);
g_slice_free(SendEntry, entry);
goto err;
}
g_free(entry->buf);
g_slice_free(SendEntry, entry);
}
sendco->ret = 0;
goto out;
err:
while (!g_queue_is_empty(&sendco->send_list)) {
SendEntry *entry = g_queue_pop_tail(&sendco->send_list);
g_free(entry->buf);
g_slice_free(SendEntry, entry);
}
sendco->ret = ret < 0 ? ret : -EIO;
out:
sendco->co = NULL;
sendco->done = true;
aio_wait_kick();
}
static int compare_chr_send(CompareState *s,
const uint8_t *buf,
uint8_t *buf,
uint32_t size,
uint32_t vnet_hdr_len,
bool notify_remote_frame)
bool notify_remote_frame,
bool zero_copy)
{
int ret = 0;
uint32_t len = htonl(size);
SendCo *sendco;
SendEntry *entry;
if (notify_remote_frame) {
sendco = &s->notify_sendco;
} else {
sendco = &s->out_sendco;
}
if (!size) {
return 0;
}
if (notify_remote_frame) {
ret = qemu_chr_fe_write_all(&s->chr_notify_dev,
(uint8_t *)&len,
sizeof(len));
entry = g_slice_new(SendEntry);
entry->size = size;
entry->vnet_hdr_len = vnet_hdr_len;
if (zero_copy) {
entry->buf = buf;
} else {
ret = qemu_chr_fe_write_all(&s->chr_out, (uint8_t *)&len, sizeof(len));
entry->buf = g_malloc(size);
memcpy(entry->buf, buf, size);
}
g_queue_push_head(&sendco->send_list, entry);
if (ret != sizeof(len)) {
goto err;
}
if (s->vnet_hdr) {
/*
* We send vnet header len make other module(like filter-redirector)
* know how to parse net packet correctly.
*/
len = htonl(vnet_hdr_len);
if (!notify_remote_frame) {
ret = qemu_chr_fe_write_all(&s->chr_out,
(uint8_t *)&len,
sizeof(len));
}
if (ret != sizeof(len)) {
goto err;
if (sendco->done) {
sendco->co = qemu_coroutine_create(_compare_chr_send, sendco);
sendco->done = false;
qemu_coroutine_enter(sendco->co);
if (sendco->done) {
/* report early errors */
return sendco->ret;
}
}
if (notify_remote_frame) {
ret = qemu_chr_fe_write_all(&s->chr_notify_dev,
(uint8_t *)buf,
size);
} else {
ret = qemu_chr_fe_write_all(&s->chr_out, (uint8_t *)buf, size);
}
if (ret != size) {
goto err;
}
/* assume success */
return 0;
err:
return ret < 0 ? ret : -EIO;
}
static int compare_chr_can_read(void *opaque)
@ -830,6 +915,12 @@ static void check_old_packet_regular(void *opaque)
void colo_notify_compares_event(void *opaque, int event, Error **errp)
{
CompareState *s;
qemu_mutex_lock(&colo_compare_mutex);
if (!colo_compare_active) {
qemu_mutex_unlock(&colo_compare_mutex);
return;
}
qemu_mutex_lock(&event_mtx);
QTAILQ_FOREACH(s, &net_compares, next) {
@ -843,6 +934,7 @@ void colo_notify_compares_event(void *opaque, int event, Error **errp)
}
qemu_mutex_unlock(&event_mtx);
qemu_mutex_unlock(&colo_compare_mutex);
}
static void colo_compare_timer_init(CompareState *s)
@ -890,6 +982,7 @@ static void colo_compare_handle_event(void *opaque)
static void colo_compare_iothread(CompareState *s)
{
AioContext *ctx = iothread_get_aio_context(s->iothread);
object_ref(OBJECT(s->iothread));
s->worker_context = iothread_get_g_main_context(s->iothread);
@ -906,7 +999,7 @@ static void colo_compare_iothread(CompareState *s)
}
colo_compare_timer_init(s);
s->event_bh = qemu_bh_new(colo_compare_handle_event, s);
s->event_bh = aio_bh_new(ctx, colo_compare_handle_event, s);
}
static char *compare_get_pri_indev(Object *obj, Error **errp)
@ -1062,6 +1155,7 @@ static void compare_pri_rs_finalize(SocketReadState *pri_rs)
pri_rs->buf,
pri_rs->packet_len,
pri_rs->vnet_hdr_len,
false,
false);
} else {
/* compare packet in the specified connection */
@ -1092,7 +1186,7 @@ static void compare_notify_rs_finalize(SocketReadState *notify_rs)
if (packet_matches_str("COLO_USERSPACE_PROXY_INIT",
notify_rs->buf,
notify_rs->packet_len)) {
ret = compare_chr_send(s, (uint8_t *)msg, strlen(msg), 0, true);
ret = compare_chr_send(s, (uint8_t *)msg, strlen(msg), 0, true, false);
if (ret < 0) {
error_report("Notify Xen COLO-frame INIT failed");
}
@ -1196,19 +1290,38 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp)
s->vnet_hdr);
}
QTAILQ_INSERT_TAIL(&net_compares, s, next);
s->out_sendco.s = s;
s->out_sendco.chr = &s->chr_out;
s->out_sendco.notify_remote_frame = false;
s->out_sendco.done = true;
g_queue_init(&s->out_sendco.send_list);
if (s->notify_dev) {
s->notify_sendco.s = s;
s->notify_sendco.chr = &s->chr_notify_dev;
s->notify_sendco.notify_remote_frame = true;
s->notify_sendco.done = true;
g_queue_init(&s->notify_sendco.send_list);
}
g_queue_init(&s->conn_list);
qemu_mutex_init(&event_mtx);
qemu_cond_init(&event_complete_cond);
s->connection_track_table = g_hash_table_new_full(connection_key_hash,
connection_key_equal,
g_free,
connection_destroy);
colo_compare_iothread(s);
qemu_mutex_lock(&colo_compare_mutex);
if (!colo_compare_active) {
qemu_mutex_init(&event_mtx);
qemu_cond_init(&event_complete_cond);
colo_compare_active = true;
}
QTAILQ_INSERT_TAIL(&net_compares, s, next);
qemu_mutex_unlock(&colo_compare_mutex);
return;
}
@ -1224,8 +1337,9 @@ static void colo_flush_packets(void *opaque, void *user_data)
pkt->data,
pkt->size,
pkt->vnet_hdr_len,
false);
packet_destroy(pkt, NULL);
false,
true);
packet_destroy_partial(pkt, NULL);
}
while (!g_queue_is_empty(&conn->secondary_list)) {
pkt = g_queue_pop_head(&conn->secondary_list);
@ -1276,6 +1390,20 @@ static void colo_compare_finalize(Object *obj)
CompareState *s = COLO_COMPARE(obj);
CompareState *tmp = NULL;
qemu_mutex_lock(&colo_compare_mutex);
QTAILQ_FOREACH(tmp, &net_compares, next) {
if (tmp == s) {
QTAILQ_REMOVE(&net_compares, s, next);
break;
}
}
if (QTAILQ_EMPTY(&net_compares)) {
colo_compare_active = false;
qemu_mutex_destroy(&event_mtx);
qemu_cond_destroy(&event_complete_cond);
}
qemu_mutex_unlock(&colo_compare_mutex);
qemu_chr_fe_deinit(&s->chr_pri_in, false);
qemu_chr_fe_deinit(&s->chr_sec_in, false);
qemu_chr_fe_deinit(&s->chr_out, false);
@ -1289,17 +1417,23 @@ static void colo_compare_finalize(Object *obj)
qemu_bh_delete(s->event_bh);
QTAILQ_FOREACH(tmp, &net_compares, next) {
if (tmp == s) {
QTAILQ_REMOVE(&net_compares, s, next);
break;
}
AioContext *ctx = iothread_get_aio_context(s->iothread);
aio_context_acquire(ctx);
AIO_WAIT_WHILE(ctx, !s->out_sendco.done);
if (s->notify_dev) {
AIO_WAIT_WHILE(ctx, !s->notify_sendco.done);
}
aio_context_release(ctx);
/* Release all unhandled packets after compare thead exited */
g_queue_foreach(&s->conn_list, colo_flush_packets, s);
AIO_WAIT_WHILE(NULL, !s->out_sendco.done);
g_queue_clear(&s->conn_list);
g_queue_clear(&s->out_sendco.send_list);
if (s->notify_dev) {
g_queue_clear(&s->notify_sendco.send_list);
}
if (s->connection_track_table) {
g_hash_table_destroy(s->connection_track_table);
@ -1309,15 +1443,18 @@ static void colo_compare_finalize(Object *obj)
object_unref(OBJECT(s->iothread));
}
qemu_mutex_destroy(&event_mtx);
qemu_cond_destroy(&event_complete_cond);
g_free(s->pri_indev);
g_free(s->sec_indev);
g_free(s->outdev);
g_free(s->notify_dev);
}
static void __attribute__((__constructor__)) colo_compare_init_globals(void)
{
colo_compare_active = false;
qemu_mutex_init(&colo_compare_mutex);
}
static const TypeInfo colo_compare_info = {
.name = TYPE_COLO_COMPARE,
.parent = TYPE_OBJECT,

View File

@ -185,6 +185,13 @@ void packet_destroy(void *opaque, void *user_data)
g_slice_free(Packet, pkt);
}
void packet_destroy_partial(void *opaque, void *user_data)
{
Packet *pkt = opaque;
g_slice_free(Packet, pkt);
}
/*
* Clear hashtable, stop this hash growing really huge
*/

View File

@ -102,5 +102,6 @@ bool connection_has_tracked(GHashTable *connection_track_table,
void connection_hashtable_reset(GHashTable *connection_track_table);
Packet *packet_new(const void *data, int size, int vnet_hdr_len);
void packet_destroy(void *opaque, void *user_data);
void packet_destroy_partial(void *opaque, void *user_data);
#endif /* NET_COLO_H */

View File

@ -610,7 +610,7 @@ void qemu_flush_or_purge_queued_packets(NetClientState *nc, bool purge)
qemu_notify_event();
} else if (purge) {
/* Unable to empty the queue, purge remaining packets */
qemu_net_queue_purge(nc->incoming_queue, nc);
qemu_net_queue_purge(nc->incoming_queue, nc->peer);
}
}
@ -965,17 +965,11 @@ static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])(
};
static int net_client_init1(const void *object, bool is_netdev, Error **errp)
static int net_client_init1(const Netdev *netdev, bool is_netdev, Error **errp)
{
Netdev legacy = {0};
const Netdev *netdev;
const char *name;
NetClientState *peer = NULL;
if (is_netdev) {
netdev = object;
name = netdev->id;
if (netdev->type == NET_CLIENT_DRIVER_NIC ||
!net_client_init_fun[netdev->type]) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "type",
@ -983,62 +977,11 @@ static int net_client_init1(const void *object, bool is_netdev, Error **errp)
return -1;
}
} else {
const NetLegacy *net = object;
const NetLegacyOptions *opts = net->opts;
legacy.id = net->id;
netdev = &legacy;
/* missing optional values have been initialized to "all bits zero" */
name = net->has_id ? net->id : net->name;
if (net->has_name) {
warn_report("The 'name' parameter is deprecated, use 'id' instead");
}
/* Map the old options to the new flat type */
switch (opts->type) {
case NET_LEGACY_OPTIONS_TYPE_NONE:
if (netdev->type == NET_CLIENT_DRIVER_NONE) {
return 0; /* nothing to do */
case NET_LEGACY_OPTIONS_TYPE_NIC:
legacy.type = NET_CLIENT_DRIVER_NIC;
legacy.u.nic = opts->u.nic;
break;
case NET_LEGACY_OPTIONS_TYPE_USER:
legacy.type = NET_CLIENT_DRIVER_USER;
legacy.u.user = opts->u.user;
break;
case NET_LEGACY_OPTIONS_TYPE_TAP:
legacy.type = NET_CLIENT_DRIVER_TAP;
legacy.u.tap = opts->u.tap;
break;
case NET_LEGACY_OPTIONS_TYPE_L2TPV3:
legacy.type = NET_CLIENT_DRIVER_L2TPV3;
legacy.u.l2tpv3 = opts->u.l2tpv3;
break;
case NET_LEGACY_OPTIONS_TYPE_SOCKET:
legacy.type = NET_CLIENT_DRIVER_SOCKET;
legacy.u.socket = opts->u.socket;
break;
case NET_LEGACY_OPTIONS_TYPE_VDE:
legacy.type = NET_CLIENT_DRIVER_VDE;
legacy.u.vde = opts->u.vde;
break;
case NET_LEGACY_OPTIONS_TYPE_BRIDGE:
legacy.type = NET_CLIENT_DRIVER_BRIDGE;
legacy.u.bridge = opts->u.bridge;
break;
case NET_LEGACY_OPTIONS_TYPE_NETMAP:
legacy.type = NET_CLIENT_DRIVER_NETMAP;
legacy.u.netmap = opts->u.netmap;
break;
case NET_LEGACY_OPTIONS_TYPE_VHOST_USER:
legacy.type = NET_CLIENT_DRIVER_VHOST_USER;
legacy.u.vhost_user = opts->u.vhost_user;
break;
default:
abort();
}
if (!net_client_init_fun[netdev->type]) {
if (netdev->type == NET_CLIENT_DRIVER_HUBPORT ||
!net_client_init_fun[netdev->type]) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "type",
"a net backend type (maybe it is not compiled "
"into this binary)");
@ -1047,12 +990,12 @@ static int net_client_init1(const void *object, bool is_netdev, Error **errp)
/* Do not add to a hub if it's a nic with a netdev= parameter. */
if (netdev->type != NET_CLIENT_DRIVER_NIC ||
!opts->u.nic.has_netdev) {
!netdev->u.nic.has_netdev) {
peer = net_hub_add_port(0, NULL, NULL);
}
}
if (net_client_init_fun[netdev->type](netdev, name, peer, errp) < 0) {
if (net_client_init_fun[netdev->type](netdev, netdev->id, peer, errp) < 0) {
/* FIXME drop when all init functions store an Error */
if (errp && !*errp) {
error_setg(errp, QERR_DEVICE_INIT_FAILED,
@ -1108,7 +1051,7 @@ static void show_netdevs(void)
static int net_client_init(QemuOpts *opts, bool is_netdev, Error **errp)
{
gchar **substrings = NULL;
void *object = NULL;
Netdev *object = NULL;
Error *err = NULL;
int ret = -1;
Visitor *v = opts_visitor_new(opts);
@ -1151,21 +1094,19 @@ static int net_client_init(QemuOpts *opts, bool is_netdev, Error **errp)
}
}
if (is_netdev) {
visit_type_Netdev(v, NULL, (Netdev **)&object, &err);
} else {
visit_type_NetLegacy(v, NULL, (NetLegacy **)&object, &err);
/* Create an ID for -net if the user did not specify one */
if (!is_netdev && !qemu_opts_id(opts)) {
static int idx;
qemu_opts_set_id(opts, g_strdup_printf("__org.qemu.net%i", idx++));
}
visit_type_Netdev(v, NULL, &object, &err);
if (!err) {
ret = net_client_init1(object, is_netdev, &err);
}
if (is_netdev) {
qapi_free_Netdev(object);
} else {
qapi_free_NetLegacy(object);
}
qapi_free_Netdev(object);
out:
error_propagate(errp, err);

View File

@ -254,7 +254,8 @@ static void tap_set_vnet_hdr_len(NetClientState *nc, int len)
assert(nc->info->type == NET_CLIENT_DRIVER_TAP);
assert(len == sizeof(struct virtio_net_hdr_mrg_rxbuf) ||
len == sizeof(struct virtio_net_hdr));
len == sizeof(struct virtio_net_hdr) ||
len == sizeof(struct virtio_net_hdr_v1_hash));
tap_fd_set_vnet_hdr_len(s->fd, len);
s->host_vnet_hdr_len = len;

View File

@ -12,6 +12,7 @@ colo_proxy_main(const char *chr) ": %s"
# colo-compare.c
colo_compare_main(const char *chr) ": %s"
colo_compare_drop_packet(const char *queue, const char *chr) ": %s: %s"
colo_compare_udp_miscompare(const char *sta, int size) ": %s = %d"
colo_compare_icmp_miscompare(const char *sta, int size) ": %s = %d"
colo_compare_ip_info(int psize, const char *sta, const char *stb, int ssize, const char *stc, const char *std) "ppkt size = %d, ip_src = %s, ip_dst = %s, spkt size = %d, ip_src = %s, ip_dst = %s"

View File

@ -467,55 +467,6 @@
'netmap': 'NetdevNetmapOptions',
'vhost-user': 'NetdevVhostUserOptions' } }
##
# @NetLegacy:
#
# Captures the configuration of a network device; legacy.
#
# @id: identifier for monitor commands
#
# @name: identifier for monitor commands, ignored if @id is present
#
# @opts: device type specific properties (legacy)
#
# Since: 1.2
##
{ 'struct': 'NetLegacy',
'data': {
'*id': 'str',
'*name': 'str',
'opts': 'NetLegacyOptions' } }
##
# @NetLegacyOptionsType:
#
# Since: 1.2
##
{ 'enum': 'NetLegacyOptionsType',
'data': ['none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'vde',
'bridge', 'netmap', 'vhost-user'] }
##
# @NetLegacyOptions:
#
# Like Netdev, but for use only by the legacy command line options
#
# Since: 1.2
##
{ 'union': 'NetLegacyOptions',
'base': { 'type': 'NetLegacyOptionsType' },
'discriminator': 'type',
'data': {
'nic': 'NetLegacyNicOptions',
'user': 'NetdevUserOptions',
'tap': 'NetdevTapOptions',
'l2tpv3': 'NetdevL2TPv3Options',
'socket': 'NetdevSocketOptions',
'vde': 'NetdevVdeOptions',
'bridge': 'NetdevBridgeOptions',
'netmap': 'NetdevNetmapOptions',
'vhost-user': 'NetdevVhostUserOptions' } }
##
# @NetFilterDirection:
#