net: netmap: small improvements to netmap_send()

This change improves the handling of incomplete multi-slot packets
(e.g. those with the NS_MOREFRAG flag set) by advancing ring->head only
past complete packets. The ring->cur pointer is advanced in any case, in
order to acknowledge the kernel and move the wake-up point forward (thus
avoiding repeated wake-ups).
Also, don't be verbose when an incomplete packet is found.

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Vincenzo Maffione 2018-12-06 17:59:05 +01:00 committed by Jason Wang
parent 6d3aaa5b25
commit cc599ed6d4
1 changed file with 19 additions and 12 deletions

@@ -272,39 +272,46 @@ static void netmap_send(void *opaque)
 {
     NetmapState *s = opaque;
     struct netmap_ring *ring = s->rx;
+    unsigned int tail = ring->tail;
 
-    /* Keep sending while there are available packets into the netmap
+    /* Keep sending while there are available slots in the netmap
        RX ring and the forwarding path towards the peer is open. */
-    while (!nm_ring_empty(ring)) {
-        uint32_t i;
+    while (ring->head != tail) {
+        uint32_t i = ring->head;
         uint32_t idx;
         bool morefrag;
         int iovcnt = 0;
         int iovsize;
 
+        /* Get a (possibly multi-slot) packet. */
         do {
-            i = ring->cur;
             idx = ring->slot[i].buf_idx;
             morefrag = (ring->slot[i].flags & NS_MOREFRAG);
-            s->iov[iovcnt].iov_base = (u_char *)NETMAP_BUF(ring, idx);
+            s->iov[iovcnt].iov_base = (void *)NETMAP_BUF(ring, idx);
             s->iov[iovcnt].iov_len = ring->slot[i].len;
             iovcnt++;
+            i = nm_ring_next(ring, i);
+        } while (i != tail && morefrag);
 
-            ring->cur = ring->head = nm_ring_next(ring, i);
-        } while (!nm_ring_empty(ring) && morefrag);
+        /* Advance ring->cur to tell the kernel that we have seen the slots. */
+        ring->cur = i;
 
-        if (unlikely(nm_ring_empty(ring) && morefrag)) {
-            RD(5, "[netmap_send] ran out of slots, with a pending"
-                   "incomplete packet\n");
+        if (unlikely(morefrag)) {
+            /* This is a truncated packet, so we can stop without releasing the
+             * incomplete slots by updating ring->head. We will hopefully
+             * re-read the complete packet the next time we are called. */
+            break;
         }
 
         iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
                                           netmap_send_completed);
 
+        /* Release the slots to the kernel. */
+        ring->head = i;
+
         if (iovsize == 0) {
             /* The peer does not receive anymore. Packet is queued, stop
-             * reading from the backend until netmap_send_completed()
-             */
+             * reading from the backend until netmap_send_completed(). */
             netmap_read_poll(s, false);
             break;
         }
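
For context, the new loop relies on the standard netmap RX ring convention:
slots from ring->head up to (but excluding) ring->tail belong to user space,
ring->cur tells the kernel how far we have looked (and where the next wake-up
should come from), and advancing ring->head releases slots back to the kernel.
The stand-alone sketch below, which is not part of this commit, illustrates
that convention under the assumption that a netmap-capable system header is
available; rx_consume() is a hypothetical name and the payload handling is
deliberately omitted.

/* Hypothetical, simplified RX consumer, for illustration only: it shows the
 * head/cur/tail handling, not the QEMU-specific packet forwarding. */
#include <stdbool.h>
#include <stdint.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

static void rx_consume(struct netmap_ring *ring)
{
    unsigned int tail = ring->tail;   /* first slot still owned by the kernel */

    while (ring->head != tail) {
        uint32_t i = ring->head;
        bool morefrag;

        /* Walk the slots of one (possibly multi-slot) packet. */
        do {
            morefrag = (ring->slot[i].flags & NS_MOREFRAG);
            /* The payload is at NETMAP_BUF(ring, ring->slot[i].buf_idx),
             * with length ring->slot[i].len; processing is omitted here. */
            i = nm_ring_next(ring, i);
        } while (i != tail && morefrag);

        /* Tell the kernel we have seen these slots, moving the wake-up
         * point forward even if the packet is still incomplete. */
        ring->cur = i;

        if (morefrag) {
            /* Truncated packet: leave ring->head in place so the same
             * slots are read again once the missing fragments arrive. */
            break;
        }

        /* Complete packet consumed: release its slots to the kernel. */
        ring->head = i;
    }
}

The key difference from the pre-patch behaviour is that an incomplete packet
no longer moves ring->head, so its slots are not released prematurely, while
ring->cur still advances so the kernel does not keep waking us up for slots we
have already examined.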