hw/net/net_tx_pkt: Decouple implementation from PCI

This is intended to be followed by another change for the interface.
It also fixes the leak of memory mapping when the specified memory is
partially mapped.

Fixes: e263cd49c7 ("Packet abstraction for VMWARE network devices")
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Authored by Akihiko Odaki on 2023-05-23 11:42:52 +09:00; committed by Jason Wang.
parent 8d689f6aae
commit 163246e1ce
2 changed files with 42 additions and 20 deletions

View File

@@ -384,10 +384,9 @@ void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
}
}
bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
size_t len)
static bool net_tx_pkt_add_raw_fragment_common(struct NetTxPkt *pkt,
void *base, size_t len)
{
hwaddr mapped_len = 0;
struct iovec *ventry;
assert(pkt);
@@ -395,23 +394,12 @@ bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
return false;
}
if (!len) {
return true;
}
ventry = &pkt->raw[pkt->raw_frags];
mapped_len = len;
ventry->iov_base = base;
ventry->iov_len = len;
pkt->raw_frags++;
ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
&mapped_len, DMA_DIRECTION_TO_DEVICE);
if ((ventry->iov_base != NULL) && (len == mapped_len)) {
ventry->iov_len = mapped_len;
pkt->raw_frags++;
return true;
} else {
return false;
}
return true;
}
bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
@@ -465,8 +453,9 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
assert(pkt->raw);
for (i = 0; i < pkt->raw_frags; i++) {
assert(pkt->raw[i].iov_base);
pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base,
pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
net_tx_pkt_unmap_frag_pci(pkt->pci_dev,
pkt->raw[i].iov_base,
pkt->raw[i].iov_len);
}
}
pkt->pci_dev = pci_dev;
@@ -476,6 +465,30 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
pkt->l4proto = 0;
}
/*
 * Unmap a raw fragment that was mapped from a PCI device via pci_dma_map()
 * (see net_tx_pkt_add_raw_fragment below, which maps with
 * DMA_DIRECTION_TO_DEVICE and unmaps through this helper on failure).
 * @context is the owning PCIDevice, passed as void * so callers can use
 * this as a generic unmap callback.
 */
void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len)
{
pci_dma_unmap(context, base, len, DMA_DIRECTION_TO_DEVICE, 0);
}
/*
 * Map guest memory [pa, pa + len) through the packet's PCI device and append
 * it as a raw fragment. Returns true on success; on any failure (map failed,
 * only a partial range was mapped, or the fragment could not be appended)
 * the mapping is released with the actually-mapped length and false is
 * returned, so no partial mapping leaks.
 */
bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
                                 size_t len)
{
    bool ok = false;
    dma_addr_t mapped_len = len;
    void *mapping = pci_dma_map(pkt->pci_dev, pa, &mapped_len,
                                DMA_DIRECTION_TO_DEVICE);

    if (mapping) {
        /* Reject partial mappings; otherwise hand the buffer to the
         * PCI-agnostic common path. */
        ok = mapped_len == len &&
             net_tx_pkt_add_raw_fragment_common(pkt, mapping, len);
        if (!ok) {
            /* Unmap with mapped_len, not len — only that much was mapped. */
            net_tx_pkt_unmap_frag_pci(pkt->pci_dev, mapping, mapped_len);
        }
    }

    return ok;
}
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt,
struct iovec *iov, uint32_t iov_len,
uint16_t csl)

View File

@@ -153,6 +153,15 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt);
*/
void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);
/**
* Unmap a fragment mapped from a PCI device.
*
* @context: PCI device owning fragment
* @base: pointer to fragment
* @len: length of fragment
*/
void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len);
/**
* Send packet to qemu. handles sw offloads if vhdr is not supported.
*