hw/xen: Use XEN_PAGE_SIZE in PV backend drivers

XC_PAGE_SIZE comes from the actual Xen libraries, while XEN_PAGE_SIZE is
provided by QEMU itself in xen_backend_ops.h. For backends which may be
built for emulation mode, use the latter.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Author: David Woodhouse
Date:   2023-01-07 16:47:43 +00:00
Commit: a9ae1418b3 (parent 7a8a749da7)
4 changed files with 20 additions and 20 deletions
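
For context, the switch works because the Xen PV grant/ring ABI always operates on 4 KiB pages, so QEMU can carry the constant itself and build these backends even without the Xen control libraries. A minimal sketch of such a definition, assuming the conventional shift/size/mask pattern rather than quoting xen_backend_ops.h verbatim:

    /* Sketch only: the PV grant/ring ABI is fixed at 4 KiB pages, so the
     * constant need not come from the Xen libraries' XC_PAGE_SIZE. */
    #define XEN_PAGE_SHIFT  12
    #define XEN_PAGE_SIZE   (1UL << XEN_PAGE_SHIFT)
    #define XEN_PAGE_MASK   (~(XEN_PAGE_SIZE - 1))

Each hunk below is then a mechanical rename of XC_PAGE_SIZE to XEN_PAGE_SIZE in the PV backend drivers.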

hw/block/dataplane/xen-block.c

@@ -101,9 +101,9 @@ static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
          * re-use requests, allocate the memory once here. It will be freed
          * xen_block_dataplane_destroy() when the request list is freed.
          */
-        request->buf = qemu_memalign(XC_PAGE_SIZE,
+        request->buf = qemu_memalign(XEN_PAGE_SIZE,
                                      BLKIF_MAX_SEGMENTS_PER_REQUEST *
-                                     XC_PAGE_SIZE);
+                                     XEN_PAGE_SIZE);
         dataplane->requests_total++;
         qemu_iovec_init(&request->v, 1);
     } else {
@@ -185,7 +185,7 @@ static int xen_block_parse_request(XenBlockRequest *request)
             goto err;
         }
         if (request->req.seg[i].last_sect * dataplane->sector_size >=
-            XC_PAGE_SIZE) {
+            XEN_PAGE_SIZE) {
             error_report("error: page crossing");
             goto err;
         }
@@ -740,7 +740,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
     dataplane->protocol = protocol;
 
-    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
+    ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
     switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {

hw/display/xenfb.c

@@ -489,13 +489,13 @@ static int xenfb_map_fb(struct XenFB *xenfb)
     }
     if (xenfb->pixels) {
-        munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
+        munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE);
         xenfb->pixels = NULL;
     }
 
-    xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE);
+    xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE);
     n_fbdirs = xenfb->fbpages * mode / 8;
-    n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE);
+    n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE);
 
     pgmfns = g_new0(xen_pfn_t, n_fbdirs);
     fbmfns = g_new0(xen_pfn_t, xenfb->fbpages);
@@ -528,8 +528,8 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
 {
     size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]);
     size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz;
-    size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz;
-    size_t fb_len_max = fb_pages * XC_PAGE_SIZE;
+    size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz;
+    size_t fb_len_max = fb_pages * XEN_PAGE_SIZE;
     int max_width, max_height;
 
     if (fb_len_lim > fb_len_max) {
@@ -930,7 +930,7 @@ static void fb_disconnect(struct XenLegacyDevice *xendev)
      * instead. This releases the guest pages and keeps qemu happy.
      */
     qemu_xen_foreignmem_unmap(fb->pixels, fb->fbpages);
-    fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE,
+    fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE,
                       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON,
                       -1, 0);
     if (fb->pixels == MAP_FAILED) {

hw/net/xen_nic.c

@@ -145,7 +145,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
             continue;
         }
 
-        if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
+        if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {
             xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
             net_tx_error(netdev, &txreq, rc);
             continue;
@@ -171,7 +171,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
         if (txreq.flags & NETTXF_csum_blank) {
             /* have read-only mapping -> can't fill checksum in-place */
             if (!tmpbuf) {
-                tmpbuf = g_malloc(XC_PAGE_SIZE);
+                tmpbuf = g_malloc(XEN_PAGE_SIZE);
             }
             memcpy(tmpbuf, page + txreq.offset, txreq.size);
             net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
@@ -243,9 +243,9 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
     if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
         return 0;
     }
-    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
+    if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {
         xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
-                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
+                      (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN);
         return -1;
     }
@@ -348,8 +348,8 @@ static int net_connect(struct XenLegacyDevice *xendev)
         netdev->txs = NULL;
         return -1;
     }
-    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
-    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
+    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);
+    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);
 
     xen_be_bind_evtchn(&netdev->xendev);

hw/usb/xen-usb.c

@@ -161,7 +161,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
     for (i = 0; i < nr_segs; i++) {
         if ((unsigned)usbback_req->req.seg[i].offset +
-            (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) {
+            (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) {
             xen_pv_printf(xendev, 0, "segment crosses page boundary\n");
             return -EINVAL;
         }
@@ -185,7 +185,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
         for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
             seg = usbback_req->req.seg + i;
-            addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset;
+            addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset;
             qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length);
         }
     }
@@ -902,8 +902,8 @@ static int usbback_connect(struct XenLegacyDevice *xendev)
     usbif->conn_ring_ref = conn_ring_ref;
     urb_sring = usbif->urb_sring;
     conn_sring = usbif->conn_sring;
-    BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE);
-    BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE);
+    BACK_RING_INIT(&usbif->urb_ring, urb_sring, XEN_PAGE_SIZE);
+    BACK_RING_INIT(&usbif->conn_ring, conn_sring, XEN_PAGE_SIZE);
 
     xen_be_bind_evtchn(xendev);