qdisk - hw/block/xen_disk: grant copy implementation

Copy the data operated on during a request from/to local buffers
to/from the grant references.
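
To make the copy direction concrete, here is a hypothetical,
self-contained sketch (not part of the patch) of how a single
grant-copy segment is set up for each direction, using the Xen 4.8
<xengnttab.h> definitions; the parameters stand in for the per-segment
values derived from the blkif request:

#include <xengnttab.h>

/* Sketch only: "ref", "domid", "offset", "len" and "buf" are stand-ins
 * for the values the patch takes from the request and local buffers. */
static void fill_copy_segment(xengnttab_grant_copy_segment_t *seg,
                              int is_read, uint32_t ref, uint16_t domid,
                              uint16_t offset, uint16_t len, void *buf)
{
    if (is_read) {
        /* disk -> guest: the granted guest page is the destination */
        seg->flags = GNTCOPY_dest_gref;
        seg->dest.foreign.ref = ref;
        seg->dest.foreign.domid = domid;
        seg->dest.foreign.offset = offset;
        seg->source.virt = buf;
    } else {
        /* guest -> disk: the granted guest page is the source */
        seg->flags = GNTCOPY_source_gref;
        seg->source.foreign.ref = ref;
        seg->source.foreign.domid = domid;
        seg->source.foreign.offset = offset;
        seg->dest.virt = buf;
    }
    seg->len = len;
}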

Before the grant copy operation, local buffers must be allocated; this
is done by calling ioreq_init_copy_buffers. For a 'read' operation,
the qemu device first performs the read into the local buffers; on
completion, the grant copy is carried out and the buffers are freed.
For a 'write' operation, the grant copy is performed before the qemu
device invokes the write.
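
A condensed, hypothetical sketch of that ordering (the stubs stand in
for the patch's ioreq_* helpers and the qemu block layer; the real
code is in ioreq_runio_qemu_aio and qemu_aio_complete below):

#include <stdbool.h>

static void init_copy_buffers(void) { /* allocate local page-aligned buffers */ }
static void grant_copy(void)        { /* copy between buffers and grant refs */ }
static void free_copy_buffers(void) { /* release the local buffers */ }
static void block_io(void)          { /* disk read or write on the buffers */ }

static void serve_request(bool is_write)
{
    init_copy_buffers();
    if (is_write) {
        grant_copy();        /* guest grants -> local buffers, before the write */
        block_io();
    } else {
        block_io();          /* read from disk into the local buffers first */
        grant_copy();        /* local buffers -> guest grants, on completion */
    }
    free_copy_buffers();
}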

A new field, 'feature_grant_copy', is added to recognize when the
grant copy operation is supported by a guest.
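
The detection is a runtime probe rather than a compile-time check. A
minimal sketch of the idea, assuming an already-open gnttab handle
(the real call site is in blk_connect below):

#include <stdbool.h>
#include <xengnttab.h>

/* A zero-count grant copy returns 0 only when the operation is actually
 * implemented, so its result can be cached as a feature flag. */
static bool grant_copy_supported(xengnttab_handle *gnt)
{
    return xengnttab_grant_copy(gnt, 0, NULL) == 0;
}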

Signed-off-by: Paulina Szubarczyk <paulinaszubarczyk@gmail.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: Anthony PERARD <anthony.perard@citrix.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>

diff --git a/configure b/configure

@@ -1952,6 +1952,61 @@ EOF
  # Xen unstable
  elif
      cat > $TMPC <<EOF &&
/*
 * If we have stable libs then we don't want the libxc compat
 * layers, regardless of what CFLAGS we may have been given.
 *
 * Also, check if xengnttab_grant_copy_segment_t is defined and
 * grant copy operation is implemented.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API
#include <xenctrl.h>
#include <xenstore.h>
#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>
#include <stdint.h>
#include <xen/hvm/hvm_info_table.h>
#if !defined(HVM_MAX_VCPUS)
# error HVM_MAX_VCPUS not defined
#endif
int main(void) {
  xc_interface *xc = NULL;
  xenforeignmemory_handle *xfmem;
  xenevtchn_handle *xe;
  xengnttab_handle *xg;
  xen_domain_handle_t handle;
  xengnttab_grant_copy_segment_t* seg = NULL;

  xs_daemon_open();

  xc = xc_interface_open(0, 0, 0);
  xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
  xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
  xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000);
  xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL);

  xc_domain_create(xc, 0, handle, 0, NULL, NULL);

  xfmem = xenforeignmemory_open(0, 0);
  xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0);

  xe = xenevtchn_open(0, 0);
  xenevtchn_fd(xe);

  xg = xengnttab_open(0, 0);
  xengnttab_grant_copy(xg, 0, seg);

  return 0;
}
EOF
      compile_prog "" "$xen_libs $xen_stable_libs"
    then
    xen_ctrl_version=480
    xen=yes
  elif
      cat > $TMPC <<EOF &&
/*
 * If we have stable libs then we don't want the libxc compat
 * layers, regardless of what CFLAGS we may have been given.

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c

@@ -119,6 +119,9 @@ struct XenBlkDev {
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* Grant copy */
    gboolean            feature_grant_copy;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockBackend        *blk;
@@ -489,6 +492,106 @@ static int ioreq_map(struct ioreq *ioreq)
    return 0;
}
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 480

static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = NULL;
    }

    qemu_vfree(ioreq->pages);
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
        ioreq->v.iov[i].iov_base = ioreq->page[i];
    }

    return 0;
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count, rc;
    int64_t file_blk = ioreq->blkdev->file_blk;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    count = ioreq->v.niov;

    for (i = 0; i < count; i++) {
        if (ioreq->req.operation == BLKIF_OP_READ) {
            segs[i].flags = GNTCOPY_dest_gref;
            segs[i].dest.foreign.ref = ioreq->refs[i];
            segs[i].dest.foreign.domid = ioreq->domids[i];
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].source.virt = ioreq->v.iov[i].iov_base;
        } else {
            segs[i].flags = GNTCOPY_source_gref;
            segs[i].source.foreign.ref = ioreq->refs[i];
            segs[i].source.foreign.domid = ioreq->domids[i];
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
        }
        segs[i].len = (ioreq->req.seg[i].last_sect
                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
    }

    rc = xengnttab_grant_copy(gnt, count, segs);

    if (rc) {
        xen_be_printf(&ioreq->blkdev->xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    for (i = 0; i < count; i++) {
        if (segs[i].status != GNTST_okay) {
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "failed to copy data %d for gref %d, domid %d\n",
                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;
}
#else
static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_init_copy_buffers(struct ioreq *ioreq)
{
    abort();
}

static int ioreq_grant_copy(struct ioreq *ioreq)
{
    abort();
}
#endif

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
@@ -511,8 +614,31 @@ static void qemu_aio_complete(void *opaque, int ret)
        return;
    }

    if (ioreq->blkdev->feature_grant_copy) {
        switch (ioreq->req.operation) {
        case BLKIF_OP_READ:
            /* in case of failure ioreq->aio_errors is increased */
            if (ret == 0) {
                ioreq_grant_copy(ioreq);
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        case BLKIF_OP_WRITE:
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (!ioreq->req.nr_segments) {
                break;
            }
            ioreq_free_copy_buffers(ioreq);
            break;
        default:
            break;
        }
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    if (!ioreq->blkdev->feature_grant_copy) {
        ioreq_unmap(ioreq);
    }
    ioreq_finish(ioreq);

    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
@@ -538,8 +664,18 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    if (ioreq->blkdev->feature_grant_copy) {
        ioreq_init_copy_buffers(ioreq);
        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            ioreq_grant_copy(ioreq)) {
                ioreq_free_copy_buffers(ioreq);
                goto err;
        }
    } else {
        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
            goto err;
        }
    }

    ioreq->aio_inflight++;
@@ -582,6 +718,9 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        if (!ioreq->blkdev->feature_grant_copy) {
            ioreq_unmap(ioreq);
        }
        goto err;
    }
@@ -590,8 +729,6 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
@@ -1034,6 +1171,12 @@ static int blk_connect(struct XenDevice *xendev)
    xen_be_bind_evtchn(&blkdev->xendev);

    blkdev->feature_grant_copy =
                (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);

    xen_be_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
                  blkdev->feature_grant_copy ? "enabled" : "disabled");

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,

diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h

@@ -424,4 +424,18 @@ static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
#endif
#endif

/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480

typedef void *xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}

#endif
#endif /* QEMU_HW_XEN_COMMON_H */
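
For reference, a hypothetical standalone example (not part of the
patch) of the Xen 4.8 API that the code above targets: copying one
page from a guest grant into a local buffer, with the per-segment
status check that ioreq_grant_copy also performs. Built against Xen
older than 4.8, the same call would hit the stub above and fail with
-ENOSYS:

#include <stdint.h>
#include <xengnttab.h>

/* "domid" and "ref" identify a page granted by the guest; "buf" is a
 * local page-sized buffer. Returns 0 on success, -1 on any failure. */
static int copy_one_page(uint16_t domid, uint32_t ref, void *buf)
{
    xengnttab_handle *xg = xengnttab_open(NULL, 0);
    xengnttab_grant_copy_segment_t seg = {
        .flags = GNTCOPY_source_gref,          /* guest page is the source */
        .source.foreign = { .ref = ref, .offset = 0, .domid = domid },
        .dest.virt = buf,
        .len = 4096,                           /* one XC_PAGE_SIZE page */
    };
    int rc;

    if (!xg) {
        return -1;
    }
    rc = xengnttab_grant_copy(xg, 1, &seg);
    if (rc == 0 && seg.status != GNTST_okay) {
        rc = -1;                               /* per-segment failure */
    }
    xengnttab_close(xg);
    return rc;
}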