vfio-ccw: Wire up the CRW irq and CRW region

Use the IRQ to notify userspace that there is a CRW
pending in the region, related to path-availability
changes on the passthrough subchannel.

Signed-off-by: Farhan Ali <alifm@linux.ibm.com>
Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <20200505122745.53208-8-farman@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
commit 3f02cb2fd9 (parent d8cac29b1d)
Author:    Farhan Ali
Date:      2020-05-05 14:27:44 +02:00
Committer: Cornelia Huck
3 files changed, 74 insertions(+), 0 deletions(-)


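For orientation before the diff: a rough sketch of the userspace side this plumbing targets, i.e. wiring an eventfd to the new CRW IRQ and waiting for it to fire. It is not part of the patch; VFIO_CCW_CRW_IRQ_INDEX is assumed from the vfio-ccw uapi introduced by this series, and device_fd plus the omitted region-info lookup are illustrative assumptions.

/* Hedged sketch: attach an eventfd to the vfio-ccw CRW IRQ and wait for it.
 * Assumes device_fd is an open vfio-ccw device fd; error handling trimmed.
 */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/vfio.h>

static int wire_crw_eventfd(int device_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;

	if (efd < 0)
		return -1;

	irq_set->argsz = sizeof(buf);
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_CCW_CRW_IRQ_INDEX;	/* assumed from the vfio-ccw uapi */
	irq_set->start = 0;
	irq_set->count = 1;
	memcpy(irq_set->data, &efd, sizeof(int32_t));

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set) < 0)
		return -1;
	return efd;
}

/* Block until the kernel signals that a CRW is pending in the CRW region. */
static void wait_for_crw(int efd)
{
	struct pollfd pfd = { .fd = efd, .events = POLLIN };
	uint64_t cnt;

	poll(&pfd, 1, -1);
	read(efd, &cnt, sizeof(cnt));	/* consume the eventfd counter */
}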
@@ -82,14 +82,24 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_crw_region *region;
struct vfio_ccw_crw *crw;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
crw = list_first_entry_or_null(&private->crw,
struct vfio_ccw_crw, next);
if (crw)
list_del(&crw->next);
mutex_lock(&private->io_mutex);
region = private->region[i].data;
if (crw)
memcpy(&region->crw, &crw->crw, sizeof(region->crw));
if (copy_to_user(buf, (void *)region + pos, count))
ret = -EFAULT;
else
@@ -98,6 +108,13 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
region->crw = 0;
mutex_unlock(&private->io_mutex);
kfree(crw);
/* Notify the guest if more CRWs are on our queue */
if (!list_empty(&private->crw) && private->crw_trigger)
eventfd_signal(private->crw_trigger, 1);
return ret;
}
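A companion sketch (again, not part of the patch) of how a consumer might drain the region once the eventfd fires: the read handler above copies one queued CRW per read, clears region->crw afterwards, and re-signals the eventfd while more CRWs remain, so reading until a zero word comes back empties the queue. The crw_offset parameter, the handle_crw() helper, and the s390 CRW bit layout noted in the comment are assumptions.

/* Hedged sketch, continuing the one above: drain and decode CRWs after the
 * eventfd fires.  crw_offset is the CRW region offset from the (omitted)
 * VFIO_DEVICE_GET_REGION_INFO lookup; handle_crw() is a hypothetical consumer.
 */
#include <stdint.h>
#include <unistd.h>

static void handle_crw(unsigned int rsc, unsigned int erc, unsigned int rsid);

static void drain_crws(int device_fd, off_t crw_offset)
{
	uint32_t word;

	for (;;) {
		if (pread(device_fd, &word, sizeof(word), crw_offset) != sizeof(word))
			break;
		if (word == 0)
			break;	/* region reads back zero once the queue is empty */

		/* Assumed s390 CRW layout: rsc in bits 27-24, erc in 21-16, rsid in 15-0 */
		unsigned int rsc  = (word >> 24) & 0x0f;
		unsigned int erc  = (word >> 16) & 0x3f;
		unsigned int rsid = word & 0xffff;

		handle_crw(rsc, erc, rsid);
	}
}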


@@ -108,6 +108,16 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
eventfd_signal(private->io_trigger, 1);
}
static void vfio_ccw_crw_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
private = container_of(work, struct vfio_ccw_private, crw_work);
if (!list_empty(&private->crw) && private->crw_trigger)
eventfd_signal(private->crw_trigger, 1);
}
/*
* Css driver callbacks
*/
@@ -186,7 +196,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_free;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
atomic_set(&private->avail, 1);
private->state = VFIO_CCW_STATE_STANDBY;
@@ -217,9 +229,15 @@ out_free:
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
struct vfio_ccw_crw *crw, *temp;
vfio_ccw_sch_quiesce(sch);
list_for_each_entry_safe(crw, temp, &private->crw, next) {
list_del(&crw->next);
kfree(crw);
}
vfio_ccw_mdev_unreg(sch);
dev_set_drvdata(&sch->dev, NULL);
@@ -281,6 +299,33 @@ out_unlock:
return rc;
}
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
unsigned int rsc,
unsigned int erc,
unsigned int rsid)
{
struct vfio_ccw_crw *crw;
/*
* If unable to allocate a CRW, just drop the event and
* carry on. The guest will either see a later one or
* learn when it issues its own store subchannel.
*/
crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
if (!crw)
return;
/*
* Build the CRW based on the inputs given to us.
*/
crw->crw.rsc = rsc;
crw->crw.erc = erc;
crw->crw.rsid = rsid;
list_add_tail(&crw->next, &private->crw);
queue_work(vfio_ccw_work_q, &private->crw_work);
}
static int vfio_ccw_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
@@ -311,6 +356,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
/* Path is gone */
if (sch->schib.pmcw.lpum & mask)
cio_cancel_halt_clear(sch, &retry);
vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
link->chpid.id);
break;
case CHP_VARY_ON:
/* Path logically turned on */
@@ -320,6 +367,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
case CHP_ONLINE:
/* Path became available */
sch->lpm |= mask & sch->opm;
vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
link->chpid.id);
break;
}
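One more illustrative fragment, not part of the patch: the two call sites above queue a channel-path CRW with CRW_ERC_PERRN when a path goes away and CRW_ERC_INIT when it comes back, with the CHPID in rsid. Below is a hedged sketch of the handle_crw() consumer referenced in the previous sketch; the #define values are assumptions mirroring the kernel's asm/crw.h, which is not exported to userspace.

/* Values assumed to match asm/crw.h; a real consumer must supply its own. */
#define CRW_RSC_CPATH	0x4	/* reporting source: channel path */
#define CRW_ERC_INIT	0x02	/* initialized: path became available */
#define CRW_ERC_PERRN	0x06	/* permanent error: path is gone */

static void handle_crw(unsigned int rsc, unsigned int erc, unsigned int rsid)
{
	if (rsc != CRW_RSC_CPATH)
		return;		/* this patch only queues channel-path CRWs */

	switch (erc) {
	case CRW_ERC_PERRN:
		/* rsid carries the CHPID: reflect the lost path to the guest */
		break;
	case CRW_ERC_INIT:
		/* rsid carries the CHPID: reflect the restored path to the guest */
		break;
	}
}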


@@ -17,6 +17,7 @@
#include <linux/eventfd.h>
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
#include <asm/crw.h>
#include <asm/debug.h>
#include "css.h"
@@ -59,6 +60,11 @@ int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private);
int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private);
struct vfio_ccw_crw {
struct list_head next;
struct crw crw;
};
/**
* struct vfio_ccw_private
* @sch: pointer to the subchannel
@@ -98,10 +104,12 @@ struct vfio_ccw_private {
struct channel_program cp;
struct irb irb;
union scsw scsw;
struct list_head crw;
struct eventfd_ctx *io_trigger;
struct eventfd_ctx *crw_trigger;
struct work_struct io_work;
struct work_struct crw_work;
} __aligned(8);
extern int vfio_ccw_mdev_reg(struct subchannel *sch);