hw/xen: Implement EVTCHNOP_unmask

This finally comes with a mechanism for actually injecting events into
the guest vCPU, with all the atomic-test-and-set that's involved in
setting the bit in the shinfo, then the index in the vcpu_info, and
injecting either the lapic vector as MSI, or letting KVM inject the
bare vector.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
This commit is contained in:
David Woodhouse 2022-12-13 17:20:46 +00:00
parent 83eb581134
commit 190cc3c0ed
3 changed files with 189 additions and 0 deletions

View File

@@ -224,6 +224,13 @@ int xen_evtchn_set_callback_param(uint64_t param)
    return ret;
}
/*
 * Deliver the guest's configured upcall for @vcpu, via whichever
 * mechanism (vector, MSI, ...) was set in the callback param.
 */
static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
{
    int via_type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;

    kvm_xen_inject_vcpu_callback_vector(vcpu, via_type);
}
static bool valid_port(evtchn_port_t port)
{
    if (!port) {
@@ -294,6 +301,152 @@ int xen_evtchn_status_op(struct evtchn_status *status)
    return 0;
}
/*
* Never thought I'd hear myself say this, but C++ templates would be
* kind of nice here.
*
* template<class T> static int do_unmask_port(T *shinfo, ...);
*/
/*
 * Long-mode (64-bit guest) flavour: operates on the native shared_info
 * and vcpu_info layouts, where the evtchn_pending[]/evtchn_mask[] words
 * are 64 bits wide.
 *
 * If @do_unmask is true this is a real EVTCHNOP_unmask and the mask bit
 * is cleared first; if false it is a "pseudo-unmask" (used for affinity
 * changes) which leaves the mask bit alone and merely re-attempts
 * delivery of an already-pending event.
 *
 * Each step below bails out early unless *this* call was the one that
 * changed the relevant bit, so a given pending event is delivered to
 * the vCPU exactly once even under concurrent callers.
 *
 * Returns 0 on success, -EINVAL if @port is out of range.
 */
static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
                             bool do_unmask, struct shared_info *shinfo,
                             struct vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    /*
     * NOTE(review): this bounds check is only correct because the ABI's
     * evtchn_pending[] array has exactly bits_per_word elements (64
     * words of 64 bits in long mode) — confirm against the shared_info
     * layout if it ever changes.
     */
    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    if (do_unmask) {
        /*
         * If this is a true unmask operation, clear the mask bit. If
         * it was already unmasked, we have nothing further to do.
         */
        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
            return 0;
        }
    } else {
        /*
         * This is a pseudo-unmask for affinity changes. We don't
         * change the mask bit, and if it's *masked* we have nothing
         * else to do.
         */
        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
            return 0;
        }
    }

    /* If the event was not pending, we're done. */
    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    /* We flipped upcall_pending from 0 to 1, so the upcall is ours to send. */
    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}
/*
 * Compat (32-bit guest) flavour: identical logic to do_unmask_port_lm()
 * but using the compat_shared_info/compat_vcpu_info layouts, where the
 * evtchn_pending[]/evtchn_mask[] words are 32 bits wide.  See the long
 * mode variant for the full commentary; the two must be kept in sync.
 *
 * Returns 0 on success, -EINVAL if @port is out of range.
 */
static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
                                 bool do_unmask,
                                 struct compat_shared_info *shinfo,
                                 struct compat_vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    /*
     * NOTE(review): as in the long-mode variant, this relies on the
     * compat evtchn_pending[] array having exactly bits_per_word (32)
     * elements — confirm against the ABI layout if it ever changes.
     */
    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    if (do_unmask) {
        /*
         * If this is a true unmask operation, clear the mask bit. If
         * it was already unmasked, we have nothing further to do.
         */
        if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
            return 0;
        }
    } else {
        /*
         * This is a pseudo-unmask for affinity changes. We don't
         * change the mask bit, and if it's *masked* we have nothing
         * else to do.
         */
        if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
            return 0;
        }
    }

    /* If the event was not pending, we're done. */
    if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    /* We flipped upcall_pending from 0 to 1, so the upcall is ours to send. */
    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}
/*
 * Common (pseudo-)unmask path for an already-validated @port: locate
 * the shared info page and the bound vCPU's vcpu_info, then dispatch
 * to the layout-specific helper for the guest's current mode.
 *
 * Caller must hold s->port_lock and have checked valid_port().
 */
static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
{
    void *shinfo_hva;
    void *vcpu_info_hva;

    if (s->port_table[port].type == EVTCHNSTAT_closed) {
        return -EINVAL;
    }

    shinfo_hva = xen_overlay_get_shinfo_ptr();
    if (!shinfo_hva) {
        return -ENOTSUP;
    }

    vcpu_info_hva = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
    if (!vcpu_info_hva) {
        return -EINVAL;
    }

    if (!xen_is_long_mode()) {
        return do_unmask_port_compat(s, port, do_unmask, shinfo_hva,
                                     vcpu_info_hva);
    }

    return do_unmask_port_lm(s, port, do_unmask, shinfo_hva, vcpu_info_hva);
}
static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
{
    void *p = xen_overlay_get_shinfo_ptr();
@@ -380,3 +533,25 @@ int xen_evtchn_close_op(struct evtchn_close *close)
    return ret;
}
/*
 * Handle the EVTCHNOP_unmask hypercall: unmask @unmask->port and, if an
 * event was pending on it, deliver the upcall to the bound vCPU.
 *
 * Returns 0 on success, -ENOTSUP if event channel emulation is not
 * active, or -EINVAL for a bad port.
 */
int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
{
    XenEvtchnState *s = xen_evtchn_singleton;
    int err;

    if (!s) {
        return -ENOTSUP;
    }

    if (!valid_port(unmask->port)) {
        return -EINVAL;
    }

    qemu_mutex_lock(&s->port_lock);
    err = unmask_port(s, unmask->port, true);
    qemu_mutex_unlock(&s->port_lock);

    return err;
}

View File

@@ -17,7 +17,9 @@ int xen_evtchn_set_callback_param(uint64_t param);
struct evtchn_status;
struct evtchn_close;
struct evtchn_unmask;
int xen_evtchn_status_op(struct evtchn_status *status);
int xen_evtchn_close_op(struct evtchn_close *close);
int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
#endif /* QEMU_XEN_EVTCHN_H */

View File

@@ -817,6 +817,18 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
        err = xen_evtchn_close_op(&close);
        break;
    }
case EVTCHNOP_unmask: {
struct evtchn_unmask unmask;
qemu_build_assert(sizeof(unmask) == 4);
if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) {
err = -EFAULT;
break;
}
err = xen_evtchn_unmask_op(&unmask);
break;
}
    default:
        return false;
    }