ivshmem: use EventNotifier and memory API

All of ivshmem's usage of eventfd now has a corresponding API in
EventNotifier.  Simplify the code by using it, and also use the
memory API consistently to set up and tear down the ioeventfds.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Paolo Bonzini 2012-07-05 17:16:25 +02:00 committed by Avi Kivity
parent e80c262be7
commit 563027cc0c
1 changed file with 35 additions and 28 deletions
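
For readers who have not met the EventNotifier abstraction: it is a thin wrapper around a Linux eventfd, and event_notifier_set() performs the same 8-byte write that the open-coded write(..., &write_one, 8) removed by this patch did by hand; on the receiving side, the fd obtained with event_notifier_get_fd() is handed to memory_region_add_eventfd(), so a guest doorbell write is completed in the kernel and signalled through the eventfd instead of going through the device's MMIO handler. The standalone demo below (plain Linux API, not part of the patch; the file name and build line are only illustrative) shows the eventfd counter semantics involved:

/* eventfd-demo.c -- minimal illustration of the eventfd semantics that
 * QEMU's EventNotifier wraps.  Build with: gcc -o eventfd-demo eventfd-demo.c
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int fd = eventfd(0, 0);                 /* kernel counter starts at 0 */
    if (fd < 0) {
        perror("eventfd");
        return 1;
    }

    uint64_t one = 1;
    /* "set" the notifier: add 1 to the counter, exactly what the patch's
     * removed write(fd, &write_one, 8) call did manually */
    if (write(fd, &one, sizeof(one)) != sizeof(one)) {
        perror("write");
        return 1;
    }

    uint64_t value;
    /* consume the event: read returns the accumulated counter and resets it */
    if (read(fd, &value, sizeof(value)) != sizeof(value)) {
        perror("read");
        return 1;
    }
    printf("eventfd counter read back: %" PRIu64 "\n", value);  /* prints 1 */

    close(fd);
    return 0;
}
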


@@ -23,6 +23,7 @@
 #include "kvm.h"
 #include "migration.h"
 #include "qerror.h"
+#include "event_notifier.h"
 
 #include <sys/mman.h>
 #include <sys/types.h>
@@ -45,7 +46,7 @@
 typedef struct Peer {
     int nb_eventfds;
-    int *eventfds;
+    EventNotifier *eventfds;
 } Peer;
 
 typedef struct EventfdEntry {
@@ -63,7 +64,6 @@ typedef struct IVShmemState {
     CharDriverState *server_chr;
     MemoryRegion ivshmem_mmio;
-    pcibus_t mmio_addr;
     /* We might need to register the BAR before we actually have the memory.
      * So prepare a container MemoryRegion for the BAR immediately and
      * add a subregion when we have the memory.
@@ -168,7 +168,6 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
 {
     IVShmemState *s = opaque;
-    uint64_t write_one = 1;
     uint16_t dest = val >> 16;
     uint16_t vector = val & 0xff;
@@ -194,12 +193,8 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
         /* check doorbell range */
         if (vector < s->peers[dest].nb_eventfds) {
-            IVSHMEM_DPRINTF("Writing %" PRId64 " to VM %d on vector %d\n",
-                            write_one, dest, vector);
-            if (write(s->peers[dest].eventfds[vector],
-                      &(write_one), 8) != 8) {
-                IVSHMEM_DPRINTF("error writing to eventfd\n");
-            }
+            IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
+            event_notifier_set(&s->peers[dest].eventfds[vector]);
         }
         break;
     default:
@@ -279,12 +274,13 @@ static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
     msix_notify(pdev, entry->vector);
 }
 
-static CharDriverState* create_eventfd_chr_device(void * opaque, int eventfd,
-                                                  int vector)
+static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
+                                                  int vector)
 {
     /* create a event character device based on the passed eventfd */
     IVShmemState *s = opaque;
     CharDriverState * chr;
+    int eventfd = event_notifier_get_fd(n);
 
     chr = qemu_chr_open_eventfd(eventfd);
@@ -347,6 +343,26 @@ static void create_shared_memory_BAR(IVShmemState *s, int fd) {
     pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
 }
 
+static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
+{
+    memory_region_add_eventfd(&s->ivshmem_mmio,
+                              DOORBELL,
+                              4,
+                              true,
+                              (posn << 16) | i,
+                              event_notifier_get_fd(&s->peers[posn].eventfds[i]));
+}
+
+static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
+{
+    memory_region_del_eventfd(&s->ivshmem_mmio,
+                              DOORBELL,
+                              4,
+                              true,
+                              (posn << 16) | i,
+                              event_notifier_get_fd(&s->peers[posn].eventfds[i]));
+}
+
 static void close_guest_eventfds(IVShmemState *s, int posn)
 {
     int i, guest_curr_max;
@@ -354,9 +370,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
     guest_curr_max = s->peers[posn].nb_eventfds;
 
     for (i = 0; i < guest_curr_max; i++) {
-        kvm_set_ioeventfd_mmio(s->peers[posn].eventfds[i],
-                    s->mmio_addr + DOORBELL, (posn << 16) | i, 0, 4);
-        close(s->peers[posn].eventfds[i]);
+        ivshmem_del_eventfd(s, posn, i);
+        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
     }
 
     g_free(s->peers[posn].eventfds);
@@ -369,12 +384,7 @@ static void setup_ioeventfds(IVShmemState *s) {
     for (i = 0; i <= s->max_peer; i++) {
         for (j = 0; j < s->peers[i].nb_eventfds; j++) {
-            memory_region_add_eventfd(&s->ivshmem_mmio,
-                                      DOORBELL,
-                                      4,
-                                      true,
-                                      (i << 16) | j,
-                                      s->peers[i].eventfds[j]);
+            ivshmem_add_eventfd(s, i, j);
         }
     }
 }
@@ -476,14 +486,14 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
     if (guest_max_eventfd == 0) {
         /* one eventfd per MSI vector */
-        s->peers[incoming_posn].eventfds = (int *) g_malloc(s->vectors *
-                                                            sizeof(int));
+        s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
     }
 
     /* this is an eventfd for a particular guest VM */
     IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
                     guest_max_eventfd, incoming_fd);
-    s->peers[incoming_posn].eventfds[guest_max_eventfd] = incoming_fd;
+    event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
+                           incoming_fd);
 
     /* increment count for particular guest */
     s->peers[incoming_posn].nb_eventfds++;
@@ -495,15 +505,12 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
     if (incoming_posn == s->vm_id) {
         s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
-                   s->peers[s->vm_id].eventfds[guest_max_eventfd],
+                   &s->peers[s->vm_id].eventfds[guest_max_eventfd],
                    guest_max_eventfd);
     }
 
     if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
-        if (kvm_set_ioeventfd_mmio(incoming_fd, s->mmio_addr + DOORBELL,
-                        (incoming_posn << 16) | guest_max_eventfd, 1, 4) < 0) {
-            fprintf(stderr, "ivshmem: ioeventfd not available\n");
-        }
+        ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
     }
 
     return;