contrib: add ivshmem client and server

When using ivshmem devices, notifications between guests can be sent as
interrupts using an ivshmem server (typical use is described in the
documentation). The client is provided as a debug tool.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: David Marchand <david.marchand@6wind.com>
[fix a valgrind warning, option and server_close() segvs, extra server
header includes, getopt() return type, out-of-tree build, use qemu
event_notifier instead of eventfd, fix x86/osx warnings - Marc-André]
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
David Marchand 2014-09-08 11:17:48 +02:00 committed by Marc-André Lureau
parent 12f0b68c82
commit a75eb03b9f
12 changed files with 1759 additions and 3 deletions

Makefile

@@ -151,6 +151,8 @@ dummy := $(call unnest-vars,, \
stub-obj-y \
util-obj-y \
qga-obj-y \
ivshmem-client-obj-y \
ivshmem-server-obj-y \
qga-vss-dll-obj-y \
block-obj-y \
block-obj-m \
@@ -323,6 +325,11 @@ ifneq ($(EXESUF),)
qemu-ga: qemu-ga$(EXESUF) $(QGA_VSS_PROVIDER) $(QEMU_GA_MSI)
endif
ivshmem-client$(EXESUF): $(ivshmem-client-obj-y)
$(call LINK, $^)
ivshmem-server$(EXESUF): $(ivshmem-server-obj-y) libqemuutil.a libqemustub.a
$(call LINK, $^)
clean:
# avoid old build problems by removing potentially incorrect old files
rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h

Makefile.objs

@@ -104,3 +104,8 @@ target-obj-y += trace/
# by libqemuutil.a. These should be moved to a separate .json schema.
qga-obj-y = qga/
qga-vss-dll-obj-y = qga/
######################################################################
# contrib
ivshmem-client-obj-y = contrib/ivshmem-client/
ivshmem-server-obj-y = contrib/ivshmem-server/

configure

@@ -4508,6 +4508,7 @@ if test "$want_tools" = "yes" ; then
tools="qemu-img\$(EXESUF) qemu-io\$(EXESUF) $tools"
if [ "$linux" = "yes" -o "$bsd" = "yes" -o "$solaris" = "yes" ] ; then
tools="qemu-nbd\$(EXESUF) $tools"
tools="ivshmem-client\$(EXESUF) ivshmem-server\$(EXESUF) $tools"
fi
fi
if test "$softmmu" = yes ; then

contrib/ivshmem-client/Makefile.objs

@@ -0,0 +1 @@
ivshmem-client-obj-y = ivshmem-client.o main.o

contrib/ivshmem-client/ivshmem-client.c

@@ -0,0 +1,433 @@
/*
* Copyright 6WIND S.A., 2014
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "qemu-common.h"
#include "qemu/queue.h"
#include "ivshmem-client.h"
/* log a message on stdout if verbose=1 */
#define IVSHMEM_CLIENT_DEBUG(client, fmt, ...) do { \
if ((client)->verbose) { \
printf(fmt, ## __VA_ARGS__); \
} \
} while (0)
/* read a message from the unix socket: a long (a peer id or an index),
 * optionally carrying a file descriptor in SCM_RIGHTS ancillary data */
static int
ivshmem_client_read_one_msg(IvshmemClient *client, long *index, int *fd)
{
int ret;
struct msghdr msg;
struct iovec iov[1];
union {
struct cmsghdr cmsg;
char control[CMSG_SPACE(sizeof(int))];
} msg_control;
struct cmsghdr *cmsg;
iov[0].iov_base = index;
iov[0].iov_len = sizeof(*index);
memset(&msg, 0, sizeof(msg));
msg.msg_iov = iov;
msg.msg_iovlen = 1;
msg.msg_control = &msg_control;
msg.msg_controllen = sizeof(msg_control);
ret = recvmsg(client->sock_fd, &msg, 0);
if (ret < 0) {
IVSHMEM_CLIENT_DEBUG(client, "cannot read message: %s\n",
strerror(errno));
return -1;
}
if (ret == 0) {
IVSHMEM_CLIENT_DEBUG(client, "lost connection to server\n");
return -1;
}
*fd = -1;
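/* scan the ancillary data: at most one SCM_RIGHTS cmsg carrying the
 * transmitted file descriptor is expected */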
for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) ||
cmsg->cmsg_level != SOL_SOCKET ||
cmsg->cmsg_type != SCM_RIGHTS) {
continue;
}
memcpy(fd, CMSG_DATA(cmsg), sizeof(*fd));
}
return 0;
}
/* free a peer when the server advertises a disconnection or when the
* client is freed */
static void
ivshmem_client_free_peer(IvshmemClient *client, IvshmemClientPeer *peer)
{
unsigned vector;
QTAILQ_REMOVE(&client->peer_list, peer, next);
for (vector = 0; vector < peer->vectors_count; vector++) {
close(peer->vectors[vector]);
}
g_free(peer);
}
/* handle message coming from server (new peer, new vectors) */
static int
ivshmem_client_handle_server_msg(IvshmemClient *client)
{
IvshmemClientPeer *peer;
long peer_id;
int ret, fd;
ret = ivshmem_client_read_one_msg(client, &peer_id, &fd);
if (ret < 0) {
return -1;
}
/* can return a peer or the local client */
peer = ivshmem_client_search_peer(client, peer_id);
/* delete peer */
if (fd == -1) {
if (peer == NULL || peer == &client->local) {
IVSHMEM_CLIENT_DEBUG(client, "receive delete for invalid "
"peer %ld\n", peer_id);
return -1;
}
IVSHMEM_CLIENT_DEBUG(client, "delete peer id = %ld\n", peer_id);
ivshmem_client_free_peer(client, peer);
return 0;
}
/* new peer */
if (peer == NULL) {
peer = g_malloc0(sizeof(*peer));
peer->id = peer_id;
peer->vectors_count = 0;
QTAILQ_INSERT_TAIL(&client->peer_list, peer, next);
IVSHMEM_CLIENT_DEBUG(client, "new peer id = %ld\n", peer_id);
}
/* new vector */
IVSHMEM_CLIENT_DEBUG(client, " new vector %d (fd=%d) for peer id %ld\n",
peer->vectors_count, fd, peer->id);
if (peer->vectors_count >= IVSHMEM_CLIENT_MAX_VECTORS) {
IVSHMEM_CLIENT_DEBUG(client, "too many vectors for peer %ld\n", peer->id);
close(fd);
return -1;
}
peer->vectors[peer->vectors_count] = fd;
peer->vectors_count++;
return 0;
}
/* init a new ivshmem client */
int
ivshmem_client_init(IvshmemClient *client, const char *unix_sock_path,
IvshmemClientNotifCb notif_cb, void *notif_arg,
bool verbose)
{
int ret;
unsigned i;
memset(client, 0, sizeof(*client));
ret = snprintf(client->unix_sock_path, sizeof(client->unix_sock_path),
"%s", unix_sock_path);
if (ret < 0 || ret >= sizeof(client->unix_sock_path)) {
IVSHMEM_CLIENT_DEBUG(client, "could not copy unix socket path\n");
return -1;
}
for (i = 0; i < IVSHMEM_CLIENT_MAX_VECTORS; i++) {
client->local.vectors[i] = -1;
}
QTAILQ_INIT(&client->peer_list);
client->local.id = -1;
client->notif_cb = notif_cb;
client->notif_arg = notif_arg;
client->verbose = verbose;
client->shm_fd = -1;
client->sock_fd = -1;
return 0;
}
/* create and connect to the unix socket */
int
ivshmem_client_connect(IvshmemClient *client)
{
struct sockaddr_un sun;
int fd, ret;
long tmp;
IVSHMEM_CLIENT_DEBUG(client, "connect to client %s\n",
client->unix_sock_path);
client->sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (client->sock_fd < 0) {
IVSHMEM_CLIENT_DEBUG(client, "cannot create socket: %s\n",
strerror(errno));
return -1;
}
sun.sun_family = AF_UNIX;
ret = snprintf(sun.sun_path, sizeof(sun.sun_path), "%s",
client->unix_sock_path);
if (ret < 0 || ret >= sizeof(sun.sun_path)) {
IVSHMEM_CLIENT_DEBUG(client, "could not copy unix socket path\n");
goto err_close;
}
if (connect(client->sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
IVSHMEM_CLIENT_DEBUG(client, "cannot connect to %s: %s\n", sun.sun_path,
strerror(errno));
goto err_close;
}
/* first, we expect our index + a fd == -1 */
if (ivshmem_client_read_one_msg(client, &client->local.id, &fd) < 0 ||
client->local.id < 0 || fd != -1) {
IVSHMEM_CLIENT_DEBUG(client, "cannot read from server\n");
goto err_close;
}
IVSHMEM_CLIENT_DEBUG(client, "our_id=%ld\n", client->local.id);
/* now, we expect shared mem fd + a -1 index, note that shm fd
* is not used */
if (ivshmem_client_read_one_msg(client, &tmp, &fd) < 0 ||
tmp != -1 || fd < 0) {
if (fd >= 0) {
close(fd);
}
IVSHMEM_CLIENT_DEBUG(client, "cannot read from server (2)\n");
goto err_close;
}
client->shm_fd = fd;
IVSHMEM_CLIENT_DEBUG(client, "shm_fd=%d\n", fd);
return 0;
err_close:
close(client->sock_fd);
client->sock_fd = -1;
return -1;
}
/* close connection to the server, and free all peer structures */
void
ivshmem_client_close(IvshmemClient *client)
{
IvshmemClientPeer *peer;
unsigned i;
IVSHMEM_CLIENT_DEBUG(client, "close client\n");
while ((peer = QTAILQ_FIRST(&client->peer_list)) != NULL) {
ivshmem_client_free_peer(client, peer);
}
close(client->shm_fd);
client->shm_fd = -1;
close(client->sock_fd);
client->sock_fd = -1;
client->local.id = -1;
for (i = 0; i < IVSHMEM_CLIENT_MAX_VECTORS; i++) {
close(client->local.vectors[i]);
client->local.vectors[i] = -1;
}
client->local.vectors_count = 0;
}
/* get the fd_set according to the unix socket and peer list */
void
ivshmem_client_get_fds(const IvshmemClient *client, fd_set *fds, int *maxfd)
{
int fd;
unsigned vector;
FD_SET(client->sock_fd, fds);
if (client->sock_fd >= *maxfd) {
*maxfd = client->sock_fd + 1;
}
for (vector = 0; vector < client->local.vectors_count; vector++) {
fd = client->local.vectors[vector];
FD_SET(fd, fds);
if (fd >= *maxfd) {
*maxfd = fd + 1;
}
}
}
/* handle events from eventfd: just print a message on notification */
static int
ivshmem_client_handle_event(IvshmemClient *client, const fd_set *cur, int maxfd)
{
IvshmemClientPeer *peer;
uint64_t kick;
unsigned i;
int ret;
peer = &client->local;
for (i = 0; i < peer->vectors_count; i++) {
if (peer->vectors[i] >= maxfd || !FD_ISSET(peer->vectors[i], cur)) {
continue;
}
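/* a read() on an eventfd returns an 8-byte counter (the number of
 * kicks since the last read) and resets it */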
ret = read(peer->vectors[i], &kick, sizeof(kick));
if (ret < 0) {
return ret;
}
if (ret != sizeof(kick)) {
IVSHMEM_CLIENT_DEBUG(client, "invalid read size = %d\n", ret);
errno = EINVAL;
return -1;
}
IVSHMEM_CLIENT_DEBUG(client, "received event on fd %d vector %d: %"
PRIu64 "\n", peer->vectors[i], i, kick);
if (client->notif_cb != NULL) {
client->notif_cb(client, peer, i, client->notif_arg);
}
}
return 0;
}
/* read and handle new messages on the given fd_set */
int
ivshmem_client_handle_fds(IvshmemClient *client, fd_set *fds, int maxfd)
{
if (client->sock_fd < maxfd && FD_ISSET(client->sock_fd, fds) &&
ivshmem_client_handle_server_msg(client) < 0 && errno != EINTR) {
IVSHMEM_CLIENT_DEBUG(client, "ivshmem_client_handle_server_msg() "
"failed\n");
return -1;
} else if (ivshmem_client_handle_event(client, fds, maxfd) < 0 &&
errno != EINTR) {
IVSHMEM_CLIENT_DEBUG(client, "ivshmem_client_handle_event() failed\n");
return -1;
}
return 0;
}
/* send a notification on a vector of a peer */
int
ivshmem_client_notify(const IvshmemClient *client,
const IvshmemClientPeer *peer, unsigned vector)
{
uint64_t kick;
int fd;
if (vector >= peer->vectors_count) {
IVSHMEM_CLIENT_DEBUG(client, "invalid vector %u on peer %ld\n",
vector, peer->id);
return -1;
}
fd = peer->vectors[vector];
IVSHMEM_CLIENT_DEBUG(client, "notify peer %ld on vector %d, fd %d\n",
peer->id, vector, fd);
kick = 1;
if (write(fd, &kick, sizeof(kick)) != sizeof(kick)) {
fprintf(stderr, "could not write to %d: %s\n", peer->vectors[vector],
strerror(errno));
return -1;
}
return 0;
}
/* send a notification to all vectors of a peer */
int
ivshmem_client_notify_all_vects(const IvshmemClient *client,
const IvshmemClientPeer *peer)
{
unsigned vector;
int ret = 0;
for (vector = 0; vector < peer->vectors_count; vector++) {
if (ivshmem_client_notify(client, peer, vector) < 0) {
ret = -1;
}
}
return ret;
}
/* send a notification to all peers */
int
ivshmem_client_notify_broadcast(const IvshmemClient *client)
{
IvshmemClientPeer *peer;
int ret = 0;
QTAILQ_FOREACH(peer, &client->peer_list, next) {
if (ivshmem_client_notify_all_vects(client, peer) < 0) {
ret = -1;
}
}
return ret;
}
/* lookup peer from its id */
IvshmemClientPeer *
ivshmem_client_search_peer(IvshmemClient *client, long peer_id)
{
IvshmemClientPeer *peer;
if (peer_id == client->local.id) {
return &client->local;
}
QTAILQ_FOREACH(peer, &client->peer_list, next) {
if (peer->id == peer_id) {
return peer;
}
}
return NULL;
}
/* dump our info and the list of peers and their vectors on stdout */
void
ivshmem_client_dump(const IvshmemClient *client)
{
const IvshmemClientPeer *peer;
unsigned vector;
/* dump local info */
peer = &client->local;
printf("our_id = %ld\n", peer->id);
for (vector = 0; vector < peer->vectors_count; vector++) {
printf(" vector %d is enabled (fd=%d)\n", vector,
peer->vectors[vector]);
}
/* dump peers */
QTAILQ_FOREACH(peer, &client->peer_list, next) {
printf("peer_id = %ld\n", peer->id);
for (vector = 0; vector < peer->vectors_count; vector++) {
printf(" vector %d is enabled (fd=%d)\n", vector,
peer->vectors[vector]);
}
}
}

contrib/ivshmem-client/ivshmem-client.h

@@ -0,0 +1,212 @@
/*
* Copyright 6WIND S.A., 2014
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
#ifndef IVSHMEM_CLIENT_H
#define IVSHMEM_CLIENT_H
/**
* This file provides helpers to implement an ivshmem client. It is used
* on the host to ask QEMU to send an interrupt to an ivshmem PCI device in a
* guest. QEMU also implements an ivshmem client similar to this one; they both
* connect to an ivshmem server.
*
* A standalone ivshmem client based on this file is provided for debug/test
* purposes.
*/
#include <limits.h>
#include <sys/select.h>
#include "qemu/queue.h"
/**
* Maximum number of notification vectors supported by the client
*/
#define IVSHMEM_CLIENT_MAX_VECTORS 64
/**
* Structure storing a peer
*
* Each time a client connects to an ivshmem server, it is advertised to
* all connected clients through the unix socket. When our ivshmem
* client receives a notification, it creates an IvshmemClientPeer
* structure to store the information about this peer.
*
* This structure is also used to store the information of our own
* client in (IvshmemClient)->local.
*/
typedef struct IvshmemClientPeer {
QTAILQ_ENTRY(IvshmemClientPeer) next; /**< next in list*/
long id; /**< the id of the peer */
int vectors[IVSHMEM_CLIENT_MAX_VECTORS]; /**< one fd per vector */
unsigned vectors_count; /**< number of vectors */
} IvshmemClientPeer;
QTAILQ_HEAD(IvshmemClientPeerList, IvshmemClientPeer);
typedef struct IvshmemClientPeerList IvshmemClientPeerList;
typedef struct IvshmemClient IvshmemClient;
/**
* Typedef of callback function used when our IvshmemClient receives a
* notification from a peer.
*/
typedef void (*IvshmemClientNotifCb)(
const IvshmemClient *client,
const IvshmemClientPeer *peer,
unsigned vect, void *arg);
/**
* Structure describing an ivshmem client
*
* This structure stores all information related to our client: the name
* of the server unix socket, the list of peers advertised by the
* server, our own client information, and a pointer to the notification
* callback function used when we receive a notification from a peer.
*/
struct IvshmemClient {
char unix_sock_path[PATH_MAX]; /**< path to unix sock */
int sock_fd; /**< unix sock filedesc */
int shm_fd; /**< shm file descriptor */
IvshmemClientPeerList peer_list; /**< list of peers */
IvshmemClientPeer local; /**< our own info */
IvshmemClientNotifCb notif_cb; /**< notification callback */
void *notif_arg; /**< notification argument */
bool verbose; /**< true to enable debug */
};
/**
* Initialize an ivshmem client
*
* @client: A pointer to an uninitialized IvshmemClient structure
* @unix_sock_path: The pointer to the unix socket file name
* @notif_cb: If not NULL, the pointer to the function to be called when
* our IvshmemClient receives a notification from a peer
* @notif_arg: Opaque pointer given as-is to the notification callback
* function
* @verbose: True to enable debug
*
* Returns: 0 on success, or a negative value on error
*/
int ivshmem_client_init(IvshmemClient *client, const char *unix_sock_path,
IvshmemClientNotifCb notif_cb, void *notif_arg,
bool verbose);
/**
* Connect to the server
*
* Connect to the server unix socket, and read the first initial
* messages sent by the server, giving the ID of the client and the file
* descriptor of the shared memory.
*
* @client: The ivshmem client
*
* Returns: 0 on success, or a negative value on error
*/
int ivshmem_client_connect(IvshmemClient *client);
/**
* Close connection to the server and free all peer structures
*
* @client: The ivshmem client
*/
void ivshmem_client_close(IvshmemClient *client);
/**
* Fill a fd_set with file descriptors to be monitored
*
* This function will fill a fd_set with all file descriptors
* that must be polled (the unix server socket and the peers' eventfds). The
* function will not initialize the fd_set; it is up to the caller
* to do this.
*
* @client: The ivshmem client
* @fds: The fd_set to be updated
* @maxfd: Must be set to the max file descriptor + 1 in fd_set. This value is
* updated if this function adds a greater fd in fd_set.
*/
void ivshmem_client_get_fds(const IvshmemClient *client, fd_set *fds,
int *maxfd);
/**
* Read and handle new messages
*
* Given a fd_set filled by select(), handle incoming messages from
* server or peers.
*
* @client: The ivshmem client
* @fds: The fd_set containing the file descriptors to be checked. Note
* that file descriptors that are not related to our client are
* ignored.
* @maxfd: The maximum fd in fd_set, plus one.
*
* Returns: 0 on success, or a negative value on error
*/
int ivshmem_client_handle_fds(IvshmemClient *client, fd_set *fds, int maxfd);
/**
* Send a notification to a vector of a peer
*
* @client: The ivshmem client
* @peer: The peer to be notified
* @vector: The number of the vector
*
* Returns: 0 on success, or a negative value on error
*/
int ivshmem_client_notify(const IvshmemClient *client,
const IvshmemClientPeer *peer, unsigned vector);
/**
* Send a notification to all vectors of a peer
*
* @client: The ivshmem client
* @peer: The peer to be notified
*
* Returns: 0 on success, or a negative value on error (at least one
* notification failed)
*/
int ivshmem_client_notify_all_vects(const IvshmemClient *client,
const IvshmemClientPeer *peer);
/**
* Broadcast a notification to all vectors of all peers
*
* @client: The ivshmem client
*
* Returns: 0 on success, or a negative value on error (at least one
* notification failed)
*/
int ivshmem_client_notify_broadcast(const IvshmemClient *client);
/**
* Search a peer from its identifier
*
* Return the peer structure from its peer_id. If the given peer_id is
* the local id, the function returns the local peer structure.
*
* @client: The ivshmem client
* @peer_id: The identifier of the peer structure
*
* Returns: The peer structure, or NULL if not found
*/
IvshmemClientPeer *
ivshmem_client_search_peer(IvshmemClient *client, long peer_id);
/**
* Dump information of this ivshmem client on stdout
*
* Dump the id and the vectors of the given ivshmem client and the list
* of its peers and their vectors on stdout.
*
* @client: The ivshmem client
*/
void ivshmem_client_dump(const IvshmemClient *client);
#endif /* IVSHMEM_CLIENT_H */
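For orientation, here is a minimal, hypothetical sketch of the intended call sequence for this API (function names example_* are illustrative, not from the commit; error and EINTR handling are trimmed; the standalone client in contrib/ivshmem-client/main.c below is the complete version):

#include <stdio.h>
#include <stdbool.h>
#include "ivshmem-client.h"

/* called each time a peer kicks one of our vectors */
static void example_notif_cb(const IvshmemClient *client,
                             const IvshmemClientPeer *peer,
                             unsigned vect, void *arg)
{
    printf("kick from peer %ld vector %u\n", peer->id, vect);
}

static int example_client_loop(void)
{
    IvshmemClient client;
    fd_set fds;
    int maxfd;

    ivshmem_client_init(&client, "/tmp/ivshmem_socket",
                        example_notif_cb, NULL, true);
    if (ivshmem_client_connect(&client) < 0) {
        return -1;
    }
    for (;;) {
        FD_ZERO(&fds);   /* the helper does not initialize the fd_set */
        maxfd = 0;
        ivshmem_client_get_fds(&client, &fds, &maxfd);
        if (select(maxfd, &fds, NULL, NULL, NULL) < 0 ||
            ivshmem_client_handle_fds(&client, &fds, maxfd) < 0) {
            break;       /* error or connection to the server lost */
        }
    }
    ivshmem_client_close(&client);
    return 0;
}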

contrib/ivshmem-client/main.c

@@ -0,0 +1,239 @@
/*
* Copyright 6WIND S.A., 2014
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
#include "qemu-common.h"
#include "ivshmem-client.h"
#define IVSHMEM_CLIENT_DEFAULT_VERBOSE 0
#define IVSHMEM_CLIENT_DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
typedef struct IvshmemClientArgs {
bool verbose;
const char *unix_sock_path;
} IvshmemClientArgs;
/* show ivshmem_client_usage and exit with given error code */
static void
ivshmem_client_usage(const char *name, int code)
{
fprintf(stderr, "%s [opts]\n", name);
fprintf(stderr, " -h: show this help\n");
fprintf(stderr, " -v: verbose mode\n");
fprintf(stderr, " -S <unix_sock_path>: path to the unix socket\n"
" to connect to.\n"
" default=%s\n", IVSHMEM_CLIENT_DEFAULT_UNIX_SOCK_PATH);
exit(code);
}
/* parse the program arguments, exit on error */
static void
ivshmem_client_parse_args(IvshmemClientArgs *args, int argc, char *argv[])
{
int c;
while ((c = getopt(argc, argv,
"h" /* help */
"v" /* verbose */
"S:" /* unix_sock_path */
)) != -1) {
switch (c) {
case 'h': /* help */
ivshmem_client_usage(argv[0], 0);
break;
case 'v': /* verbose */
args->verbose = 1;
break;
case 'S': /* unix_sock_path */
args->unix_sock_path = strdup(optarg);
break;
default:
ivshmem_client_usage(argv[0], 1);
break;
}
}
}
/* show command line help */
static void
ivshmem_client_cmdline_help(void)
{
printf("dump: dump peers (including us)\n"
"int <peer> <vector>: notify one vector on a peer\n"
"int <peer> all: notify all vectors of a peer\n"
"int all: notify all vectors of all peers (excepting us)\n");
}
/* read stdin and handle commands */
static int
ivshmem_client_handle_stdin_command(IvshmemClient *client)
{
IvshmemClientPeer *peer;
char buf[128];
char *s, *token;
int ret;
int peer_id, vector;
memset(buf, 0, sizeof(buf));
ret = read(0, buf, sizeof(buf) - 1);
if (ret < 0) {
return -1;
}
s = buf;
while ((token = strsep(&s, "\n\r;")) != NULL) {
if (!strcmp(token, "")) {
continue;
}
if (!strcmp(token, "?")) {
ivshmem_client_cmdline_help();
}
if (!strcmp(token, "help")) {
ivshmem_client_cmdline_help();
} else if (!strcmp(token, "dump")) {
ivshmem_client_dump(client);
} else if (!strcmp(token, "int all")) {
ivshmem_client_notify_broadcast(client);
} else if (sscanf(token, "int %d %d", &peer_id, &vector) == 2) {
peer = ivshmem_client_search_peer(client, peer_id);
if (peer == NULL) {
printf("cannot find peer_id = %d\n", peer_id);
continue;
}
ivshmem_client_notify(client, peer, vector);
} else if (sscanf(token, "int %d all", &peer_id) == 1) {
peer = ivshmem_client_search_peer(client, peer_id);
if (peer == NULL) {
printf("cannot find peer_id = %d\n", peer_id);
continue;
}
ivshmem_client_notify_all_vects(client, peer);
} else {
printf("invalid command, type help\n");
}
}
printf("cmd> ");
fflush(stdout);
return 0;
}
/* listen on stdin (command line), on unix socket (notifications of new
* and dead peers), and on eventfd (IRQ request) */
static int
ivshmem_client_poll_events(IvshmemClient *client)
{
fd_set fds;
int ret, maxfd;
while (1) {
FD_ZERO(&fds);
FD_SET(0, &fds); /* add stdin in fd_set */
maxfd = 1;
ivshmem_client_get_fds(client, &fds, &maxfd);
ret = select(maxfd, &fds, NULL, NULL, NULL);
if (ret < 0) {
if (errno == EINTR) {
continue;
}
fprintf(stderr, "select error: %s\n", strerror(errno));
break;
}
if (ret == 0) {
continue;
}
if (FD_ISSET(0, &fds) &&
ivshmem_client_handle_stdin_command(client) < 0 && errno != EINTR) {
fprintf(stderr, "ivshmem_client_handle_stdin_command() failed\n");
break;
}
if (ivshmem_client_handle_fds(client, &fds, maxfd) < 0) {
fprintf(stderr, "ivshmem_client_handle_fds() failed\n");
break;
}
}
return ret;
}
/* callback when we receive a notification (just display it) */
static void
ivshmem_client_notification_cb(const IvshmemClient *client,
const IvshmemClientPeer *peer,
unsigned vect, void *arg)
{
(void)client;
(void)arg;
printf("receive notification from peer_id=%ld vector=%d\n", peer->id, vect);
}
int
main(int argc, char *argv[])
{
struct sigaction sa;
IvshmemClient client;
IvshmemClientArgs args = {
.verbose = IVSHMEM_CLIENT_DEFAULT_VERBOSE,
.unix_sock_path = IVSHMEM_CLIENT_DEFAULT_UNIX_SOCK_PATH,
};
/* parse arguments, will exit on error */
ivshmem_client_parse_args(&args, argc, argv);
/* Ignore SIGPIPE, see this link for more info:
* http://www.mail-archive.com/libevent-users@monkey.org/msg01606.html */
sa.sa_handler = SIG_IGN;
sa.sa_flags = 0;
if (sigemptyset(&sa.sa_mask) == -1 ||
sigaction(SIGPIPE, &sa, 0) == -1) {
perror("failed to ignore SIGPIPE; sigaction");
return 1;
}
ivshmem_client_cmdline_help();
printf("cmd> ");
fflush(stdout);
if (ivshmem_client_init(&client, args.unix_sock_path,
ivshmem_client_notification_cb, NULL,
args.verbose) < 0) {
fprintf(stderr, "cannot init client\n");
return 1;
}
while (1) {
if (ivshmem_client_connect(&client) < 0) {
fprintf(stderr, "cannot connect to server, retry in 1 second\n");
sleep(1);
continue;
}
fprintf(stdout, "listen on server socket %d\n", client.sock_fd);
if (ivshmem_client_poll_events(&client) == 0) {
continue;
}
/* disconnected from server, reset all peers */
fprintf(stdout, "disconnected from server\n");
ivshmem_client_close(&client);
}
return 0;
}

contrib/ivshmem-server/Makefile.objs

@@ -0,0 +1 @@
ivshmem-server-obj-y = ivshmem-server.o main.o

contrib/ivshmem-server/ivshmem-server.c

@@ -0,0 +1,422 @@
/*
* Copyright 6WIND S.A., 2014
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
#include "qemu-common.h"
#include "qemu/sockets.h"
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "ivshmem-server.h"
/* log a message on stdout if verbose=1 */
#define IVSHMEM_SERVER_DEBUG(server, fmt, ...) do { \
if ((server)->verbose) { \
printf(fmt, ## __VA_ARGS__); \
} \
} while (0)
/** maximum size of a huge page, used by ivshmem_server_ftruncate() */
#define IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE (1024 * 1024 * 1024)
/** default listen backlog (number of pending connections not yet accepted) */
#define IVSHMEM_SERVER_LISTEN_BACKLOG 10
/* send a message (a peer id, optionally with a file descriptor passed as
 * SCM_RIGHTS ancillary data) to a client unix socket */
static int
ivshmem_server_send_one_msg(int sock_fd, long peer_id, int fd)
{
int ret;
struct msghdr msg;
struct iovec iov[1];
union {
struct cmsghdr cmsg;
char control[CMSG_SPACE(sizeof(int))];
} msg_control;
struct cmsghdr *cmsg;
iov[0].iov_base = &peer_id;
iov[0].iov_len = sizeof(peer_id);
memset(&msg, 0, sizeof(msg));
msg.msg_iov = iov;
msg.msg_iovlen = 1;
/* if fd is specified, add it in a cmsg */
if (fd >= 0) {
memset(&msg_control, 0, sizeof(msg_control));
msg.msg_control = &msg_control;
msg.msg_controllen = sizeof(msg_control);
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
}
ret = sendmsg(sock_fd, &msg, 0);
if (ret <= 0) {
return -1;
}
return 0;
}
/* free a peer when the server advertises a disconnection or when the
* server is freed */
static void
ivshmem_server_free_peer(IvshmemServer *server, IvshmemServerPeer *peer)
{
unsigned vector;
IvshmemServerPeer *other_peer;
IVSHMEM_SERVER_DEBUG(server, "free peer %ld\n", peer->id);
close(peer->sock_fd);
QTAILQ_REMOVE(&server->peer_list, peer, next);
/* advertise the deletion to other peers */
QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id, -1);
}
for (vector = 0; vector < peer->vectors_count; vector++) {
event_notifier_cleanup(&peer->vectors[vector]);
}
g_free(peer);
}
/* send the peer id and the shm_fd just after a new client connection */
static int
ivshmem_server_send_initial_info(IvshmemServer *server, IvshmemServerPeer *peer)
{
int ret;
/* send the peer id to the client */
ret = ivshmem_server_send_one_msg(peer->sock_fd, peer->id, -1);
if (ret < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot send peer id: %s\n",
strerror(errno));
return -1;
}
/* send the shm_fd */
ret = ivshmem_server_send_one_msg(peer->sock_fd, -1, server->shm_fd);
if (ret < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot send shm fd: %s\n",
strerror(errno));
return -1;
}
return 0;
}
/* handle message on listening unix socket (new client connection) */
static int
ivshmem_server_handle_new_conn(IvshmemServer *server)
{
IvshmemServerPeer *peer, *other_peer;
struct sockaddr_un unaddr;
socklen_t unaddr_len;
int newfd;
unsigned i;
/* accept the incoming connection */
unaddr_len = sizeof(unaddr);
newfd = qemu_accept(server->sock_fd,
(struct sockaddr *)&unaddr, &unaddr_len);
if (newfd < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot accept() %s\n", strerror(errno));
return -1;
}
qemu_set_nonblock(newfd);
IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd);
/* allocate new structure for this peer */
peer = g_malloc0(sizeof(*peer));
peer->sock_fd = newfd;
/* get an unused peer id */
while (ivshmem_server_search_peer(server, server->cur_id) != NULL) {
server->cur_id++;
}
peer->id = server->cur_id++;
/* create eventfd, one per vector */
peer->vectors_count = server->n_vectors;
for (i = 0; i < peer->vectors_count; i++) {
if (event_notifier_init(&peer->vectors[i], false) < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot create eventfd\n");
goto fail;
}
}
/* send peer id and shm fd */
if (ivshmem_server_send_initial_info(server, peer) < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot send initial info\n");
goto fail;
}
/* advertise the new peer to others */
QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
for (i = 0; i < peer->vectors_count; i++) {
ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id,
peer->vectors[i].wfd);
}
}
/* advertise the other peers to the new one */
QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
for (i = 0; i < peer->vectors_count; i++) {
ivshmem_server_send_one_msg(peer->sock_fd, other_peer->id,
other_peer->vectors[i].wfd);
}
}
/* advertise the new peer to itself */
for (i = 0; i < peer->vectors_count; i++) {
ivshmem_server_send_one_msg(peer->sock_fd, peer->id,
event_notifier_get_fd(&peer->vectors[i]));
}
QTAILQ_INSERT_TAIL(&server->peer_list, peer, next);
IVSHMEM_SERVER_DEBUG(server, "new peer id = %ld\n",
peer->id);
return 0;
fail:
while (i--) {
event_notifier_cleanup(&peer->vectors[i]);
}
close(newfd);
g_free(peer);
return -1;
}
/* Try to ftruncate a file to the next power of 2 of shmsize.
 * If it fails, all powers of 2 above shmsize are tested until
 * we reach the maximum huge page size. This is useful
 * if the shm file is in a hugetlbfs that cannot be truncated to the
 * shm_size value. */
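/* Illustrative scenario (not taken from the commit): on a hugetlbfs
 * backend with 1 GB pages that rejects smaller truncations, a 4 MB
 * request stays 4 MB after pow2ceil(), then the loop retries 8 MB,
 * 16 MB, ... until the 1 GB attempt matches the page size and succeeds. */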
static int
ivshmem_server_ftruncate(int fd, unsigned shmsize)
{
int ret;
struct stat mapstat;
/* align shmsize to next power of 2 */
shmsize = pow2ceil(shmsize);
if (fstat(fd, &mapstat) != -1 && mapstat.st_size == shmsize) {
return 0;
}
while (shmsize <= IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE) {
ret = ftruncate(fd, shmsize);
if (ret == 0) {
return ret;
}
shmsize *= 2;
}
return -1;
}
/* Init a new ivshmem server */
int
ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
const char *shm_path, size_t shm_size, unsigned n_vectors,
bool verbose)
{
int ret;
memset(server, 0, sizeof(*server));
server->verbose = verbose;
ret = snprintf(server->unix_sock_path, sizeof(server->unix_sock_path),
"%s", unix_sock_path);
if (ret < 0 || ret >= sizeof(server->unix_sock_path)) {
IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
return -1;
}
ret = snprintf(server->shm_path, sizeof(server->shm_path),
"%s", shm_path);
if (ret < 0 || ret >= sizeof(server->shm_path)) {
IVSHMEM_SERVER_DEBUG(server, "could not copy shm path\n");
return -1;
}
server->shm_size = shm_size;
server->n_vectors = n_vectors;
QTAILQ_INIT(&server->peer_list);
return 0;
}
/* open shm, create and bind to the unix socket */
int
ivshmem_server_start(IvshmemServer *server)
{
struct sockaddr_un sun;
int shm_fd, sock_fd, ret;
/* open shm file */
shm_fd = shm_open(server->shm_path, O_CREAT|O_RDWR, S_IRWXU);
if (shm_fd < 0) {
fprintf(stderr, "cannot open shm file %s: %s\n", server->shm_path,
strerror(errno));
return -1;
}
if (ivshmem_server_ftruncate(shm_fd, server->shm_size) < 0) {
fprintf(stderr, "ftruncate(%s) failed: %s\n", server->shm_path,
strerror(errno));
goto err_close_shm;
}
IVSHMEM_SERVER_DEBUG(server, "create & bind socket %s\n",
server->unix_sock_path);
/* create the unix listening socket */
sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (sock_fd < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot create socket: %s\n",
strerror(errno));
goto err_close_shm;
}
sun.sun_family = AF_UNIX;
ret = snprintf(sun.sun_path, sizeof(sun.sun_path), "%s",
server->unix_sock_path);
if (ret < 0 || ret >= sizeof(sun.sun_path)) {
IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
goto err_close_sock;
}
if (bind(sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot connect to %s: %s\n", sun.sun_path,
strerror(errno));
goto err_close_sock;
}
if (listen(sock_fd, IVSHMEM_SERVER_LISTEN_BACKLOG) < 0) {
IVSHMEM_SERVER_DEBUG(server, "listen() failed: %s\n", strerror(errno));
goto err_close_sock;
}
server->sock_fd = sock_fd;
server->shm_fd = shm_fd;
return 0;
err_close_sock:
close(sock_fd);
err_close_shm:
close(shm_fd);
return -1;
}
/* close connections to clients, the unix socket and the shm fd */
void
ivshmem_server_close(IvshmemServer *server)
{
IvshmemServerPeer *peer, *npeer;
IVSHMEM_SERVER_DEBUG(server, "close server\n");
QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, npeer) {
ivshmem_server_free_peer(server, peer);
}
unlink(server->unix_sock_path);
close(server->sock_fd);
close(server->shm_fd);
server->sock_fd = -1;
server->shm_fd = -1;
}
/* get the fd_set according to the unix socket and the peer list */
void
ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd)
{
IvshmemServerPeer *peer;
if (server->sock_fd == -1) {
return;
}
FD_SET(server->sock_fd, fds);
if (server->sock_fd >= *maxfd) {
*maxfd = server->sock_fd + 1;
}
QTAILQ_FOREACH(peer, &server->peer_list, next) {
FD_SET(peer->sock_fd, fds);
if (peer->sock_fd >= *maxfd) {
*maxfd = peer->sock_fd + 1;
}
}
}
/* process incoming messages on the sockets in fd_set */
int
ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd)
{
IvshmemServerPeer *peer, *peer_next;
if (server->sock_fd < maxfd && FD_ISSET(server->sock_fd, fds) &&
ivshmem_server_handle_new_conn(server) < 0 && errno != EINTR) {
IVSHMEM_SERVER_DEBUG(server, "ivshmem_server_handle_new_conn() "
"failed\n");
return -1;
}
QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, peer_next) {
/* any message from a peer socket results in closing the connection */
IVSHMEM_SERVER_DEBUG(server, "peer->sock_fd=%d\n", peer->sock_fd);
if (peer->sock_fd < maxfd && FD_ISSET(peer->sock_fd, fds)) {
ivshmem_server_free_peer(server, peer);
}
}
return 0;
}
/* lookup peer from its id */
IvshmemServerPeer *
ivshmem_server_search_peer(IvshmemServer *server, long peer_id)
{
IvshmemServerPeer *peer;
QTAILQ_FOREACH(peer, &server->peer_list, next) {
if (peer->id == peer_id) {
return peer;
}
}
return NULL;
}
/* dump the list of peers and their vectors on stdout */
void
ivshmem_server_dump(const IvshmemServer *server)
{
const IvshmemServerPeer *peer;
unsigned vector;
/* dump peers */
QTAILQ_FOREACH(peer, &server->peer_list, next) {
printf("peer_id = %ld\n", peer->id);
for (vector = 0; vector < peer->vectors_count; vector++) {
printf(" vector %d is enabled (fd=%d)\n", vector,
event_notifier_get_fd(&peer->vectors[vector]));
}
}
}

contrib/ivshmem-server/ivshmem-server.h

@@ -0,0 +1,167 @@
/*
* Copyright 6WIND S.A., 2014
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
#ifndef IVSHMEM_SERVER_H
#define IVSHMEM_SERVER_H
/**
* The ivshmem server is a daemon that creates a unix socket in listen
* mode. The ivshmem clients (qemu or ivshmem-client) connect to this
* unix socket. For each client, the server will create several eventfds
* (see eventfd(2)), one per vector. These fds are transmitted to all
* clients using SCM_RIGHTS cmsg messages. Therefore, each client is
* able to send a notification to another client without being
* proxied by the server.
*
* We use this mechanism to send interrupts between guests.
* QEMU is able to transform an event on an eventfd into a PCI MSI-X
* interrupt in the guest.
*
* The ivshmem server is also able to share the file descriptor
* associated to the ivshmem shared memory.
*/
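/*
* Summary of the wire protocol, as implemented by ivshmem-server.c and
* ivshmem-client.c: each message is one long, optionally carrying a
* file descriptor in SCM_RIGHTS ancillary data. Right after accept(),
* the server sends to the new client:
* - the allocated peer id, with no fd attached,
* - -1 together with the shared memory fd,
* - one message per peer (the new one included) and per vector, with
*   the peer id and the corresponding eventfd attached.
* The new peer's eventfds are also advertised to the clients already
* connected. Later, a peer id with no fd attached advertises the
* disconnection of that peer.
*/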
#include <limits.h>
#include <sys/select.h>
#include <stdint.h>
#include <stdbool.h>
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
/**
* Maximum number of notification vectors supported by the server
*/
#define IVSHMEM_SERVER_MAX_VECTORS 64
/**
* Structure storing a peer
*
* Each time a client connects to an ivshmem server, a new
* IvshmemServerPeer structure is created. This peer and all its
* vectors are advertised to all connected clients through the connected
* unix sockets.
*/
typedef struct IvshmemServerPeer {
QTAILQ_ENTRY(IvshmemServerPeer) next; /**< next in list*/
int sock_fd; /**< connected unix sock */
long id; /**< the id of the peer */
EventNotifier vectors[IVSHMEM_SERVER_MAX_VECTORS]; /**< one per vector */
unsigned vectors_count; /**< number of vectors */
} IvshmemServerPeer;
QTAILQ_HEAD(IvshmemServerPeerList, IvshmemServerPeer);
typedef struct IvshmemServerPeerList IvshmemServerPeerList;
/**
* Structure describing an ivshmem server
*
* This structure stores all information related to our server: the name
* of the server unix socket and the list of connected peers.
*/
typedef struct IvshmemServer {
char unix_sock_path[PATH_MAX]; /**< path to unix socket */
int sock_fd; /**< unix sock file descriptor */
char shm_path[PATH_MAX]; /**< path to shm */
size_t shm_size; /**< size of shm */
int shm_fd; /**< shm file descriptor */
unsigned n_vectors; /**< number of vectors */
long cur_id; /**< id to be given to next client */
bool verbose; /**< true in verbose mode */
IvshmemServerPeerList peer_list; /**< list of peers */
} IvshmemServer;
/**
* Initialize an ivshmem server
*
* @server: A pointer to an uninitialized IvshmemServer structure
* @unix_sock_path: The pointer to the unix socket file name
* @shm_path: Path to the shared memory. The path corresponds to a POSIX
* shm name. To use a real file, for instance in a hugetlbfs,
* it is possible to use /../../abspath/to/file.
* @shm_size: Size of shared memory
* @n_vectors: Number of interrupt vectors per client
* @verbose: True to enable verbose mode
*
* Returns: 0 on success, or a negative value on error
*/
int
ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
const char *shm_path, size_t shm_size, unsigned n_vectors,
bool verbose);
/**
* Open the shm, then create and bind to the unix socket
*
* @server: The pointer to the initialized IvshmemServer structure
*
* Returns: 0 on success, or a negative value on error
*/
int ivshmem_server_start(IvshmemServer *server);
/**
* Close the server
*
* Close connections to all clients, close the unix socket and the
* shared memory file descriptor. The structure remains initialized, so
* it is possible to call ivshmem_server_start() again after a call to
* ivshmem_server_close().
*
* @server: The ivshmem server
*/
void ivshmem_server_close(IvshmemServer *server);
/**
* Fill a fd_set with file descriptors to be monitored
*
* This function will fill a fd_set with all file descriptors that must
* be polled (unix server socket and peers unix socket). The function
* will not initialize the fd_set, it is up to the caller to do it.
*
* @server: The ivshmem server
* @fds: The fd_set to be updated
* @maxfd: Must be set to the max file descriptor + 1 in fd_set. This value is
* updated if this function adds a greater fd in fd_set.
*/
void
ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd);
/**
* Read and handle new messages
*
* Given a fd_set (for instance filled by a call to select()), handle
* incoming messages from peers.
*
* @server: The ivshmem server
* @fds: The fd_set containing the file descriptors to be checked. Note that
* file descriptors that are not related to our server are ignored.
* @maxfd: The maximum fd in fd_set, plus one.
*
* Returns: 0 on success, or a negative value on error
*/
int ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd);
/**
* Search a peer from its identifier
*
* @server: The ivshmem server
* @peer_id: The identifier of the peer structure
*
* Returns: The peer structure, or NULL if not found
*/
IvshmemServerPeer *
ivshmem_server_search_peer(IvshmemServer *server, long peer_id);
/**
* Dump information of this ivshmem server and its peers on stdout
*
* @server: The ivshmem server
*/
void ivshmem_server_dump(const IvshmemServer *server);
#endif /* IVSHMEM_SERVER_H */
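Likewise, a minimal sketch of how these entry points chain together (the function name example_server_loop is illustrative; error and EINTR handling are trimmed; the standalone server in contrib/ivshmem-server/main.c below adds argument parsing, daemonization and signal handling):

#include "ivshmem-server.h"

static int example_server_loop(void)
{
    IvshmemServer server;
    fd_set fds;
    int maxfd;

    /* 4 MB shm named "ivshmem", one vector per client, verbose */
    if (ivshmem_server_init(&server, "/tmp/ivshmem_socket", "ivshmem",
                            4 * 1024 * 1024, 1, true) < 0 ||
        ivshmem_server_start(&server) < 0) {
        return -1;
    }
    for (;;) {
        FD_ZERO(&fds);
        maxfd = 0;
        ivshmem_server_get_fds(&server, &fds, &maxfd);
        if (select(maxfd, &fds, NULL, NULL, NULL) < 0 ||
            ivshmem_server_handle_fds(&server, &fds, maxfd) < 0) {
            break;
        }
    }
    ivshmem_server_close(&server);
    return 0;
}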

contrib/ivshmem-server/main.c

@@ -0,0 +1,264 @@
/*
* Copyright 6WIND S.A., 2014
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version. See the COPYING file in the
* top-level directory.
*/
#include "qemu-common.h"
#include "ivshmem-server.h"
#define IVSHMEM_SERVER_DEFAULT_VERBOSE 0
#define IVSHMEM_SERVER_DEFAULT_FOREGROUND 0
#define IVSHMEM_SERVER_DEFAULT_PID_FILE "/var/run/ivshmem-server.pid"
#define IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH "/tmp/ivshmem_socket"
#define IVSHMEM_SERVER_DEFAULT_SHM_PATH "ivshmem"
#define IVSHMEM_SERVER_DEFAULT_SHM_SIZE (4*1024*1024)
#define IVSHMEM_SERVER_DEFAULT_N_VECTORS 1
/* used to quit on signal SIGTERM */
static int ivshmem_server_quit;
/* arguments given by the user */
typedef struct IvshmemServerArgs {
bool verbose;
bool foreground;
const char *pid_file;
const char *unix_socket_path;
const char *shm_path;
uint64_t shm_size;
unsigned n_vectors;
} IvshmemServerArgs;
/* show ivshmem_server_usage and exit with given error code */
static void
ivshmem_server_usage(const char *name, int code)
{
fprintf(stderr, "%s [opts]\n", name);
fprintf(stderr, " -h: show this help\n");
fprintf(stderr, " -v: verbose mode\n");
fprintf(stderr, " -F: foreground mode (default is to daemonize)\n");
fprintf(stderr, " -p <pid_file>: path to the PID file (used in daemon\n"
" mode only).\n"
" Default=%s\n", IVSHMEM_SERVER_DEFAULT_SHM_PATH);
fprintf(stderr, " -S <unix_socket_path>: path to the unix socket\n"
" to listen to.\n"
" Default=%s\n", IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH);
fprintf(stderr, " -m <shm_path>: path to the shared memory.\n"
" The path corresponds to a POSIX shm name. To use a\n"
" real file, for instance in a hugetlbfs, use\n"
" /../../abspath/to/file.\n"
" default=%s\n", IVSHMEM_SERVER_DEFAULT_SHM_PATH);
fprintf(stderr, " -l <size>: size of shared memory in bytes. The suffix\n"
" K, M and G can be used (ex: 1K means 1024).\n"
" default=%u\n", IVSHMEM_SERVER_DEFAULT_SHM_SIZE);
fprintf(stderr, " -n <n_vects>: number of vectors.\n"
" default=%u\n", IVSHMEM_SERVER_DEFAULT_N_VECTORS);
exit(code);
}
/* parse the program arguments, exit on error */
static void
ivshmem_server_parse_args(IvshmemServerArgs *args, int argc, char *argv[])
{
int c;
unsigned long long v;
Error *errp = NULL;
while ((c = getopt(argc, argv,
"h" /* help */
"v" /* verbose */
"F" /* foreground */
"p:" /* pid_file */
"S:" /* unix_socket_path */
"m:" /* shm_path */
"l:" /* shm_size */
"n:" /* n_vectors */
)) != -1) {
switch (c) {
case 'h': /* help */
ivshmem_server_usage(argv[0], 0);
break;
case 'v': /* verbose */
args->verbose = 1;
break;
case 'F': /* foreground */
args->foreground = 1;
break;
case 'p': /* pid_file */
args->pid_file = strdup(optarg);
break;
case 'S': /* unix_socket_path */
args->unix_socket_path = strdup(optarg);
break;
case 'm': /* shm_path */
args->shm_path = strdup(optarg);
break;
case 'l': /* shm_size */
parse_option_size("shm_size", optarg, &args->shm_size, &errp);
if (errp) {
fprintf(stderr, "cannot parse shm size: %s\n",
error_get_pretty(errp));
error_free(errp);
ivshmem_server_usage(argv[0], 1);
}
break;
case 'n': /* n_vectors */
if (parse_uint_full(optarg, &v, 0) < 0) {
fprintf(stderr, "cannot parse n_vectors\n");
ivshmem_server_usage(argv[0], 1);
}
args->n_vectors = v;
break;
default:
ivshmem_server_usage(argv[0], 1);
break;
}
}
if (args->n_vectors > IVSHMEM_SERVER_MAX_VECTORS) {
fprintf(stderr, "too many requested vectors (max is %d)\n",
IVSHMEM_SERVER_MAX_VECTORS);
ivshmem_server_usage(argv[0], 1);
}
if (args->verbose == 1 && args->foreground == 0) {
fprintf(stderr, "cannot use verbose in daemon mode\n");
ivshmem_server_usage(argv[0], 1);
}
}
/* wait for events on listening server unix socket and connected client
* sockets */
static int
ivshmem_server_poll_events(IvshmemServer *server)
{
fd_set fds;
int ret = 0, maxfd;
while (!ivshmem_server_quit) {
FD_ZERO(&fds);
maxfd = 0;
ivshmem_server_get_fds(server, &fds, &maxfd);
ret = select(maxfd, &fds, NULL, NULL, NULL);
if (ret < 0) {
if (errno == EINTR) {
continue;
}
fprintf(stderr, "select error: %s\n", strerror(errno));
break;
}
if (ret == 0) {
continue;
}
if (ivshmem_server_handle_fds(server, &fds, maxfd) < 0) {
fprintf(stderr, "ivshmem_server_handle_fds() failed\n");
break;
}
}
return ret;
}
static void
ivshmem_server_quit_cb(int signum)
{
ivshmem_server_quit = 1;
}
int
main(int argc, char *argv[])
{
IvshmemServer server;
struct sigaction sa, sa_quit;
IvshmemServerArgs args = {
.verbose = IVSHMEM_SERVER_DEFAULT_VERBOSE,
.foreground = IVSHMEM_SERVER_DEFAULT_FOREGROUND,
.pid_file = IVSHMEM_SERVER_DEFAULT_PID_FILE,
.unix_socket_path = IVSHMEM_SERVER_DEFAULT_UNIX_SOCK_PATH,
.shm_path = IVSHMEM_SERVER_DEFAULT_SHM_PATH,
.shm_size = IVSHMEM_SERVER_DEFAULT_SHM_SIZE,
.n_vectors = IVSHMEM_SERVER_DEFAULT_N_VECTORS,
};
int ret = 1;
/* parse arguments, will exit on error */
ivshmem_server_parse_args(&args, argc, argv);
/* Ignore SIGPIPE, see this link for more info:
* http://www.mail-archive.com/libevent-users@monkey.org/msg01606.html */
sa.sa_handler = SIG_IGN;
sa.sa_flags = 0;
if (sigemptyset(&sa.sa_mask) == -1 ||
sigaction(SIGPIPE, &sa, 0) == -1) {
perror("failed to ignore SIGPIPE; sigaction");
goto err;
}
sa_quit.sa_handler = ivshmem_server_quit_cb;
sa_quit.sa_flags = 0;
if (sigemptyset(&sa_quit.sa_mask) == -1 ||
sigaction(SIGTERM, &sa_quit, 0) == -1) {
perror("failed to add SIGTERM handler; sigaction");
goto err;
}
/* init the ivshmem server structure */
if (ivshmem_server_init(&server, args.unix_socket_path, args.shm_path,
args.shm_size, args.n_vectors, args.verbose) < 0) {
fprintf(stderr, "cannot init server\n");
goto err;
}
/* start the ivshmem server (open shm & unix socket) */
if (ivshmem_server_start(&server) < 0) {
fprintf(stderr, "cannot bind\n");
goto err;
}
/* daemonize if asked to */
if (!args.foreground) {
FILE *fp;
if (qemu_daemon(1, 1) < 0) {
fprintf(stderr, "cannot daemonize: %s\n", strerror(errno));
goto err_close;
}
/* write pid file */
fp = fopen(args.pid_file, "w");
if (fp == NULL) {
fprintf(stderr, "cannot write pid file: %s\n", strerror(errno));
goto err_close;
}
fprintf(fp, "%d\n", (int) getpid());
fclose(fp);
}
ivshmem_server_poll_events(&server);
fprintf(stdout, "server disconnected\n");
ret = 0;
err_close:
ivshmem_server_close(&server);
err:
return ret;
}

qemu-doc.texi

@@ -1266,9 +1266,13 @@ is qemu.git/contrib/ivshmem-server. An example syntax when using the shared
memory server is:
@example
qemu-system-i386 -device ivshmem,size=<size in format accepted by -m>[,chardev=<id>]
[,msi=on][,ioeventfd=on][,vectors=n][,role=peer|master]
qemu-system-i386 -chardev socket,path=<path>,id=<id>
# First start the ivshmem server once and for all
ivshmem-server -p <pidfile> -S <path> -m <shm name> -l <shm size> -n <vectors n>
# Then start your qemu instances with matching arguments
qemu-system-i386 -device ivshmem,size=<shm size>,vectors=<vectors n>,chardev=<id>
[,msi=on][,ioeventfd=on][,role=peer|master]
-chardev socket,path=<path>,id=<id>
@end example
When using the server, the guest will be assigned a VM ID (>=0) that allows guests