Merge remote-tracking branch 'remotes/gkurz/tags/9p-next-2020-05-26' into staging

- fix build with musl libc
- fix potential deadlock of QEMU main event loop (cannot be hit with linux
  client); a short sketch of this fix follows the log message below
- revert 9pfs reply truncation (LP 1877688)
- xen backend waits for client to free space on the reply ring instead of
  truncating or disconnecting (a condensed sketch of this handshake follows
  the diff at the end)

# gpg: Signature made Tue 26 May 2020 10:36:23 BST
# gpg:                using RSA key B4828BAF943140CEF2A3491071D4D5E5822F73D6
# gpg: Good signature from "Greg Kurz <groug@kaod.org>" [full]
# gpg:                 aka "Gregory Kurz <gregory.kurz@free.fr>" [full]
# gpg:                 aka "[jpeg image of size 3330]" [full]
# Primary key fingerprint: B482 8BAF 9431 40CE F2A3  4910 71D4 D5E5 822F 73D6

* remotes/gkurz/tags/9p-next-2020-05-26:
  xen/9pfs: increase max ring order to 9
  xen/9pfs: yield when there isn't enough room on the ring
  Revert "9p: init_in_iov_from_pdu can truncate the size"
  9p: Lock directory streams with a CoMutex
  9pfs: include linux/limits.h for XATTR_SIZE_MAX

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
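
For background on the deadlock fix: the "9p: Lock directory streams with a
CoMutex" patch replaces the per-directory QemuMutex with a CoMutex (see the
hw/9pfs/9p.h hunks below). A QemuMutex blocks the calling thread, so a
request coroutine sleeping on it would stall the main event loop thread
itself; a CoMutex merely suspends the coroutine and lets the event loop keep
running. A minimal sketch of the pattern, assuming QEMU's coroutine API (the
V9fsDirSketch type and sketch_* names are illustrative, not from the patch):

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    typedef struct V9fsDirSketch {
        CoMutex readdir_mutex;      /* was QemuMutex before this merge */
    } V9fsDirSketch;

    static void sketch_readdir_init(V9fsDirSketch *dir)
    {
        qemu_co_mutex_init(&dir->readdir_mutex);
    }

    /* Must run in coroutine context: on contention the coroutine yields
     * back to the event loop instead of blocking the thread. */
    static void coroutine_fn sketch_readdir_critical(V9fsDirSketch *dir)
    {
        qemu_co_mutex_lock(&dir->readdir_mutex);
        /* ... seekdir()/readdir() on the directory stream goes here ... */
        qemu_co_mutex_unlock(&dir->readdir_mutex);
    }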
 4 files changed, 49 insertions(+), 46 deletions(-)

diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -28,6 +28,7 @@
 #include "sysemu/qtest.h"
 #include "qemu/xxhash.h"
 #include <math.h>
+#include <linux/limits.h>
 
 int open_fd_hw;
 int total_open_fd;
@@ -2102,29 +2103,22 @@ out_nofid:
  * with qemu_iovec_destroy().
  */
 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
-                                    size_t skip, size_t *size,
+                                    size_t skip, size_t size,
                                     bool is_write)
 {
     QEMUIOVector elem;
     struct iovec *iov;
     unsigned int niov;
-    size_t alloc_size = *size + skip;
 
     if (is_write) {
-        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, alloc_size);
+        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
     } else {
-        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, &alloc_size);
-    }
-    if (alloc_size < skip) {
-        *size = 0;
-    } else {
-        *size = alloc_size - skip;
+        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
     }
 
     qemu_iovec_init_external(&elem, iov, niov);
     qemu_iovec_init(qiov, niov);
-    qemu_iovec_concat(qiov, &elem, skip, *size);
+    qemu_iovec_concat(qiov, &elem, skip, size);
 }
 
 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
@@ -2132,14 +2126,15 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
 {
     ssize_t err;
     size_t offset = 7;
-    size_t read_count;
+    uint64_t read_count;
     QEMUIOVector qiov_full;
 
     if (fidp->fs.xattr.len < off) {
         read_count = 0;
-    } else if (fidp->fs.xattr.len - off < max_count) {
-        read_count = fidp->fs.xattr.len - off;
     } else {
-        read_count = max_count;
+        read_count = fidp->fs.xattr.len - off;
+    }
+    if (read_count > max_count) {
+        read_count = max_count;
     }
     err = pdu_marshal(pdu, offset, "d", read_count);
@@ -2148,7 +2143,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
     }
     offset += err;
 
-    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
                     ((char *)fidp->fs.xattr.value) + off,
                     read_count);
@@ -2277,11 +2272,9 @@ static void coroutine_fn v9fs_read(void *opaque)
         QEMUIOVector qiov_full;
         QEMUIOVector qiov;
         int32_t len;
-        size_t size = max_count;
 
-        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &size, false);
+        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
         qemu_iovec_init(&qiov, qiov_full.niov);
-        max_count = size;
         do {
             qemu_iovec_reset(&qiov);
             qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
@@ -2532,7 +2525,6 @@ static void coroutine_fn v9fs_write(void *opaque)
     int32_t len = 0;
     int32_t total = 0;
     size_t offset = 7;
-    size_t size;
     V9fsFidState *fidp;
     V9fsPDU *pdu = opaque;
     V9fsState *s = pdu->s;
@@ -2545,9 +2537,7 @@ static void coroutine_fn v9fs_write(void *opaque)
         return;
     }
     offset += err;
-    size = count;
-    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &size, true);
-    count = size;
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
 
     fidp = get_fid(pdu, fid);

diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -197,22 +197,22 @@ typedef struct V9fsXattr
 
 typedef struct V9fsDir {
     DIR *stream;
-    QemuMutex readdir_mutex;
+    CoMutex readdir_mutex;
 } V9fsDir;
 
 static inline void v9fs_readdir_lock(V9fsDir *dir)
 {
-    qemu_mutex_lock(&dir->readdir_mutex);
+    qemu_co_mutex_lock(&dir->readdir_mutex);
 }
 
 static inline void v9fs_readdir_unlock(V9fsDir *dir)
 {
-    qemu_mutex_unlock(&dir->readdir_mutex);
+    qemu_co_mutex_unlock(&dir->readdir_mutex);
 }
 
 static inline void v9fs_readdir_init(V9fsDir *dir)
 {
-    qemu_mutex_init(&dir->readdir_mutex);
+    qemu_co_mutex_init(&dir->readdir_mutex);
 }
 
 /*
@@ -436,7 +436,7 @@ struct V9fsTransport {
     ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
                                   va_list ap);
     void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
-                                        unsigned int *pniov, size_t *size);
+                                        unsigned int *pniov, size_t size);
     void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
                                          unsigned int *pniov, size_t size);
     void        (*push_and_notify)(V9fsPDU *pdu);

diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -147,22 +147,19 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
 }
 
 static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
-                                        unsigned int *pniov, size_t *size)
+                                        unsigned int *pniov, size_t size)
 {
     V9fsState *s = pdu->s;
     V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
     VirtQueueElement *elem = v->elems[pdu->idx];
     size_t buf_size = iov_size(elem->in_sg, elem->in_num);
 
-    if (buf_size < P9_IOHDRSZ) {
+    if (buf_size < size) {
         VirtIODevice *vdev = VIRTIO_DEVICE(v);
 
         virtio_error(vdev,
-                     "VirtFS reply type %d needs %zu bytes, buffer has %zu, less than minimum",
-                     pdu->id + 1, *size, buf_size);
-    }
-    if (buf_size < *size) {
-        *size = buf_size;
+                     "VirtFS reply type %d needs %zu bytes, buffer has %zu",
+                     pdu->id + 1, size, buf_size);
     }
 
     *piov = elem->in_sg;
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -21,7 +21,7 @@
 
 #define VERSIONS "1"
 #define MAX_RINGS 8
-#define MAX_RING_ORDER 8
+#define MAX_RING_ORDER 9
 
 typedef struct Xen9pfsRing {
     struct Xen9pfsDev *priv;
@@ -37,6 +37,7 @@ typedef struct Xen9pfsRing {
     struct iovec *sg;
     QEMUBH *bh;
+    Coroutine *co;
 
     /* local copies, so that we can read/write PDU data directly from
      * the ring */
@@ -188,7 +189,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
 static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
-                                          size_t *size)
+                                          size_t size)
 {
     Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
     Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
@@ -198,19 +199,20 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
 
     g_free(ring->sg);
 
     ring->sg = g_new0(struct iovec, 2);
-    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
+    ring->co = qemu_coroutine_self();
+    /* make sure other threads see ring->co changes before continuing */
+    smp_wmb();
+again:
+    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
     buf_size = iov_size(ring->sg, num);
-    if (buf_size < P9_IOHDRSZ) {
-        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs reply type %d needs "
-                      "%zu bytes, buffer has %zu, less than minimum\n",
-                      pdu->id + 1, *size, buf_size);
-        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
-        xen_9pfs_disconnect(&xen_9pfs->xendev);
-    }
-    if (buf_size < *size) {
-        *size = buf_size;
+    if (buf_size < size) {
+        qemu_coroutine_yield();
+        goto again;
     }
+    ring->co = NULL;
+    /* make sure other threads see ring->co changes before continuing */
+    smp_wmb();
 
     *piov = ring->sg;
     *pniov = num;
@@ -295,6 +297,20 @@ static int xen_9pfs_receive(Xen9pfsRing *ring)
 
 static void xen_9pfs_bh(void *opaque)
 {
     Xen9pfsRing *ring = opaque;
+    bool wait;
+
+again:
+    wait = ring->co != NULL && qemu_coroutine_entered(ring->co);
+    /* paired with the smb_wmb barriers in xen_9pfs_init_in_iov_from_pdu */
+    smp_rmb();
+    if (wait) {
+        cpu_relax();
+        goto again;
+    }
+
+    if (ring->co != NULL) {
+        qemu_coroutine_enter_if_inactive(ring->co);
+    }
     xen_9pfs_receive(ring);
 }
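
For context on the Xen flow-control change: the last two hunks form a
handshake. The coroutine side (xen_9pfs_init_in_iov_from_pdu) publishes
itself in ring->co and yields whenever the reply ring has fewer free bytes
than the reply needs; when the client consumes ring entries, the event
channel schedules xen_9pfs_bh, which spins until the coroutine is actually
suspended and then re-enters it so it can retry the space check. A condensed
sketch of the two sides, assuming the fields above (ring_free_space() is a
hypothetical stand-in for the xen_9pfs_in_sg()/iov_size() computation):

    /* Coroutine side (condensed from xen_9pfs_init_in_iov_from_pdu). */
    ring->co = qemu_coroutine_self();
    smp_wmb();                      /* publish ring->co before yielding */
    again:
    if (ring_free_space(ring) < size) {  /* hypothetical helper */
        qemu_coroutine_yield();     /* parked until the BH re-enters us */
        goto again;
    }
    ring->co = NULL;
    smp_wmb();

    /* Event-loop side (condensed from xen_9pfs_bh). */
    while (ring->co != NULL && qemu_coroutine_entered(ring->co)) {
        smp_rmb();                  /* re-read ring->co on each spin */
        cpu_relax();
    }
    if (ring->co != NULL) {
        qemu_coroutine_enter_if_inactive(ring->co);  /* retry space check */
    }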