Merge remote-tracking branch 'stefanha/block' into staging
# By Stefan Hajnoczi (4) and others
# Via Stefan Hajnoczi
* stefanha/block:
  dataplane: refuse to start if device is already in use
  dataplane: enable virtio-blk x-data-plane=on live migration
  migration: fix spice migration
  migration: notify migration state before starting thread
  block: Repair the throttling code.
  gluster: Add image resize support

Message-id: 1375112172-24863-1-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

commit eddbf0ab9d

block.c
@@ -127,7 +127,7 @@ void bdrv_io_limits_disable(BlockDriverState *bs)
 {
     bs->io_limits_enabled = false;
 
-    while (qemu_co_queue_next(&bs->throttled_reqs));
+    do {} while (qemu_co_enter_next(&bs->throttled_reqs));
 
     if (bs->block_timer) {
         qemu_del_timer(bs->block_timer);
@@ -143,7 +143,7 @@ static void bdrv_block_timer(void *opaque)
 {
     BlockDriverState *bs = opaque;
 
-    qemu_co_queue_next(&bs->throttled_reqs);
+    qemu_co_enter_next(&bs->throttled_reqs);
 }
 
 void bdrv_io_limits_enable(BlockDriverState *bs)
@@ -1452,8 +1452,7 @@ void bdrv_drain_all(void)
          * a busy wait.
          */
         QTAILQ_FOREACH(bs, &bdrv_states, list) {
-            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
-                qemu_co_queue_restart_all(&bs->throttled_reqs);
+            while (qemu_co_enter_next(&bs->throttled_reqs)) {
                 busy = true;
             }
         }

block/gluster.c
@@ -493,6 +493,19 @@ out:
     return NULL;
 }
 
+static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
+{
+    int ret;
+    BDRVGlusterState *s = bs->opaque;
+
+    ret = glfs_ftruncate(s->fd, offset);
+    if (ret < 0) {
+        return -errno;
+    }
+
+    return 0;
+}
+
 static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque)
@@ -631,6 +644,7 @@ static BlockDriver bdrv_gluster = {
     .bdrv_create = qemu_gluster_create,
     .bdrv_getlength = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
+    .bdrv_truncate = qemu_gluster_truncate,
     .bdrv_aio_readv = qemu_gluster_aio_readv,
     .bdrv_aio_writev = qemu_gluster_aio_writev,
     .bdrv_aio_flush = qemu_gluster_aio_flush,
@@ -650,6 +664,7 @@ static BlockDriver bdrv_gluster_tcp = {
     .bdrv_create = qemu_gluster_create,
     .bdrv_getlength = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
+    .bdrv_truncate = qemu_gluster_truncate,
     .bdrv_aio_readv = qemu_gluster_aio_readv,
     .bdrv_aio_writev = qemu_gluster_aio_writev,
     .bdrv_aio_flush = qemu_gluster_aio_flush,
@@ -669,6 +684,7 @@ static BlockDriver bdrv_gluster_unix = {
     .bdrv_create = qemu_gluster_create,
     .bdrv_getlength = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
+    .bdrv_truncate = qemu_gluster_truncate,
     .bdrv_aio_readv = qemu_gluster_aio_readv,
     .bdrv_aio_writev = qemu_gluster_aio_writev,
     .bdrv_aio_flush = qemu_gluster_aio_flush,
@@ -688,6 +704,7 @@ static BlockDriver bdrv_gluster_rdma = {
     .bdrv_create = qemu_gluster_create,
     .bdrv_getlength = qemu_gluster_getlength,
     .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
+    .bdrv_truncate = qemu_gluster_truncate,
     .bdrv_aio_readv = qemu_gluster_aio_readv,
     .bdrv_aio_writev = qemu_gluster_aio_writev,
     .bdrv_aio_flush = qemu_gluster_aio_flush,
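
With .bdrv_truncate wired up for every gluster transport, the generic block layer can resize gluster-backed images. The following is a minimal sketch of how a caller reaches qemu_gluster_truncate() through bdrv_truncate(); the helper name example_grow_image is made up for illustration and assumes an already-opened BlockDriverState:

    #include <string.h>
    #include "block/block.h"
    #include "qemu/error-report.h"

    /* Sketch only: grow an already-opened gluster-backed image.
     * bdrv_truncate() dispatches to drv->bdrv_truncate, i.e.
     * qemu_gluster_truncate() above.
     */
    static int example_grow_image(BlockDriverState *bs, int64_t new_size)
    {
        int ret = bdrv_truncate(bs, new_size);
        if (ret < 0) {
            error_report("resize failed: %s", strerror(-ret));
        }
        return ret;
    }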

hw/block/dataplane/virtio-blk.c
@@ -18,7 +18,6 @@
 #include "qemu/error-report.h"
 #include "hw/virtio/dataplane/vring.h"
 #include "ioq.h"
-#include "migration/migration.h"
 #include "block/block.h"
 #include "hw/virtio/virtio-blk.h"
 #include "virtio-blk.h"
@@ -69,8 +68,6 @@ struct VirtIOBlockDataPlane {
                                         queue */
 
     unsigned int num_reqs;
-
-    Error *migration_blocker;
 };
 
 /* Raise an interrupt to signal guest, if necessary */
@@ -418,6 +415,14 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
         return false;
     }
 
+    /* If dataplane is (re-)enabled while the guest is running there could be
+     * block jobs that can conflict.
+     */
+    if (bdrv_in_use(blk->conf.bs)) {
+        error_report("cannot start dataplane thread while device is in use");
+        return false;
+    }
+
     fd = raw_get_aio_fd(blk->conf.bs);
     if (fd < 0) {
         error_report("drive is incompatible with x-data-plane, "
@@ -433,10 +438,6 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
     /* Prevent block operations that conflict with data plane thread */
     bdrv_set_in_use(blk->conf.bs, 1);
 
-    error_setg(&s->migration_blocker,
-            "x-data-plane does not support migration");
-    migrate_add_blocker(s->migration_blocker);
-
     *dataplane = s;
     return true;
 }
@@ -448,8 +449,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     }
 
     virtio_blk_data_plane_stop(s);
-    migrate_del_blocker(s->migration_blocker);
-    error_free(s->migration_blocker);
     bdrv_set_in_use(s->blk->conf.bs, 0);
     g_free(s);
 }

hw/block/virtio-blk.c
@@ -19,6 +19,7 @@
 #include "hw/virtio/virtio-blk.h"
 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
 # include "dataplane/virtio-blk.h"
+# include "migration/migration.h"
 #endif
 #include "block/scsi.h"
 #ifdef __linux__
@@ -628,6 +629,34 @@ void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk)
     memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
 }
 
+#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
+/* Disable dataplane thread during live migration since it does not
+ * update the dirty memory bitmap yet.
+ */
+static void virtio_blk_migration_state_changed(Notifier *notifier, void *data)
+{
+    VirtIOBlock *s = container_of(notifier, VirtIOBlock,
+                                  migration_state_notifier);
+    MigrationState *mig = data;
+
+    if (migration_in_setup(mig)) {
+        if (!s->dataplane) {
+            return;
+        }
+        virtio_blk_data_plane_destroy(s->dataplane);
+        s->dataplane = NULL;
+    } else if (migration_has_finished(mig) ||
+               migration_has_failed(mig)) {
+        if (s->dataplane) {
+            return;
+        }
+        bdrv_drain_all(); /* complete in-flight non-dataplane requests */
+        virtio_blk_data_plane_create(VIRTIO_DEVICE(s), &s->blk,
+                                     &s->dataplane);
+    }
+}
+#endif /* CONFIG_VIRTIO_BLK_DATA_PLANE */
+
 static int virtio_blk_device_init(VirtIODevice *vdev)
 {
     DeviceState *qdev = DEVICE(vdev);
@@ -664,6 +693,8 @@ static int virtio_blk_device_init(VirtIODevice *vdev)
         virtio_cleanup(vdev);
         return -1;
     }
+    s->migration_state_notifier.notify = virtio_blk_migration_state_changed;
+    add_migration_state_change_notifier(&s->migration_state_notifier);
 #endif
 
     s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
@@ -683,6 +714,7 @@ static int virtio_blk_device_exit(DeviceState *dev)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIOBlock *s = VIRTIO_BLK(dev);
 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
+    remove_migration_state_change_notifier(&s->migration_state_notifier);
    virtio_blk_data_plane_destroy(s->dataplane);
     s->dataplane = NULL;
 #endif

include/block/coroutine.h
@@ -130,12 +130,17 @@ void coroutine_fn qemu_co_queue_wait_insert_head(CoQueue *queue);
  *
  * Returns true if a coroutine was restarted, false if the queue is empty.
  */
-bool qemu_co_queue_next(CoQueue *queue);
+bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
 
 /**
  * Restarts all coroutines in the CoQueue and leaves the queue empty.
  */
-void qemu_co_queue_restart_all(CoQueue *queue);
+void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
+
+/**
+ * Enter the next coroutine in the queue
+ */
+bool qemu_co_enter_next(CoQueue *queue);
 
 /**
  * Checks if the CoQueue is empty.

include/hw/virtio/virtio-blk.h
@@ -125,6 +125,7 @@ typedef struct VirtIOBlock {
     unsigned short sector_mask;
     VMChangeStateEntry *change;
 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
+    Notifier migration_state_notifier;
     struct VirtIOBlockDataPlane *dataplane;
 #endif
 } VirtIOBlock;

include/migration/migration.h
@@ -90,7 +90,7 @@ int migrate_fd_close(MigrationState *s);
 
 void add_migration_state_change_notifier(Notifier *notify);
 void remove_migration_state_change_notifier(Notifier *notify);
-bool migration_is_active(MigrationState *);
+bool migration_in_setup(MigrationState *);
 bool migration_has_finished(MigrationState *);
 bool migration_has_failed(MigrationState *);
 MigrationState *migrate_get_current(void);

migration.c
@@ -338,9 +338,9 @@ void remove_migration_state_change_notifier(Notifier *notify)
     notifier_remove(notify);
 }
 
-bool migration_is_active(MigrationState *s)
+bool migration_in_setup(MigrationState *s)
 {
-    return s->state == MIG_STATE_ACTIVE;
+    return s->state == MIG_STATE_SETUP;
 }
 
 bool migration_has_finished(MigrationState *s)
@@ -658,7 +658,9 @@ void migrate_fd_connect(MigrationState *s)
     qemu_file_set_rate_limit(s->file,
                              s->bandwidth_limit / XFER_LIMIT_RATIO);
 
+    /* Notify before starting migration thread */
+    notifier_list_notify(&migration_state_notifiers, s);
+
     qemu_thread_create(&s->thread, migration_thread, s,
                        QEMU_THREAD_JOINABLE);
-    notifier_list_notify(&migration_state_notifiers, s);
 }
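
Because the notifier list is now run before the migration thread starts, a consumer that registers a migration state change notifier is guaranteed to see the SETUP state before any RAM is sent. A minimal sketch of such a consumer, using only the helpers declared in include/migration/migration.h above; the example_* names are made up for illustration:

    #include "migration/migration.h"
    #include "qemu/notify.h"

    static Notifier example_notifier;

    /* Called via notifier_list_notify() with the MigrationState as data. */
    static void example_migration_state_changed(Notifier *notifier, void *data)
    {
        MigrationState *s = data;

        if (migration_in_setup(s)) {
            /* Migration is about to start: quiesce conflicting activity. */
        } else if (migration_has_finished(s) || migration_has_failed(s)) {
            /* Migration is over: resume normal operation. */
        }
    }

    static void example_register(void)
    {
        example_notifier.notify = example_migration_state_changed;
        add_migration_state_change_notifier(&example_notifier);
    }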

qemu-coroutine-lock.c
@@ -88,16 +88,32 @@ static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
     return true;
 }
 
-bool qemu_co_queue_next(CoQueue *queue)
+bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
 {
+    assert(qemu_in_coroutine());
     return qemu_co_queue_do_restart(queue, true);
 }
 
-void qemu_co_queue_restart_all(CoQueue *queue)
+void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
 {
+    assert(qemu_in_coroutine());
     qemu_co_queue_do_restart(queue, false);
 }
 
+bool qemu_co_enter_next(CoQueue *queue)
+{
+    Coroutine *next;
+
+    next = QTAILQ_FIRST(&queue->entries);
+    if (!next) {
+        return false;
+    }
+
+    QTAILQ_REMOVE(&queue->entries, next, co_queue_next);
+    qemu_coroutine_enter(next, NULL);
+    return true;
+}
+
 bool qemu_co_queue_empty(CoQueue *queue)
 {
     return (QTAILQ_FIRST(&queue->entries) == NULL);
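
The point of the new qemu_co_enter_next() is that it may be called from plain functions such as timer callbacks, while qemu_co_queue_next() and qemu_co_queue_restart_all() are now marked coroutine_fn and assert qemu_in_coroutine(). That is exactly what bdrv_block_timer() in block.c needs. A minimal sketch, assuming the coroutine API declared in include/block/coroutine.h; the example_* names are made up for illustration:

    #include "block/coroutine.h"

    static CoQueue example_waiters;

    /* Coroutine side: park on the queue until woken. */
    static void coroutine_fn example_co_fn(void *opaque)
    {
        qemu_co_queue_wait(&example_waiters);
        /* ... resumes here once re-entered ... */
    }

    /* Non-coroutine side (e.g. a QEMUTimer callback): wake one waiter at a
     * time, like the repaired throttling code in block.c.
     */
    static void example_timer_cb(void *opaque)
    {
        qemu_co_enter_next(&example_waiters);
    }

    static void example_init(void)
    {
        qemu_co_queue_init(&example_waiters);
        qemu_coroutine_enter(qemu_coroutine_create(example_co_fn), NULL);
    }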

ui/spice-core.c
@@ -563,7 +563,7 @@ static void migration_state_notifier(Notifier *notifier, void *data)
 {
     MigrationState *s = data;
 
-    if (migration_is_active(s)) {
+    if (migration_in_setup(s)) {
         spice_server_migrate_start(spice_server);
     } else if (migration_has_finished(s)) {
         spice_server_migrate_end(spice_server, true);