Migration queue

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJWCiwjAAoJEB6aO1+FQIO2bekQAIBudvPAajQcVNj8qQdKPiW6
 HzPqDHCsy2viyJdxPl1itXYso1lU6cQikx7o/gpzLeCmqYU7xWK747kkmGvUt0fa
 P210hFb+0GXQ1k4xOw0QxcDbaVISVqMQQLajxL5KR+Ktx6sg/byAFdT3Z6ACuHem
 jnupERCw6bkOeI236P9ajjZXQ0EHFFIL28wdxfUOJ3i+qHHxy3t/3NmO1Rhl0prL
 hH2ZS2SaPshpCrY2WiABDfzt1uZapgyhebgesCtzbHrMmOsz3m3g7sz/vleAlg/l
 KykPo+qHvc8simnmKXvk656fWr0FD/Qm5EXobQBfRVJA67gmPjUV/LH8zxqz98Xw
 Vi0kYBBvxLTmpzxdVCjlDNaNTpnRt18sw9SaFzh5+ySx/90qsM5y4dcXVkc508vt
 ipTVlQcr+QfGcg/VSVBvkrgiIIx9yEJ7We9sAWIrD6sBcCREpAjiDU0fY0ESNcXa
 mfpp8cPSjtsPNmvkg/pcQdj421RiuvS5STL8t97jNo7yGFO/b2ndUp2pKB3o+tkp
 UPtC+r/qbhnBKeOv86jnSH1futSdIsriC3/6CWpccGiMWKkJ1ATx1xE9QX7KiYcj
 6G16l1TIJMjckdAExSjBVGiAA3v4bJK9vo8WP/idL4YnNVeZB7efdpWHc+jnXVgp
 WjK3Ll/rfvDlbmprQ2EL
 =01Wd
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/amit-migration/tags/for-juan-201509' into staging

Migration queue

# gpg: Signature made Tue 29 Sep 2015 07:13:55 BST using RSA key ID 854083B6
# gpg: Good signature from "Amit Shah <amit@amitshah.net>"
# gpg:                 aka "Amit Shah <amit@kernel.org>"
# gpg:                 aka "Amit Shah <amitshah@gmx.net>"

* remotes/amit-migration/tags/for-juan-201509:
  ram_find_and_save_block: Split out the finding
  Move dirty page search state into separate structure
  migration: Use g_new() & friends where that makes obvious sense
  migration: qemu-file more size_t'ifying
  migration: size_t'ify some of qemu-file
  Init page sizes in qtest
  Split out end of migration code from migration_thread
  migration/ram.c: Use RAMBlock rather than MemoryRegion
  vmstate: Remove redefinition of VMSTATE_UINT32_ARRAY

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2015-09-29 12:41:19 +01:00
commit b2312c6800
12 changed files with 211 additions and 142 deletions
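Several of the queued patches above are mechanical conversions: allocations move from g_malloc0(sizeof(T) * n) to g_new0(T, n), and the qemu-file buffer lengths move from int to size_t/ssize_t. As a minimal, standalone illustration of why the g_new() family is the preferred spelling (the Example type below is made up for this sketch and does not appear in the patches):

#include <glib.h>

typedef struct Example {
    int id;
    char name[16];
} Example;

int main(void)
{
    size_t n = 8;

    /* Old pattern: the element type is repeated inside sizeof, the
     * multiplication can overflow silently, and the result is untyped. */
    Example *a = g_malloc0(sizeof(Example) * n);

    /* Pattern used by the series: the type is named once, GLib checks the
     * count * size multiplication for overflow, and the returned pointer
     * already has type Example *. */
    Example *b = g_new0(Example, n);

    g_free(a);
    g_free(b);
    return 0;
}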


@ -31,15 +31,15 @@
* The pos argument can be ignored if the file is only being used for
* streaming. The handler should try to write all of the data it can.
*/
typedef int (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf,
int64_t pos, int size);
typedef ssize_t (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf,
int64_t pos, size_t size);
/* Read a chunk of data from a file at the given position. The pos argument
* can be ignored if the file is only being used for streaming. The number of
* bytes actually read should be returned.
*/
typedef int (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf,
int64_t pos, int size);
typedef ssize_t (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf,
int64_t pos, size_t size);
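The comments above state the handler contract that survives the int to size_t/ssize_t conversion: handle as many bytes as possible, return the count actually transferred, and (by the usual QEMU convention) return a negative value on error. A minimal sketch of a conforming put handler follows; the MemBackend type and mem_put_buffer name are hypothetical and not part of this patch, and a get handler would look the same with the copy direction reversed:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

typedef struct MemBackend {         /* hypothetical opaque state */
    uint8_t *data;
    size_t capacity;
} MemBackend;

static ssize_t mem_put_buffer(void *opaque, const uint8_t *buf,
                              int64_t pos, size_t size)
{
    MemBackend *m = opaque;

    if (pos < 0 || (uint64_t)pos >= m->capacity) {
        return -EINVAL;                     /* errors stay negative */
    }
    if (size > m->capacity - (size_t)pos) {
        size = m->capacity - (size_t)pos;   /* write as much as fits */
    }
    memcpy(m->data + pos, buf, size);
    return size;                            /* bytes actually written */
}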
/* Close a file
*
@ -126,13 +126,13 @@ int qemu_get_fd(QEMUFile *f);
int qemu_fclose(QEMUFile *f);
int64_t qemu_ftell(QEMUFile *f);
int64_t qemu_ftell_fast(QEMUFile *f);
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size);
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size);
void qemu_put_byte(QEMUFile *f, int v);
/*
* put_buffer without copying the buffer.
* The buffer should be available till it is sent asynchronously.
*/
void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size);
void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size);
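The comment above carries an easy-to-miss constraint: qemu_put_buffer_async() queues the caller's pointer instead of copying the data, so the buffer must stay valid until the file is flushed. A small usage sketch (function name and contents invented for illustration):

static void send_magic(QEMUFile *f)
{
    /* Static storage duration, so the bytes are still there when the
     * queued iovec is eventually written out; a local stack array here
     * would be a use-after-return bug. */
    static const uint8_t magic[8] = { 'Q', 'E', 'V', 'M', 0, 0, 0, 1 };

    qemu_put_buffer_async(f, magic, sizeof(magic));
}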
bool qemu_file_mode_is_not_valid(const char *mode);
bool qemu_file_is_writable(QEMUFile *f);
@ -161,8 +161,8 @@ static inline void qemu_put_ubyte(QEMUFile *f, unsigned int v)
void qemu_put_be16(QEMUFile *f, unsigned int v);
void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
int qemu_peek_buffer(QEMUFile *f, uint8_t **buf, int size, size_t offset);
int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size);
size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size,
int level);
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
@ -237,7 +237,7 @@ static inline void qemu_get_8s(QEMUFile *f, uint8_t *pv)
}
// Signed versions for type safety
static inline void qemu_put_sbuffer(QEMUFile *f, const int8_t *buf, int size)
static inline void qemu_put_sbuffer(QEMUFile *f, const int8_t *buf, size_t size)
{
qemu_put_buffer(f, (const uint8_t *)buf, size);
}


@ -754,9 +754,6 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_UINT32_SUB_ARRAY(_f, _s, _start, _num) \
VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint32, uint32_t)
#define VMSTATE_UINT32_ARRAY(_f, _s, _n) \
VMSTATE_UINT32_ARRAY_V(_f, _s, _n, 0)
#define VMSTATE_INT64_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int64, int64_t)


@ -86,7 +86,7 @@ MigrationIncomingState *migration_incoming_get_current(void)
MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
mis_current = g_malloc0(sizeof(MigrationIncomingState));
mis_current = g_new0(MigrationIncomingState, 1);
mis_current->file = f;
QLIST_INIT(&mis_current->loadvm_handlers);
@ -913,6 +913,50 @@ int64_t migrate_xbzrle_cache_size(void)
return s->xbzrle_cache_size;
}
/**
* migration_completion: Used by migration_thread when there's not much left.
* The caller 'breaks' the loop when this returns.
*
* @s: Current migration state
* @*old_vm_running: Pointer to old_vm_running flag
* @*start_time: Pointer to time to update
*/
static void migration_completion(MigrationState *s, bool *old_vm_running,
int64_t *start_time)
{
int ret;
qemu_mutex_lock_iothread();
*start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
*old_vm_running = runstate_is_running();
ret = global_state_store();
if (!ret) {
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
if (ret >= 0) {
qemu_file_set_rate_limit(s->file, INT64_MAX);
qemu_savevm_state_complete(s->file);
}
}
qemu_mutex_unlock_iothread();
if (ret < 0) {
goto fail;
}
if (qemu_file_get_error(s->file)) {
trace_migration_completion_file_err();
goto fail;
}
migrate_set_state(s, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COMPLETED);
return;
fail:
migrate_set_state(s, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED);
}
/* migration thread support */
static void *migration_thread(void *opaque)
@ -943,34 +987,9 @@ static void *migration_thread(void *opaque)
if (pending_size && pending_size >= max_size) {
qemu_savevm_state_iterate(s->file);
} else {
int ret;
qemu_mutex_lock_iothread();
start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
old_vm_running = runstate_is_running();
ret = global_state_store();
if (!ret) {
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
if (ret >= 0) {
qemu_file_set_rate_limit(s->file, INT64_MAX);
qemu_savevm_state_complete(s->file);
}
}
qemu_mutex_unlock_iothread();
if (ret < 0) {
migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_FAILED);
break;
}
if (!qemu_file_get_error(s->file)) {
migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_COMPLETED);
break;
}
trace_migration_thread_low_pending(pending_size);
migration_completion(s, &old_vm_running, &start_time);
break;
}
}


@ -372,7 +372,8 @@ typedef struct QEMUBuffer {
bool qsb_allocated;
} QEMUBuffer;
static int buf_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
static ssize_t buf_get_buffer(void *opaque, uint8_t *buf, int64_t pos,
size_t size)
{
QEMUBuffer *s = opaque;
ssize_t len = qsb_get_length(s->qsb) - pos;
@ -387,8 +388,8 @@ static int buf_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
return qsb_get_buffer(s->qsb, pos, len, buf);
}
static int buf_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, int size)
static ssize_t buf_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, size_t size)
{
QEMUBuffer *s = opaque;
@ -439,7 +440,7 @@ QEMUFile *qemu_bufopen(const char *mode, QEMUSizedBuffer *input)
return NULL;
}
s = g_malloc0(sizeof(QEMUBuffer));
s = g_new0(QEMUBuffer, 1);
s->qsb = input;
if (s->qsb == NULL) {


@ -37,11 +37,11 @@ static int stdio_get_fd(void *opaque)
return fileno(s->stdio_file);
}
static int stdio_put_buffer(void *opaque, const uint8_t *buf, int64_t pos,
int size)
static ssize_t stdio_put_buffer(void *opaque, const uint8_t *buf, int64_t pos,
size_t size)
{
QEMUFileStdio *s = opaque;
int res;
size_t res;
res = fwrite(buf, 1, size, s->stdio_file);
@ -51,11 +51,12 @@ static int stdio_put_buffer(void *opaque, const uint8_t *buf, int64_t pos,
return res;
}
static int stdio_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
static ssize_t stdio_get_buffer(void *opaque, uint8_t *buf, int64_t pos,
size_t size)
{
QEMUFileStdio *s = opaque;
FILE *fp = s->stdio_file;
int bytes;
ssize_t bytes;
for (;;) {
clearerr(fp);
@ -143,7 +144,7 @@ QEMUFile *qemu_popen_cmd(const char *command, const char *mode)
return NULL;
}
s = g_malloc0(sizeof(QEMUFileStdio));
s = g_new0(QEMUFileStdio, 1);
s->stdio_file = stdio_file;
@ -175,7 +176,7 @@ QEMUFile *qemu_fopen(const char *filename, const char *mode)
return NULL;
}
s = g_malloc0(sizeof(QEMUFileStdio));
s = g_new0(QEMUFileStdio, 1);
s->stdio_file = fopen(filename, mode);
if (!s->stdio_file) {


@ -54,7 +54,8 @@ static int socket_get_fd(void *opaque)
return s->fd;
}
static int socket_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
static ssize_t socket_get_buffer(void *opaque, uint8_t *buf, int64_t pos,
size_t size)
{
QEMUFileSocket *s = opaque;
ssize_t len;
@ -138,7 +139,8 @@ static ssize_t unix_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
return total;
}
static int unix_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
static ssize_t unix_get_buffer(void *opaque, uint8_t *buf, int64_t pos,
size_t size)
{
QEMUFileSocket *s = opaque;
ssize_t len;
@ -192,7 +194,7 @@ QEMUFile *qemu_fdopen(int fd, const char *mode)
return NULL;
}
s = g_malloc0(sizeof(QEMUFileSocket));
s = g_new0(QEMUFileSocket, 1);
s->fd = fd;
if (mode[0] == 'r') {
@ -226,7 +228,7 @@ QEMUFile *qemu_fopen_socket(int fd, const char *mode)
return NULL;
}
s = g_malloc0(sizeof(QEMUFileSocket));
s = g_new0(QEMUFileSocket, 1);
s->fd = fd;
if (mode[0] == 'w') {
qemu_set_block(s->fd);


@ -60,7 +60,7 @@ QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops)
{
QEMUFile *f;
f = g_malloc0(sizeof(QEMUFile));
f = g_new0(QEMUFile, 1);
f->opaque = opaque;
f->ops = ops;
@ -270,7 +270,7 @@ int qemu_fclose(QEMUFile *f)
return ret;
}
static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
static void add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size)
{
/* check for adjacent buffer and coalesce them */
if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
@ -286,7 +286,7 @@ static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
}
}
void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size)
void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size)
{
if (!f->ops->writev_buffer) {
qemu_put_buffer(f, buf, size);
@ -301,9 +301,9 @@ void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size)
add_to_iovec(f, buf, size);
}
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size)
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
{
int l;
size_t l;
if (f->last_error) {
return;
@ -363,10 +363,10 @@ void qemu_file_skip(QEMUFile *f, int size)
* return as many as it managed to read (assuming blocking fd's which
* all current QEMUFile are)
*/
int qemu_peek_buffer(QEMUFile *f, uint8_t **buf, int size, size_t offset)
size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
{
int pending;
int index;
ssize_t pending;
size_t index;
assert(!qemu_file_is_writable(f));
assert(offset < IO_BUF_SIZE);
@ -411,13 +411,13 @@ int qemu_peek_buffer(QEMUFile *f, uint8_t **buf, int size, size_t offset)
* return as many as it managed to read (assuming blocking fd's which
* all current QEMUFile are)
*/
int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
{
int pending = size;
int done = 0;
size_t pending = size;
size_t done = 0;
while (pending > 0) {
int res;
size_t res;
uint8_t *src;
res = qemu_peek_buffer(f, &src, MIN(pending, IO_BUF_SIZE), 0);
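Because qemu_get_buffer() is documented above to return however many bytes it managed to read, callers that need an exact amount should compare the return value against the request. A small hypothetical caller (assumes <errno.h> and the qemu-file declarations are in scope):

static int read_fixed_header(QEMUFile *f, uint8_t *hdr, size_t hdr_len)
{
    size_t got = qemu_get_buffer(f, hdr, hdr_len);

    if (got != hdr_len) {
        return -EIO;    /* short read: stream ended or hit an error */
    }
    return 0;
}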


@ -227,6 +227,17 @@ static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
/* used by the search for pages to send */
struct PageSearchStatus {
/* Current block being searched */
RAMBlock *block;
/* Current offset to search from */
ram_addr_t offset;
/* Set once we wrap around */
bool complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;
struct CompressParam {
bool start;
bool done;
@ -497,13 +508,13 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
/* Called with rcu_read_lock() to protect migration_bitmap */
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
ram_addr_t migration_bitmap_find_and_reset_dirty(RAMBlock *rb,
ram_addr_t start)
{
unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
unsigned long base = rb->offset >> TARGET_PAGE_BITS;
unsigned long nr = base + (start >> TARGET_PAGE_BITS);
uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
uint64_t rb_size = rb->used_length;
unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
unsigned long *bitmap;
unsigned long next;
@ -531,7 +542,6 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
}
/* Fix me: there are too many global variables used in migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
@ -573,7 +583,7 @@ static void migration_bitmap_sync(void)
qemu_mutex_lock(&migration_bitmap_mutex);
rcu_read_lock();
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
migration_bitmap_sync_range(block->offset, block->used_length);
}
rcu_read_unlock();
qemu_mutex_unlock(&migration_bitmap_mutex);
@ -668,12 +678,11 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
int pages = -1;
uint64_t bytes_xmit;
ram_addr_t current_addr;
MemoryRegion *mr = block->mr;
uint8_t *p;
int ret;
bool send_async = true;
p = memory_region_get_ram_ptr(mr) + offset;
p = block->host + offset;
/* When in doubt, send the page as normal */
bytes_xmit = 0;
@ -744,7 +753,7 @@ static int do_compress_ram_page(CompressParam *param)
RAMBlock *block = param->block;
ram_addr_t offset = param->offset;
p = memory_region_get_ram_ptr(block->mr) + (offset & TARGET_PAGE_MASK);
p = block->host + (offset & TARGET_PAGE_MASK);
bytes_sent = save_page_header(param->file, block, offset |
RAM_SAVE_FLAG_COMPRESS_PAGE);
@ -852,11 +861,10 @@ static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
{
int pages = -1;
uint64_t bytes_xmit;
MemoryRegion *mr = block->mr;
uint8_t *p;
int ret;
p = memory_region_get_ram_ptr(mr) + offset;
p = block->host + offset;
bytes_xmit = 0;
ret = ram_control_save_page(f, block->offset,
@ -909,6 +917,59 @@ static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
return pages;
}
/*
* Find the next dirty page and update any state associated with
* the search process.
*
* Returns: True if a page is found
*
* @f: Current migration stream.
* @pss: Data about the state of the current dirty page scan.
* @*again: Set to false if the search has scanned the whole of RAM
*/
static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
bool *again)
{
pss->offset = migration_bitmap_find_and_reset_dirty(pss->block,
pss->offset);
if (pss->complete_round && pss->block == last_seen_block &&
pss->offset >= last_offset) {
/*
* We've been once around the RAM and haven't found anything.
* Give up.
*/
*again = false;
return false;
}
if (pss->offset >= pss->block->used_length) {
/* Didn't find anything in this RAM Block */
pss->offset = 0;
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
/* Hit the end of the list */
pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
/* Flag that we've looped */
pss->complete_round = true;
ram_bulk_stage = false;
if (migrate_use_xbzrle()) {
/* If xbzrle is on, stop using the data compression at this
* point. In theory, xbzrle can do better than compression.
*/
flush_compressed_data(f);
compression_switch = false;
}
}
/* Didn't find anything this time, but try again on the new block */
*again = true;
return false;
} else {
/* Can go around again, but... */
*again = true;
/* We've found something so probably don't need to */
return true;
}
}
/**
* ram_find_and_save_block: Finds a dirty page and sends it to f
*
@ -925,56 +986,40 @@ static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
uint64_t *bytes_transferred)
{
RAMBlock *block = last_seen_block;
ram_addr_t offset = last_offset;
bool complete_round = false;
PageSearchStatus pss;
int pages = 0;
MemoryRegion *mr;
bool again, found;
if (!block)
block = QLIST_FIRST_RCU(&ram_list.blocks);
pss.block = last_seen_block;
pss.offset = last_offset;
pss.complete_round = false;
while (true) {
mr = block->mr;
offset = migration_bitmap_find_and_reset_dirty(mr, offset);
if (complete_round && block == last_seen_block &&
offset >= last_offset) {
break;
}
if (offset >= block->used_length) {
offset = 0;
block = QLIST_NEXT_RCU(block, next);
if (!block) {
block = QLIST_FIRST_RCU(&ram_list.blocks);
complete_round = true;
ram_bulk_stage = false;
if (migrate_use_xbzrle()) {
/* If xbzrle is on, stop using the data compression at this
* point. In theory, xbzrle can do better than compression.
*/
flush_compressed_data(f);
compression_switch = false;
}
}
} else {
if (!pss.block) {
pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
}
do {
found = find_dirty_block(f, &pss, &again);
if (found) {
if (compression_switch && migrate_use_compression()) {
pages = ram_save_compressed_page(f, block, offset, last_stage,
pages = ram_save_compressed_page(f, pss.block, pss.offset,
last_stage,
bytes_transferred);
} else {
pages = ram_save_page(f, block, offset, last_stage,
pages = ram_save_page(f, pss.block, pss.offset, last_stage,
bytes_transferred);
}
/* if page is unmodified, continue to the next */
if (pages > 0) {
last_sent_block = block;
break;
last_sent_block = pss.block;
}
}
}
} while (!pages && again);
last_seen_block = block;
last_offset = offset;
last_seen_block = pss.block;
last_offset = pss.offset;
return pages;
}
@ -1344,7 +1389,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
return NULL;
}
return memory_region_get_ram_ptr(block->mr) + offset;
return block->host + offset;
}
len = qemu_get_byte(f);
@ -1354,7 +1399,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id)) &&
block->max_length > offset) {
return memory_region_get_ram_ptr(block->mr) + offset;
return block->host + offset;
}
}


@ -541,7 +541,7 @@ static int rdma_add_block(RDMAContext *rdma, const char *block_name,
RDMALocalBlock *block;
RDMALocalBlock *old = local->block;
local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1));
local->block = g_new0(RDMALocalBlock, local->nb_blocks + 1);
if (local->nb_blocks) {
int x;
@ -572,7 +572,7 @@ static int rdma_add_block(RDMAContext *rdma, const char *block_name,
bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
block->unregister_bitmap = bitmap_new(block->nb_chunks);
bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t));
block->remote_keys = g_new0(uint32_t, block->nb_chunks);
block->is_ram_block = local->init ? false : true;
@ -617,8 +617,8 @@ static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
memset(local, 0, sizeof *local);
qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
rdma->dest_blocks = (RDMADestBlock *) g_malloc0(sizeof(RDMADestBlock) *
rdma->local_ram_blocks.nb_blocks);
rdma->dest_blocks = g_new0(RDMADestBlock,
rdma->local_ram_blocks.nb_blocks);
local->init = true;
return 0;
}
@ -677,8 +677,7 @@ static int rdma_delete_block(RDMAContext *rdma, RDMALocalBlock *block)
if (local->nb_blocks > 1) {
local->block = g_malloc0(sizeof(RDMALocalBlock) *
(local->nb_blocks - 1));
local->block = g_new0(RDMALocalBlock, local->nb_blocks - 1);
if (block->index) {
memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
@ -1164,7 +1163,7 @@ static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
/* allocate memory to store chunk MRs */
if (!block->pmr) {
block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
block->pmr = g_new0(struct ibv_mr *, block->nb_chunks);
}
/*
@ -2494,7 +2493,7 @@ static void *qemu_rdma_data_init(const char *host_port, Error **errp)
InetSocketAddress *addr;
if (host_port) {
rdma = g_malloc0(sizeof(RDMAContext));
rdma = g_new0(RDMAContext, 1);
rdma->current_index = -1;
rdma->current_chunk = -1;
@ -2519,8 +2518,8 @@ static void *qemu_rdma_data_init(const char *host_port, Error **errp)
* SEND messages for control only.
* VM's ram is handled with regular RDMA messages.
*/
static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, int size)
static ssize_t qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, size_t size)
{
QEMUFileRDMA *r = opaque;
QEMUFile *f = r->file;
@ -2547,7 +2546,8 @@ static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
r->len = MIN(remaining, RDMA_SEND_INCREMENT);
remaining -= r->len;
head.len = r->len;
/* Guaranteed to fit due to RDMA_SEND_INCREMENT MIN above */
head.len = (uint32_t)r->len;
head.type = RDMA_CONTROL_QEMU_FILE;
ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
@ -2564,7 +2564,7 @@ static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
}
static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
int size, int idx)
size_t size, int idx)
{
size_t len = 0;
@ -2585,8 +2585,8 @@ static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
* RDMA links don't use bytestreams, so we have to
* return bytes to QEMUFile opportunistically.
*/
static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf,
int64_t pos, int size)
static ssize_t qemu_rdma_get_buffer(void *opaque, uint8_t *buf,
int64_t pos, size_t size)
{
QEMUFileRDMA *r = opaque;
RDMAContext *rdma = r->rdma;
@ -3399,7 +3399,7 @@ static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
return NULL;
}
r = g_malloc0(sizeof(QEMUFileRDMA));
r = g_new0(QEMUFileRDMA, 1);
r->rdma = rdma;
if (mode[0] == 'w') {


@ -138,14 +138,15 @@ static ssize_t block_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
return qiov.size;
}
static int block_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, int size)
static ssize_t block_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, size_t size)
{
bdrv_save_vmstate(opaque, buf, pos, size);
return size;
}
static int block_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
static ssize_t block_get_buffer(void *opaque, uint8_t *buf, int64_t pos,
size_t size)
{
return bdrv_load_vmstate(opaque, buf, pos, size);
}
@ -480,7 +481,7 @@ int register_savevm_live(DeviceState *dev,
{
SaveStateEntry *se;
se = g_malloc0(sizeof(SaveStateEntry));
se = g_new0(SaveStateEntry, 1);
se->version_id = version_id;
se->section_id = savevm_state.global_section_id++;
se->ops = ops;
@ -498,7 +499,7 @@ int register_savevm_live(DeviceState *dev,
pstrcat(se->idstr, sizeof(se->idstr), "/");
g_free(id);
se->compat = g_malloc0(sizeof(CompatEntry));
se->compat = g_new0(CompatEntry, 1);
pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), idstr);
se->compat->instance_id = instance_id == -1 ?
calculate_compat_instance_id(idstr) : instance_id;
@ -526,7 +527,7 @@ int register_savevm(DeviceState *dev,
LoadStateHandler *load_state,
void *opaque)
{
SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers));
SaveVMHandlers *ops = g_new0(SaveVMHandlers, 1);
ops->save_state = save_state;
ops->load_state = load_state;
return register_savevm_live(dev, idstr, instance_id, version_id,
@ -568,7 +569,7 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
/* If this triggers, alias support can be dropped for the vmsd. */
assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id);
se = g_malloc0(sizeof(SaveStateEntry));
se = g_new0(SaveStateEntry, 1);
se->version_id = vmsd->version_id;
se->section_id = savevm_state.global_section_id++;
se->opaque = opaque;
@ -582,7 +583,7 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
pstrcat(se->idstr, sizeof(se->idstr), "/");
g_free(id);
se->compat = g_malloc0(sizeof(CompatEntry));
se->compat = g_new0(CompatEntry, 1);
pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name);
se->compat->instance_id = instance_id == -1 ?
calculate_compat_instance_id(vmsd->name) : instance_id;
@ -1544,7 +1545,7 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
return;
}
available_snapshots = g_malloc0(sizeof(int) * nb_sns);
available_snapshots = g_new0(int, nb_sns);
total = 0;
for (i = 0; i < nb_sns; i++) {
sn = &sn_tab[i];


@ -657,6 +657,7 @@ void qtest_init(const char *qtest_chrdev, const char *qtest_log, Error **errp)
inbuf = g_string_new("");
qtest_chr = chr;
page_size_init();
}
bool qtest_driver(void)


@ -1420,6 +1420,8 @@ migrate_transferred(uint64_t tranferred, uint64_t time_spent, double bandwidth,
migrate_state_too_big(void) ""
migrate_global_state_post_load(const char *state) "loaded state: %s"
migrate_global_state_pre_save(const char *state) "saved state: %s"
migration_completion_file_err(void) ""
migration_thread_low_pending(uint64_t pending) "%" PRIu64
# migration/rdma.c
qemu_rdma_accept_incoming_migration(void) ""
@ -1440,7 +1442,7 @@ qemu_rdma_exchange_get_response_none(const char *desc, int type) "Surprise: got
qemu_rdma_exchange_send_issue_callback(void) ""
qemu_rdma_exchange_send_waiting(const char *desc) "Waiting for response %s"
qemu_rdma_exchange_send_received(const char *desc) "Response %s received."
qemu_rdma_fill(int64_t control_len, int size) "RDMA %" PRId64 " of %d bytes already in buffer"
qemu_rdma_fill(size_t control_len, size_t size) "RDMA %zd of %zd bytes already in buffer"
qemu_rdma_init_ram_blocks(int blocks) "Allocated %d local ram block structures"
qemu_rdma_poll_recv(const char *compstr, int64_t comp, int64_t id, int sent) "completion %s #%" PRId64 " received (%" PRId64 ") left %d"
qemu_rdma_poll_write(const char *compstr, int64_t comp, int left, uint64_t block, uint64_t chunk, void *local, void *remote) "completions %s (%" PRId64 ") left %d, block %" PRIu64 ", chunk: %" PRIu64 " %p %p"