migration/next for 20140505

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABCAAGBQJTZ/QsAAoJEPSH7xhYctcjJ4wP/31lNMpPLxdRpxpUoTPK39L0
 zEoBTUH5dzG2FG34p8/znNH0Lbmc3GALqP9UzyM0sL3QU0EDwqAP0idLJ/8zHrYN
 ZNJnNH6wexlpEypAsyQiayAFYAq6bc3hYtnEurN56dQgPVgb6jCicuOAt26JafLg
 xuGIuencgaEFZaIb8iobxloqDX9raoq0+JfnoV8r02+ES+PG+HHfZsAlbDiWpj/Z
 o06WBq9Xvf8X/zvuGHb9PCLGK36+kxPJ2G2531TJGZ6BjjewDExF2xlQH+PQ8pQB
 d2OKCP0In66JfyLu6JBz21APrks7DJxsuenvoqPhxhtlIcBSm+grZElNLbccDMLQ
 3fteWOCcri5WSwKNAkZ138D5SzIArZFBlqD9qW0GQsiaj3tCxfy162JAmzRPa94i
 R5OJCTDclwjCH6JvRsOs8NrQyYcXHcEl2rfeymfr68YOH5XBMeRPhcR91tVr03ow
 ZVgvwC/TJHntKnW+qtGIOa9Wfq4KghjddD37ayPAqKM/GQ0TV2sAPEr2htDaBZ0l
 5O/YGsAZPKax72OnMR2ObqoIMokympeanQLxG3Q8VKQSU2c+aU3rGVQrG1juiVYw
 NiC/nzarFzFWU8UYToXIu2QBDTde2vuRLX/myzL7L4OLavOcv8EXZBp60DP/bktE
 Pd+jdeLMh9kWu1tFrE8F
 =VoYL
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20140505' into staging

migration/next for 20140505

# gpg: Signature made Mon 05 May 2014 21:27:24 BST using RSA key ID 5872D723
# gpg: Can't check signature: public key not found

* remotes/juanquintela/tags/migration/20140505: (36 commits)
  migration: expose xbzrle cache miss rate
  migration: expose the bitmap_sync_count to the end
  migration: Add counts of updating the dirty bitmap
  XBZRLE: Fix one XBZRLE corruption issues
  migration: remove duplicate code
  Coverity: Fix failure path for qemu_accept in migration
  Init the XBZRLE.lock in ram_mig_init
  Provide init function for ram migration
  Count used RAMBlock pages for migration_dirty_pages
  Make qemu_peek_buffer loop until it gets it's data
  Disallow outward migration while awaiting incoming migration
  virtio: validate config_len on load
  virtio-net: out-of-bounds buffer write on load
  openpic: avoid buffer overrun on incoming migration
  ssi-sd: fix buffer overrun on invalid state load
  savevm: Ignore minimum_version_id_old if there is no load_state_old
  usb: sanity check setup_index+setup_len in post_load
  vmstate: s/VMSTATE_INT32_LE/VMSTATE_INT32_POSITIVE_LE/
  virtio-scsi: fix buffer overrun on invalid state load
  zaurus: fix buffer overrun on invalid state load
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2014-05-07 14:51:21 +01:00
commit c9541f67df
32 changed files with 484 additions and 185 deletions


@ -45,6 +45,7 @@
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
@ -110,6 +111,8 @@ static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);
static uint64_t bitmap_sync_count;
/***********************************************************/
/* ram save/restore */
@ -167,11 +170,8 @@ static struct {
/* Cache for XBZRLE, Protected by lock. */
PageCache *cache;
QemuMutex lock;
} XBZRLE = {
.encoded_buf = NULL,
.current_buf = NULL,
.cache = NULL,
};
} XBZRLE;
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
@ -187,41 +187,44 @@ static void XBZRLE_cache_unlock(void)
qemu_mutex_unlock(&XBZRLE.lock);
}
/*
* called from qmp_migrate_set_cache_size in main thread, possibly while
* a migration is in progress.
* A running migration may be using the cache and might finish during this
* call, hence changes to the cache are protected by XBZRLE.lock().
*/
int64_t xbzrle_cache_resize(int64_t new_size)
{
PageCache *new_cache, *cache_to_free;
PageCache *new_cache;
int64_t ret;
if (new_size < TARGET_PAGE_SIZE) {
return -1;
}
/* no need to lock, the current thread holds qemu big lock */
XBZRLE_cache_lock();
if (XBZRLE.cache != NULL) {
/* check XBZRLE.cache again later */
if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
return pow2floor(new_size);
goto out_new_size;
}
new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
if (!new_cache) {
DPRINTF("Error creating cache\n");
return -1;
error_report("Error creating cache");
ret = -1;
goto out;
}
XBZRLE_cache_lock();
/* the XBZRLE.cache may have been destroyed, check it again */
if (XBZRLE.cache != NULL) {
cache_to_free = XBZRLE.cache;
XBZRLE.cache = new_cache;
} else {
cache_to_free = new_cache;
}
XBZRLE_cache_unlock();
cache_fini(cache_to_free);
cache_fini(XBZRLE.cache);
XBZRLE.cache = new_cache;
}
return pow2floor(new_size);
out_new_size:
ret = pow2floor(new_size);
out:
XBZRLE_cache_unlock();
return ret;
}
/* accounting for migration statistics */
@ -233,6 +236,7 @@ typedef struct AccountingInfo {
uint64_t xbzrle_bytes;
uint64_t xbzrle_pages;
uint64_t xbzrle_cache_miss;
double xbzrle_cache_miss_rate;
uint64_t xbzrle_overflows;
} AccountingInfo;
@ -288,6 +292,11 @@ uint64_t xbzrle_mig_pages_cache_miss(void)
return acct_info.xbzrle_cache_miss;
}
double xbzrle_mig_cache_miss_rate(void)
{
return acct_info.xbzrle_cache_miss_rate;
}
uint64_t xbzrle_mig_pages_overflow(void)
{
return acct_info.xbzrle_overflows;
@ -340,7 +349,7 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
#define ENCODING_FLAG_XBZRLE 0x1
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
ram_addr_t current_addr, RAMBlock *block,
ram_addr_t offset, int cont, bool last_stage)
{
@ -348,19 +357,23 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
uint8_t *prev_cached_page;
if (!cache_is_cached(XBZRLE.cache, current_addr)) {
acct_info.xbzrle_cache_miss++;
if (!last_stage) {
if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) {
if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
return -1;
} else {
/* update *current_data when the page has been
inserted into cache */
*current_data = get_cached_data(XBZRLE.cache, current_addr);
}
}
acct_info.xbzrle_cache_miss++;
return -1;
}
prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
/* save current buffer into memory */
memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
/* XBZRLE encoding (if there is no overflow) */
encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
@ -373,7 +386,10 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
DPRINTF("Overflow\n");
acct_info.xbzrle_overflows++;
/* update data in the cache */
memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
if (!last_stage) {
memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
*current_data = prev_cached_page;
}
return -1;
}
@ -479,6 +495,10 @@ static void migration_bitmap_sync(void)
static int64_t num_dirty_pages_period;
int64_t end_time;
int64_t bytes_xfer_now;
static uint64_t xbzrle_cache_miss_prev;
static uint64_t iterations_prev;
bitmap_sync_count++;
if (!bytes_xfer_prev) {
bytes_xfer_prev = ram_bytes_transferred();
@ -520,11 +540,22 @@ static void migration_bitmap_sync(void)
} else {
mig_throttle_on = false;
}
if (migrate_use_xbzrle()) {
if (iterations_prev != 0) {
acct_info.xbzrle_cache_miss_rate =
(double)(acct_info.xbzrle_cache_miss -
xbzrle_cache_miss_prev) /
(acct_info.iterations - iterations_prev);
}
iterations_prev = acct_info.iterations;
xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
}
s->dirty_pages_rate = num_dirty_pages_period * 1000
/ (end_time - start_time);
s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
start_time = end_time;
num_dirty_pages_period = 0;
s->dirty_sync_count = bitmap_sync_count;
}
}
@ -598,15 +629,9 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
*/
xbzrle_cache_zero_page(current_addr);
} else if (!ram_bulk_stage && migrate_use_xbzrle()) {
bytes_sent = save_xbzrle_page(f, p, current_addr, block,
bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
offset, cont, last_stage);
if (!last_stage) {
/* We must send exactly what's in the xbzrle cache
* even if the page wasn't xbzrle compressed, so that
* it's right next time.
*/
p = get_cached_data(XBZRLE.cache, current_addr);
/* Can't send this cached data async, since the cache page
* might get updated before it gets to the wire
*/
@ -726,37 +751,34 @@ static void reset_ram_globals(void)
static int ram_save_setup(QEMUFile *f, void *opaque)
{
RAMBlock *block;
int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
migration_bitmap = bitmap_new(ram_pages);
bitmap_set(migration_bitmap, 0, ram_pages);
migration_dirty_pages = ram_pages;
mig_throttle_on = false;
dirty_rate_high_cnt = 0;
bitmap_sync_count = 0;
if (migrate_use_xbzrle()) {
qemu_mutex_lock_iothread();
XBZRLE_cache_lock();
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
if (!XBZRLE.cache) {
qemu_mutex_unlock_iothread();
DPRINTF("Error creating cache\n");
XBZRLE_cache_unlock();
error_report("Error creating cache");
return -1;
}
qemu_mutex_init(&XBZRLE.lock);
qemu_mutex_unlock_iothread();
XBZRLE_cache_unlock();
/* We prefer not to abort if there is no memory */
XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
if (!XBZRLE.encoded_buf) {
DPRINTF("Error allocating encoded_buf\n");
error_report("Error allocating encoded_buf");
return -1;
}
XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
if (!XBZRLE.current_buf) {
DPRINTF("Error allocating current_buf\n");
error_report("Error allocating current_buf");
g_free(XBZRLE.encoded_buf);
XBZRLE.encoded_buf = NULL;
return -1;
@ -770,6 +792,22 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
bytes_transferred = 0;
reset_ram_globals();
ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
migration_bitmap = bitmap_new(ram_bitmap_pages);
bitmap_set(migration_bitmap, 0, ram_bitmap_pages);
/*
* Count the total number of pages used by ram blocks not including any
* gaps due to alignment or unplugs.
*/
migration_dirty_pages = 0;
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
uint64_t block_pages;
block_pages = block->length >> TARGET_PAGE_BITS;
migration_dirty_pages += block_pages;
}
memory_global_dirty_log_start();
migration_bitmap_sync();
qemu_mutex_unlock_iothread();
@ -997,7 +1035,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
seq_iter++;
if (version_id < 4 || version_id > 4) {
if (version_id != 4) {
return -EINVAL;
}
@ -1008,44 +1046,42 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
addr &= TARGET_PAGE_MASK;
if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
if (version_id == 4) {
/* Synchronize RAM block list */
char id[256];
ram_addr_t length;
ram_addr_t total_ram_bytes = addr;
/* Synchronize RAM block list */
char id[256];
ram_addr_t length;
ram_addr_t total_ram_bytes = addr;
while (total_ram_bytes) {
RAMBlock *block;
uint8_t len;
while (total_ram_bytes) {
RAMBlock *block;
uint8_t len;
len = qemu_get_byte(f);
qemu_get_buffer(f, (uint8_t *)id, len);
id[len] = 0;
length = qemu_get_be64(f);
len = qemu_get_byte(f);
qemu_get_buffer(f, (uint8_t *)id, len);
id[len] = 0;
length = qemu_get_be64(f);
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
if (block->length != length) {
fprintf(stderr,
"Length mismatch: %s: " RAM_ADDR_FMT
" in != " RAM_ADDR_FMT "\n", id, length,
block->length);
ret = -EINVAL;
goto done;
}
break;
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
if (block->length != length) {
fprintf(stderr,
"Length mismatch: %s: " RAM_ADDR_FMT
" in != " RAM_ADDR_FMT "\n", id, length,
block->length);
ret = -EINVAL;
goto done;
}
break;
}
if (!block) {
fprintf(stderr, "Unknown ramblock \"%s\", cannot "
"accept migration\n", id);
ret = -EINVAL;
goto done;
}
total_ram_bytes -= length;
}
if (!block) {
fprintf(stderr, "Unknown ramblock \"%s\", cannot "
"accept migration\n", id);
ret = -EINVAL;
goto done;
}
total_ram_bytes -= length;
}
}
@ -1095,7 +1131,7 @@ done:
return ret;
}
SaveVMHandlers savevm_ram_handlers = {
static SaveVMHandlers savevm_ram_handlers = {
.save_live_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
.save_live_complete = ram_save_complete,
@ -1104,6 +1140,12 @@ SaveVMHandlers savevm_ram_handlers = {
.cancel = ram_migration_cancel,
};
void ram_mig_init(void)
{
qemu_mutex_init(&XBZRLE.lock);
register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
struct soundhw {
const char *name;
const char *descr;


@ -139,7 +139,6 @@ static const VMStateDescription vmstate_kbd = {
.name = "pckbd",
.version_id = 3,
.minimum_version_id = 3,
.minimum_version_id_old = 3,
.fields = (VMStateField []) {
VMSTATE_UINT8(write_cmd, KBDState),
VMSTATE_UINT8(status, KBDState),
@ -168,12 +167,13 @@ You can see that there are several version fields:
- minimum_version_id: the minimum version_id that VMState is able to understand
for that device.
- minimum_version_id_old: For devices that were not able to port to vmstate, we can
assign a function that knows how to read this old state.
assign a function that knows how to read this old state. This field is
ignored if there is no load_state_old handler.
So, VMState is able to read versions from minimum_version_id to
version_id. And the function load_state_old() is able to load state
from minimum_version_id_old to minimum_version_id. This function is
deprecated and will be removed when no more users are left.
version_id. And the function load_state_old() (if present) is able to
load state from minimum_version_id_old to minimum_version_id. This
function is deprecated and will be removed when no more users are left.
=== Massaging functions ===
@ -255,7 +255,6 @@ const VMStateDescription vmstate_ide_drive_pio_state = {
.name = "ide_drive/pio_state",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.pre_save = ide_drive_pio_pre_save,
.post_load = ide_drive_pio_post_load,
.fields = (VMStateField []) {
@ -275,7 +274,6 @@ const VMStateDescription vmstate_ide_drive = {
.name = "ide_drive",
.version_id = 3,
.minimum_version_id = 0,
.minimum_version_id_old = 0,
.post_load = ide_drive_post_load,
.fields = (VMStateField []) {
.... several fields ....
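
The documentation hunk above describes how minimum_version_id_old is only honoured when a load_state_old handler is present. A minimal sketch of that pairing, using a hypothetical FooState device (the struct, field names and legacy wire format are invented for illustration, not taken from this series):

/* Hypothetical device state, for illustration only. */
typedef struct FooState {
    uint32_t level;
} FooState;

/* Legacy loader for streams older than minimum_version_id (version 1 here). */
static int foo_load_old(QEMUFile *f, void *opaque, int version_id)
{
    FooState *s = opaque;

    s->level = qemu_get_be32(f);    /* old hand-rolled wire format */
    return 0;
}

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 3,                /* version we send */
    .minimum_version_id = 2,        /* .fields below can parse versions 2..3 */
    .minimum_version_id_old = 1,    /* only consulted because load_state_old is set */
    .load_state_old = foo_load_old,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, FooState),
        VMSTATE_END_OF_LIST()
    }
};

With the vmstate.c change later in this series, leaving .load_state_old unset makes an incoming version 1 stream fail with -EINVAL instead of falling back on minimum_version_id_old.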

hmp.c

@ -188,6 +188,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->ram->normal);
monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n",
info->ram->normal_bytes >> 10);
monitor_printf(mon, "dirty sync count: %" PRIu64 "\n",
info->ram->dirty_sync_count);
if (info->ram->dirty_pages_rate) {
monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n",
info->ram->dirty_pages_rate);
@ -212,6 +214,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->xbzrle_cache->pages);
monitor_printf(mon, "xbzrle cache miss: %" PRIu64 "\n",
info->xbzrle_cache->cache_miss);
monitor_printf(mon, "xbzrle cache miss rate: %0.2f\n",
info->xbzrle_cache->cache_miss_rate);
monitor_printf(mon, "xbzrle overflow : %" PRIu64 "\n",
info->xbzrle_cache->overflow);
}


@ -732,7 +732,7 @@ static void pxa2xx_ssp_save(QEMUFile *f, void *opaque)
static int pxa2xx_ssp_load(QEMUFile *f, void *opaque, int version_id)
{
PXA2xxSSPState *s = (PXA2xxSSPState *) opaque;
int i;
int i, v;
s->enable = qemu_get_be32(f);
@ -746,7 +746,11 @@ static int pxa2xx_ssp_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_8s(f, &s->ssrsa);
qemu_get_8s(f, &s->ssacd);
s->rx_level = qemu_get_byte(f);
v = qemu_get_byte(f);
if (v < 0 || v > ARRAY_SIZE(s->rx_fifo)) {
return -EINVAL;
}
s->rx_level = v;
s->rx_start = 0;
for (i = 0; i < s->rx_level; i ++)
s->rx_fifo[i] = qemu_get_byte(f);


@ -312,18 +312,42 @@ static int ssd0323_load(QEMUFile *f, void *opaque, int version_id)
return -EINVAL;
s->cmd_len = qemu_get_be32(f);
if (s->cmd_len < 0 || s->cmd_len > ARRAY_SIZE(s->cmd_data)) {
return -EINVAL;
}
s->cmd = qemu_get_be32(f);
for (i = 0; i < 8; i++)
s->cmd_data[i] = qemu_get_be32(f);
s->row = qemu_get_be32(f);
if (s->row < 0 || s->row >= 80) {
return -EINVAL;
}
s->row_start = qemu_get_be32(f);
if (s->row_start < 0 || s->row_start >= 80) {
return -EINVAL;
}
s->row_end = qemu_get_be32(f);
if (s->row_end < 0 || s->row_end >= 80) {
return -EINVAL;
}
s->col = qemu_get_be32(f);
if (s->col < 0 || s->col >= 64) {
return -EINVAL;
}
s->col_start = qemu_get_be32(f);
if (s->col_start < 0 || s->col_start >= 64) {
return -EINVAL;
}
s->col_end = qemu_get_be32(f);
if (s->col_end < 0 || s->col_end >= 64) {
return -EINVAL;
}
s->redraw = qemu_get_be32(f);
s->remap = qemu_get_be32(f);
s->mode = qemu_get_be32(f);
if (s->mode != SSD0323_CMD && s->mode != SSD0323_DATA) {
return -EINVAL;
}
qemu_get_buffer(f, s->framebuffer, sizeof(s->framebuffer));
ss->cs = qemu_get_be32(f);


@ -203,6 +203,15 @@ static bool is_version_0 (void *opaque, int version_id)
return version_id == 0;
}
static bool vmstate_scoop_validate(void *opaque, int version_id)
{
ScoopInfo *s = opaque;
return !(s->prev_level & 0xffff0000) &&
!(s->gpio_level & 0xffff0000) &&
!(s->gpio_dir & 0xffff0000);
}
static const VMStateDescription vmstate_scoop_regs = {
.name = "scoop",
.version_id = 1,
@ -215,6 +224,7 @@ static const VMStateDescription vmstate_scoop_regs = {
VMSTATE_UINT32(gpio_level, ScoopInfo),
VMSTATE_UINT32(gpio_dir, ScoopInfo),
VMSTATE_UINT32(prev_level, ScoopInfo),
VMSTATE_VALIDATE("irq levels are 16 bit", vmstate_scoop_validate),
VMSTATE_UINT16(mcr, ScoopInfo),
VMSTATE_UINT16(cdr, ScoopInfo),
VMSTATE_UINT16(ccr, ScoopInfo),


@ -1293,7 +1293,7 @@ const VMStateDescription vmstate_ahci = {
VMSTATE_UINT32(control_regs.impl, AHCIState),
VMSTATE_UINT32(control_regs.version, AHCIState),
VMSTATE_UINT32(idp_index, AHCIState),
VMSTATE_INT32(ports, AHCIState),
VMSTATE_INT32_EQUAL(ports, AHCIState),
VMSTATE_END_OF_LIST()
},
};


@ -1070,9 +1070,21 @@ static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
s->enabled = qemu_get_byte(f);
s->host_mode = qemu_get_byte(f);
s->function = qemu_get_byte(f);
if (s->function < 0 || s->function >= ARRAY_SIZE(mode_regs)) {
return -EINVAL;
}
s->nextfunction = qemu_get_byte(f);
if (s->nextfunction < 0 || s->nextfunction >= ARRAY_SIZE(mode_regs)) {
return -EINVAL;
}
s->precision = qemu_get_byte(f);
if (s->precision < 0 || s->precision >= ARRAY_SIZE(resolution)) {
return -EINVAL;
}
s->nextprecision = qemu_get_byte(f);
if (s->nextprecision < 0 || s->nextprecision >= ARRAY_SIZE(resolution)) {
return -EINVAL;
}
s->filter = qemu_get_byte(f);
s->pin_func = qemu_get_byte(f);
s->ref = qemu_get_byte(f);


@ -41,6 +41,7 @@
#include "hw/sysbus.h"
#include "hw/pci/msi.h"
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
//#define DEBUG_OPENPIC
@ -1416,7 +1417,7 @@ static void openpic_load_IRQ_queue(QEMUFile* f, IRQQueue *q)
static int openpic_load(QEMUFile* f, void *opaque, int version_id)
{
OpenPICState *opp = (OpenPICState *)opaque;
unsigned int i;
unsigned int i, nb_cpus;
if (version_id != 1) {
return -EINVAL;
@ -1428,7 +1429,11 @@ static int openpic_load(QEMUFile* f, void *opaque, int version_id)
qemu_get_be32s(f, &opp->spve);
qemu_get_be32s(f, &opp->tfrr);
qemu_get_be32s(f, &opp->nb_cpus);
qemu_get_be32s(f, &nb_cpus);
if (opp->nb_cpus != nb_cpus) {
return -EINVAL;
}
assert(nb_cpus > 0 && nb_cpus <= MAX_CPU);
for (i = 0; i < opp->nb_cpus; i++) {
qemu_get_sbe32s(f, &opp->dst[i].ctpr);
@ -1567,6 +1572,13 @@ static void openpic_realize(DeviceState *dev, Error **errp)
{NULL}
};
if (opp->nb_cpus > MAX_CPU) {
error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE,
TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus,
(uint64_t)0, (uint64_t)MAX_CPU);
return;
}
switch (opp->model) {
case OPENPIC_MODEL_FSL_MPIC_20:
default:


@ -1362,10 +1362,17 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
qemu_get_buffer(f, n->mac_table.macs,
n->mac_table.in_use * ETH_ALEN);
} else if (n->mac_table.in_use) {
uint8_t *buf = g_malloc0(n->mac_table.in_use);
qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
g_free(buf);
} else {
int64_t i;
/* Overflow detected - can happen if source has a larger MAC table.
* We simply set the overflow flag so there's no need to maintain the
* table of addresses, discard them all.
* Note: 64 bit math to avoid integer overflow.
*/
for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
qemu_get_byte(f);
}
n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
n->mac_table.in_use = 0;
}
@ -1407,6 +1414,11 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
}
n->curr_queues = qemu_get_be16(f);
if (n->curr_queues > n->max_queues) {
error_report("virtio-net: curr_queues %x > max_queues %x",
n->curr_queues, n->max_queues);
return -1;
}
for (i = 1; i < n->curr_queues; i++) {
n->vqs[i].tx_waiting = qemu_get_be32(f);
}


@ -475,7 +475,7 @@ const VMStateDescription vmstate_pci_device = {
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
VMSTATE_INT32_LE(version_id, PCIDevice),
VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
vmstate_info_pci_config,
PCI_CONFIG_SPACE_SIZE),
@ -492,7 +492,7 @@ const VMStateDescription vmstate_pcie_device = {
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
VMSTATE_INT32_LE(version_id, PCIDevice),
VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
vmstate_info_pci_config,
PCIE_CONFIG_SPACE_SIZE),


@ -795,6 +795,13 @@ static const VMStateDescription vmstate_pcie_aer_err = {
}
};
static bool pcie_aer_state_log_num_valid(void *opaque, int version_id)
{
PCIEAERLog *s = opaque;
return s->log_num <= s->log_max;
}
const VMStateDescription vmstate_pcie_aer_log = {
.name = "PCIE_AER_ERROR_LOG",
.version_id = 1,
@ -802,7 +809,8 @@ const VMStateDescription vmstate_pcie_aer_log = {
.minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT16(log_num, PCIEAERLog),
VMSTATE_UINT16(log_max, PCIEAERLog),
VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog),
VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid),
VMSTATE_STRUCT_VARRAY_POINTER_UINT16(log, PCIEAERLog, log_num,
vmstate_pcie_aer_err, PCIEAERErr),
VMSTATE_END_OF_LIST()


@ -147,6 +147,15 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
qemu_get_be32s(f, &n);
assert(n < vs->conf.num_queues);
qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
/* TODO: add a way for SCSIBusInfo's load_request to fail,
* and fail migration instead of asserting here.
* When we do, we might be able to re-enable NDEBUG below.
*/
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
assert(req->elem.in_num <= ARRAY_SIZE(req->elem.in_sg));
assert(req->elem.out_num <= ARRAY_SIZE(req->elem.out_sg));
virtio_scsi_parse_req(s, vs->cmd_vqs[n], req);
scsi_req_ref(sreq);


@ -230,8 +230,17 @@ static int ssi_sd_load(QEMUFile *f, void *opaque, int version_id)
for (i = 0; i < 5; i++)
s->response[i] = qemu_get_be32(f);
s->arglen = qemu_get_be32(f);
if (s->mode == SSI_SD_CMDARG &&
(s->arglen < 0 || s->arglen >= ARRAY_SIZE(s->cmdarg))) {
return -EINVAL;
}
s->response_pos = qemu_get_be32(f);
s->stopping = qemu_get_be32(f);
if (s->mode == SSI_SD_RESPONSE &&
(s->response_pos < 0 || s->response_pos >= ARRAY_SIZE(s->response) ||
(!s->stopping && s->arglen > ARRAY_SIZE(s->response)))) {
return -EINVAL;
}
ss->cs = qemu_get_be32(f);


@ -240,11 +240,25 @@ static const MemoryRegionOps pl022_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
static int pl022_post_load(void *opaque, int version_id)
{
PL022State *s = opaque;
if (s->tx_fifo_head < 0 ||
s->tx_fifo_head >= ARRAY_SIZE(s->tx_fifo) ||
s->rx_fifo_head < 0 ||
s->rx_fifo_head >= ARRAY_SIZE(s->rx_fifo)) {
return -1;
}
return 0;
}
static const VMStateDescription vmstate_pl022 = {
.name = "pl022_ssp",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.post_load = pl022_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(cr0, PL022State),
VMSTATE_UINT32(cr1, PL022State),


@ -239,6 +239,18 @@ static int hpet_pre_load(void *opaque)
return 0;
}
static bool hpet_validate_num_timers(void *opaque, int version_id)
{
HPETState *s = opaque;
if (s->num_timers < HPET_MIN_TIMERS) {
return false;
} else if (s->num_timers > HPET_MAX_TIMERS) {
return false;
}
return true;
}
static int hpet_post_load(void *opaque, int version_id)
{
HPETState *s = opaque;
@ -307,6 +319,7 @@ static const VMStateDescription vmstate_hpet = {
VMSTATE_UINT64(isr, HPETState),
VMSTATE_UINT64(hpet_counter, HPETState),
VMSTATE_UINT8_V(num_timers, HPETState, 2),
VMSTATE_VALIDATE("num_timers in range", hpet_validate_num_timers),
VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0,
vmstate_hpet_timer, HPETTimer),
VMSTATE_END_OF_LIST()


@ -49,7 +49,9 @@ static int usb_device_post_load(void *opaque, int version_id)
} else {
dev->attached = 1;
}
if (dev->setup_index >= sizeof(dev->data_buf) ||
if (dev->setup_index < 0 ||
dev->setup_len < 0 ||
dev->setup_index >= sizeof(dev->data_buf) ||
dev->setup_len >= sizeof(dev->data_buf)) {
return -EINVAL;
}


@ -430,6 +430,12 @@ void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
unsigned int i;
hwaddr len;
if (num_sg >= VIRTQUEUE_MAX_SIZE) {
error_report("virtio: map attempt out of bounds: %zd > %d",
num_sg, VIRTQUEUE_MAX_SIZE);
exit(1);
}
for (i = 0; i < num_sg; i++) {
len = sg[i].iov_len;
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
@ -891,7 +897,9 @@ int virtio_set_features(VirtIODevice *vdev, uint32_t val)
int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
int num, i, ret;
int i, ret;
int32_t config_len;
uint32_t num;
uint32_t features;
uint32_t supported_features;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
@ -906,6 +914,9 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f)
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
return -1;
}
qemu_get_be32s(f, &features);
if (virtio_set_features(vdev, features) < 0) {
@ -914,11 +925,21 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f)
features, supported_features);
return -1;
}
vdev->config_len = qemu_get_be32(f);
config_len = qemu_get_be32(f);
if (config_len != vdev->config_len) {
error_report("Unexpected config length 0x%x. Expected 0x%zx",
config_len, vdev->config_len);
return -1;
}
qemu_get_buffer(f, vdev->config, vdev->config_len);
num = qemu_get_be32(f);
if (num > VIRTIO_PCI_QUEUE_MAX) {
error_report("Invalid number of PCI queues: 0x%x", num);
return -1;
}
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
if (k->has_variable_vring_alignment) {


@ -176,8 +176,8 @@ typedef struct VirtIONet {
uint8_t nobcast;
uint8_t vhost_started;
struct {
int in_use;
int first_multi;
uint32_t in_use;
uint32_t first_multi;
uint8_t multi_overflow;
uint8_t uni_overflow;
uint8_t *macs;


@ -61,6 +61,7 @@ struct MigrationState
bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
int64_t xbzrle_cache_size;
int64_t setup_time;
int64_t dirty_sync_count;
};
void process_incoming_migration(QEMUFile *f);
@ -113,8 +114,6 @@ void free_xbzrle_decoded_buf(void);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
extern SaveVMHandlers savevm_ram_handlers;
uint64_t dup_mig_bytes_transferred(void);
uint64_t dup_mig_pages_transferred(void);
uint64_t skipped_mig_bytes_transferred(void);
@ -125,6 +124,7 @@ uint64_t xbzrle_mig_bytes_transferred(void);
uint64_t xbzrle_mig_pages_transferred(void);
uint64_t xbzrle_mig_pages_overflow(void);
uint64_t xbzrle_mig_pages_cache_miss(void);
double xbzrle_mig_cache_miss_rate(void);
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);


@ -123,6 +123,11 @@ void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset);
int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size);
/*
* Note that you can only peek continuous bytes from where the current pointer
* is; you aren't guaranteed to be able to peek to +n bytes unless you've
* previously peeked +n-1.
*/
int qemu_peek_byte(QEMUFile *f, int offset);
int qemu_get_byte(QEMUFile *f);
void qemu_file_skip(QEMUFile *f, int size);
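
The note added above constrains how callers may combine the peek functions: offsets have to be peeked contiguously from the current position. A small usage sketch of that pattern (the two-byte record header and the helper are hypothetical, purely for illustration):

/* Hypothetical caller: inspect a 2-byte record header without consuming it. */
static int peek_record_header(QEMUFile *f)
{
    /* Offset 1 is only guaranteed to be readable because offset 0 is
     * peeked first; jumping ahead without peeking the earlier bytes
     * is not supported.
     */
    int type  = qemu_peek_byte(f, 0);   /* first unread byte */
    int flags = qemu_peek_byte(f, 1);   /* contiguous with the previous peek */

    qemu_file_skip(f, 2);               /* consume both bytes once inspected */
    return (flags << 8) | type;
}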


@ -100,6 +100,7 @@ enum VMStateFlags {
VMS_MULTIPLY = 0x200, /* multiply "size" field by field_size */
VMS_VARRAY_UINT8 = 0x400, /* Array with size in uint8_t field*/
VMS_VARRAY_UINT32 = 0x800, /* Array with size in uint32_t field*/
VMS_MUST_EXIST = 0x1000, /* Field must exist in input */
};
typedef struct {
@ -203,6 +204,14 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = vmstate_offset_value(_state, _field, _type), \
}
/* Validate state using a boolean predicate. */
#define VMSTATE_VALIDATE(_name, _test) { \
.name = (_name), \
.field_exists = (_test), \
.flags = VMS_ARRAY | VMS_MUST_EXIST, \
.num = 0, /* 0 elements: no data, only run _test */ \
}
#define VMSTATE_POINTER(_field, _state, _version, _info, _type) { \
.name = (stringify(_field)), \
.version_id = (_version), \
@ -592,7 +601,7 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_UINT64_EQUAL(_f, _s) \
VMSTATE_UINT64_EQUAL_V(_f, _s, 0)
#define VMSTATE_INT32_LE(_f, _s) \
#define VMSTATE_INT32_POSITIVE_LE(_f, _s) \
VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_le, int32_t)
#define VMSTATE_UINT8_TEST(_f, _s, _t) \


@ -29,6 +29,7 @@ extern const uint32_t arch_type;
void select_soundhw(const char *optarg);
void do_acpitable_option(const QemuOpts *opts);
void do_smbios_option(QemuOpts *opts);
void ram_mig_init(void);
void cpudef_init(void);
void audio_init(void);
int tcg_available(void);


@ -13,7 +13,10 @@
* GNU GPL, version 2 or (at your option) any later version.
*/
#include <string.h>
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
@ -56,24 +59,26 @@ static void tcp_accept_incoming_migration(void *opaque)
socklen_t addrlen = sizeof(addr);
int s = (intptr_t)opaque;
QEMUFile *f;
int c;
int c, err;
do {
c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen);
} while (c == -1 && socket_error() == EINTR);
err = socket_error();
} while (c < 0 && err == EINTR);
qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
closesocket(s);
DPRINTF("accepted migration\n");
if (c == -1) {
fprintf(stderr, "could not accept migration connection\n");
goto out;
if (c < 0) {
error_report("could not accept migration connection (%s)",
strerror(err));
return;
}
f = qemu_fopen_socket(c, "rb");
if (f == NULL) {
fprintf(stderr, "could not qemu_fopen socket\n");
error_report("could not qemu_fopen socket");
goto out;
}


@ -13,7 +13,10 @@
* GNU GPL, version 2 or (at your option) any later version.
*/
#include <string.h>
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
@ -56,24 +59,26 @@ static void unix_accept_incoming_migration(void *opaque)
socklen_t addrlen = sizeof(addr);
int s = (intptr_t)opaque;
QEMUFile *f;
int c;
int c, err;
do {
c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen);
} while (c == -1 && errno == EINTR);
err = errno;
} while (c < 0 && err == EINTR);
qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
close(s);
DPRINTF("accepted migration\n");
if (c == -1) {
fprintf(stderr, "could not accept migration connection\n");
goto out;
if (c < 0) {
error_report("could not accept migration connection (%s)",
strerror(err));
return;
}
f = qemu_fopen_socket(c, "rb");
if (f == NULL) {
fprintf(stderr, "could not qemu_fopen socket\n");
error_report("could not qemu_fopen socket");
goto out;
}


@ -174,6 +174,7 @@ static void get_xbzrle_cache_stats(MigrationInfo *info)
info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
}
}
@ -215,6 +216,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->ram->normal_bytes = norm_mig_bytes_transferred();
info->ram->dirty_pages_rate = s->dirty_pages_rate;
info->ram->mbps = s->mbps;
info->ram->dirty_sync_count = s->dirty_sync_count;
if (blk_mig_active()) {
info->has_disk = true;
@ -248,6 +250,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->ram->normal = norm_mig_pages_transferred();
info->ram->normal_bytes = norm_mig_bytes_transferred();
info->ram->mbps = s->mbps;
info->ram->dirty_sync_count = s->dirty_sync_count;
break;
case MIG_STATE_ERROR:
info->has_status = true;
@ -419,6 +422,11 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
return;
}
if (runstate_check(RUN_STATE_INMIGRATE)) {
error_setg(errp, "Guest is waiting for an incoming migration");
return;
}
if (qemu_savevm_state_blocked(errp)) {
return;
}


@ -651,13 +651,15 @@
#
# @mbps: throughput in megabits/sec. (since 1.6)
#
# @dirty-sync-count: number of times that dirty ram was synchronized (since 2.1)
#
# Since: 0.14.0
##
{ 'type': 'MigrationStats',
'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
'mbps' : 'number' } }
'mbps' : 'number', 'dirty-sync-count' : 'int' } }
##
# @XBZRLECacheStats
@ -672,13 +674,16 @@
#
# @cache-miss: number of cache miss
#
# @cache-miss-rate: rate of cache miss (since 2.1)
#
# @overflow: number of overflows
#
# Since: 1.2
##
{ 'type': 'XBZRLECacheStats',
'data': {'cache-size': 'int', 'bytes': 'int', 'pages': 'int',
'cache-miss': 'int', 'overflow': 'int' } }
'cache-miss': 'int', 'cache-miss-rate': 'number',
'overflow': 'int' } }
##
# @MigrationInfo


@ -530,7 +530,15 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
return RAM_SAVE_CONTROL_NOT_SUPP;
}
static void qemu_fill_buffer(QEMUFile *f)
/*
* Attempt to fill the buffer from the underlying file
* Returns the number of bytes read, or negative value for an error.
*
* Note that it can return a partially full buffer even in a non-error/non-EOF
* case if the underlying file descriptor gives a short read, and that can
* happen even on a blocking fd.
*/
static ssize_t qemu_fill_buffer(QEMUFile *f)
{
int len;
int pending;
@ -554,6 +562,8 @@ static void qemu_fill_buffer(QEMUFile *f)
} else if (len != -EAGAIN) {
qemu_file_set_error(f, len);
}
return len;
}
int qemu_get_fd(QEMUFile *f)
@ -685,17 +695,39 @@ void qemu_file_skip(QEMUFile *f, int size)
}
}
/*
* Read 'size' bytes from file (at 'offset') into buf without moving the
* pointer.
*
* It will return size bytes unless there was an error, in which case it will
* return as many as it managed to read (assuming blocking fds, which
* all current QEMUFiles are)
*/
int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset)
{
int pending;
int index;
assert(!qemu_file_is_writable(f));
assert(offset < IO_BUF_SIZE);
assert(size <= IO_BUF_SIZE - offset);
/* The 1st byte to read from */
index = f->buf_index + offset;
/* The number of available bytes starting at index */
pending = f->buf_size - index;
if (pending < size) {
qemu_fill_buffer(f);
/*
* qemu_fill_buffer might return just a few bytes, even when there isn't
* an error, so loop collecting them until we get enough.
*/
while (pending < size) {
int received = qemu_fill_buffer(f);
if (received <= 0) {
break;
}
index = f->buf_index + offset;
pending = f->buf_size - index;
}
@ -711,6 +743,14 @@ int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset)
return size;
}
/*
* Read 'size' bytes of data from the file into buf.
* 'size' can be larger than the internal buffer.
*
* It will return size bytes unless there was an error, in which case it will
* return as many as it managed to read (assuming blocking fds, which
* all current QEMUFiles are)
*/
int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
{
int pending = size;
@ -719,7 +759,7 @@ int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
while (pending > 0) {
int res;
res = qemu_peek_buffer(f, buf, pending, 0);
res = qemu_peek_buffer(f, buf, MIN(pending, IO_BUF_SIZE), 0);
if (res == 0) {
return done;
}
@ -731,11 +771,16 @@ int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
return done;
}
/*
* Peeks a single byte from the buffer; this isn't guaranteed to work if
* offset leaves a gap after the previous read/peeked data.
*/
int qemu_peek_byte(QEMUFile *f, int offset)
{
int index = f->buf_index + offset;
assert(!qemu_file_is_writable(f));
assert(offset < IO_BUF_SIZE);
if (index >= f->buf_size) {
qemu_fill_buffer(f);


@ -2967,6 +2967,7 @@ The main json-object contains the following:
pages. This is just normal pages times size of one page,
but this way upper levels don't need to care about page
size (json-int)
- "dirty-sync-count": times that dirty ram was synchronized (json-int)
- "disk": only present if "status" is "active" and it is a block migration,
it is a json-object with the following disk information:
- "transferred": amount transferred in bytes (json-int)
@ -2978,6 +2979,7 @@ The main json-object contains the following:
- "bytes": number of bytes transferred for XBZRLE compressed pages
- "pages": number of XBZRLE compressed pages
- "cache-miss": number of XBRZRLE page cache misses
- "cache-miss-rate": rate of XBRZRLE page cache misses
- "overflow": number of times XBZRLE overflows. This means
that the XBZRLE encoding was bigger than just sent the
whole page, and then we sent the whole page instead (as as
@ -3004,7 +3006,8 @@ Examples:
"downtime":12345,
"duplicate":123,
"normal":123,
"normal-bytes":123456
"normal-bytes":123456,
"dirty-sync-count":15
}
}
}
@ -3029,7 +3032,8 @@ Examples:
"expected-downtime":12345,
"duplicate":123,
"normal":123,
"normal-bytes":123456
"normal-bytes":123456,
"dirty-sync-count":15
}
}
}
@ -3049,7 +3053,8 @@ Examples:
"expected-downtime":12345,
"duplicate":123,
"normal":123,
"normal-bytes":123456
"normal-bytes":123456,
"dirty-sync-count":15
},
"disk":{
"total":20971520,
@ -3075,13 +3080,15 @@ Examples:
"expected-downtime":12345,
"duplicate":10,
"normal":3333,
"normal-bytes":3412992
"normal-bytes":3412992,
"dirty-sync-count":15
},
"xbzrle-cache":{
"cache-size":67108864,
"bytes":20971520,
"pages":2444343,
"cache-miss":2244,
"cache-miss-rate":0.123,
"overflow":34434
}
}


@ -248,7 +248,7 @@ const VMStateDescription vmstate_arm_cpu = {
/* The length-check must come before the arrays to avoid
* incoming data possibly overflowing the array.
*/
VMSTATE_INT32_LE(cpreg_vmstate_array_len, ARMCPU),
VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
cpreg_vmstate_array_len,
0, vmstate_info_uint64, uint64_t),

vl.c

@ -4322,6 +4322,7 @@ int main(int argc, char **argv, char **envp)
cpu_exec_init_all();
blk_mig_init();
ram_mig_init();
/* open the virtual block devices */
if (snapshot)
@ -4336,8 +4337,6 @@ int main(int argc, char **argv, char **envp)
default_drive(default_floppy, snapshot, IF_FLOPPY, 0, FD_OPTS);
default_drive(default_sdcard, snapshot, IF_SD, 0, SD_OPTS);
register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
if (nb_numa_nodes > 0) {
int i;

vmstate.c

@ -10,6 +10,50 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
static int vmstate_n_elems(void *opaque, VMStateField *field)
{
int n_elems = 1;
if (field->flags & VMS_ARRAY) {
n_elems = field->num;
} else if (field->flags & VMS_VARRAY_INT32) {
n_elems = *(int32_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT32) {
n_elems = *(uint32_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT16) {
n_elems = *(uint16_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT8) {
n_elems = *(uint8_t *)(opaque+field->num_offset);
}
return n_elems;
}
static int vmstate_size(void *opaque, VMStateField *field)
{
int size = field->size;
if (field->flags & VMS_VBUFFER) {
size = *(int32_t *)(opaque+field->size_offset);
if (field->flags & VMS_MULTIPLY) {
size *= field->size;
}
}
return size;
}
static void *vmstate_base_addr(void *opaque, VMStateField *field)
{
void *base_addr = opaque + field->offset;
if (field->flags & VMS_POINTER) {
base_addr = *(void **)base_addr + field->start;
}
return base_addr;
}
int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, int version_id)
{
@ -19,11 +63,12 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
if (version_id > vmsd->version_id) {
return -EINVAL;
}
if (version_id < vmsd->minimum_version_id_old) {
return -EINVAL;
}
if (version_id < vmsd->minimum_version_id) {
return vmsd->load_state_old(f, opaque, version_id);
if (vmsd->load_state_old &&
version_id >= vmsd->minimum_version_id_old) {
return vmsd->load_state_old(f, opaque, version_id);
}
return -EINVAL;
}
if (vmsd->pre_load) {
int ret = vmsd->pre_load(opaque);
@ -36,30 +81,10 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
field->field_exists(opaque, version_id)) ||
(!field->field_exists &&
field->version_id <= version_id)) {
void *base_addr = opaque + field->offset;
int i, n_elems = 1;
int size = field->size;
void *base_addr = vmstate_base_addr(opaque, field);
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
if (field->flags & VMS_VBUFFER) {
size = *(int32_t *)(opaque+field->size_offset);
if (field->flags & VMS_MULTIPLY) {
size *= field->size;
}
}
if (field->flags & VMS_ARRAY) {
n_elems = field->num;
} else if (field->flags & VMS_VARRAY_INT32) {
n_elems = *(int32_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT32) {
n_elems = *(uint32_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT16) {
n_elems = *(uint16_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT8) {
n_elems = *(uint8_t *)(opaque+field->num_offset);
}
if (field->flags & VMS_POINTER) {
base_addr = *(void **)base_addr + field->start;
}
for (i = 0; i < n_elems; i++) {
void *addr = base_addr + size * i;
@ -78,6 +103,10 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
return ret;
}
}
} else if (field->flags & VMS_MUST_EXIST) {
fprintf(stderr, "Input validation failed: %s/%s\n",
vmsd->name, field->name);
return -1;
}
field++;
}
@ -102,30 +131,10 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
while (field->name) {
if (!field->field_exists ||
field->field_exists(opaque, vmsd->version_id)) {
void *base_addr = opaque + field->offset;
int i, n_elems = 1;
int size = field->size;
void *base_addr = vmstate_base_addr(opaque, field);
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
if (field->flags & VMS_VBUFFER) {
size = *(int32_t *)(opaque+field->size_offset);
if (field->flags & VMS_MULTIPLY) {
size *= field->size;
}
}
if (field->flags & VMS_ARRAY) {
n_elems = field->num;
} else if (field->flags & VMS_VARRAY_INT32) {
n_elems = *(int32_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT32) {
n_elems = *(uint32_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT16) {
n_elems = *(uint16_t *)(opaque+field->num_offset);
} else if (field->flags & VMS_VARRAY_UINT8) {
n_elems = *(uint8_t *)(opaque+field->num_offset);
}
if (field->flags & VMS_POINTER) {
base_addr = *(void **)base_addr + field->start;
}
for (i = 0; i < n_elems; i++) {
void *addr = base_addr + size * i;
@ -138,6 +147,12 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
field->info->put(f, addr, size);
}
}
} else {
if (field->flags & VMS_MUST_EXIST) {
fprintf(stderr, "Output state validation failed: %s/%s\n",
vmsd->name, field->name);
assert(!(field->flags & VMS_MUST_EXIST));
}
}
field++;
}
@ -323,8 +338,9 @@ const VMStateInfo vmstate_info_int32_equal = {
.put = put_int32,
};
/* 32 bit int. Check that the received value is less than or equal to
the one in the field */
/* 32 bit int. Check that the received value is non-negative
* and less than or equal to the one in the field.
*/
static int get_int32_le(QEMUFile *f, void *pv, size_t size)
{
@ -332,7 +348,7 @@ static int get_int32_le(QEMUFile *f, void *pv, size_t size)
int32_t loaded;
qemu_get_sbe32s(f, &loaded);
if (loaded <= *cur) {
if (loaded >= 0 && loaded <= *cur) {
*cur = loaded;
return 0;
}