Fix two XBZRLE corruption issues

Push zero'd pages into the XBZRLE cache
    A page that was cached by XBZRLE, zero'd and then XBZRLE'd again
    was being compared against a stale cache value

Don't use 'qemu_put_buffer_async' to put pages from the XBZRLE cache
    Since the cache might change before the data hits the wire

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Authored by Dr. David Alan Gilbert on 2014-02-13 19:44:45 +00:00; committed by Juan Quintela
parent 24a370ef23
commit 6d3cb1f970
3 changed files with 51 additions and 17 deletions
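
The first issue can be pictured with a small stand-alone model (an illustration only, not QEMU code; the page size, helper name and data values below are invented for the demo). XBZRLE sends a page as a delta against the copy held in the source-side cache, and the destination applies that delta to its own copy, so the two copies must stay in sync. If a cached page is zeroed and sent through the zero-page shortcut without updating the cache, the next delta is computed against stale data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE 16                      /* toy page size for the demo */

static uint8_t cache[PAGE];          /* source-side "XBZRLE cache" slot */
static uint8_t dest[PAGE];           /* the destination's copy of the page */

/* Send the page as a delta against the cached copy, the way XBZRLE
 * conceptually works: bytes equal to the cache are not transmitted. */
static void send_delta(const uint8_t *page)
{
    for (int i = 0; i < PAGE; i++) {
        if (page[i] != cache[i]) {
            dest[i] = page[i];       /* only differing bytes reach the wire */
        }
    }
    memcpy(cache, page, PAGE);       /* cache now matches what was sent */
}

int main(void)
{
    uint8_t page[PAGE];

    memset(page, 0xAA, PAGE);        /* page sent once and cached */
    memcpy(cache, page, PAGE);
    memcpy(dest, page, PAGE);

    memset(page, 0, PAGE);           /* guest zeroes the page */
    memset(dest, 0, PAGE);           /* zero-page shortcut: destination zeroed, */
                                     /* but without the fix the cache is NOT updated */

    memset(page, 0xAA, PAGE);        /* guest writes the old contents back */
    send_delta(page);                /* delta vs. the stale cache: "nothing changed" */

    printf("dest %s the guest page\n",
           memcmp(dest, page, PAGE) ? "does NOT match" : "matches");
    return 0;
}

In this model the final delta is empty because the page happens to match the stale cached copy, so the destination keeps the zeroes it was given earlier and the page is corrupted. The xbzrle_cache_zero_page() helper added below closes that hole by pushing a zero page into the cache whenever a page is sent via the zero-page path.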

arch_init.c

@@ -122,7 +122,6 @@ static void check_guest_throttling(void);
#define RAM_SAVE_FLAG_XBZRLE 0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
static struct defconfig_file {
const char *filename;
/* Indicates it is an user config file (disabled by -no-user-config) */
@@ -133,6 +132,7 @@ static struct defconfig_file {
{ NULL }, /* end of list */
};
static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
int qemu_read_default_config_files(bool userconfig)
{
@@ -273,6 +273,34 @@ static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
return size;
}
/* This is the last block that we have visited searching for dirty pages
*/
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
* The important thing is that a stale (not-yet-0'd) page be replaced
* by the new data.
* As a bonus, if the page wasn't in the cache it gets added so that
* when a small write is made into the 0'd page it gets XBZRLE sent
*/
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
if (ram_bulk_stage || !migrate_use_xbzrle()) {
return;
}
/* We don't care if this fails to allocate a new cache page
* as long as it updated an old one */
cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}
#define ENCODING_FLAG_XBZRLE 0x1
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
@@ -329,18 +357,6 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
return bytes_sent;
}
/* This is the last block that we have visited searching for dirty pages
*/
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
ram_addr_t start)
@@ -512,6 +528,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
} else {
int ret;
uint8_t *p;
bool send_async = true;
int cont = (block == last_sent_block) ?
RAM_SAVE_FLAG_CONTINUE : 0;
@@ -522,6 +539,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
ret = ram_control_save_page(f, block->offset,
offset, TARGET_PAGE_SIZE, &bytes_sent);
current_addr = block->offset + offset;
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
if (ret != RAM_SAVE_CONTROL_DELAYED) {
if (bytes_sent > 0) {
@@ -536,19 +554,35 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
RAM_SAVE_FLAG_COMPRESS);
qemu_put_byte(f, 0);
bytes_sent++;
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale
*/
xbzrle_cache_zero_page(current_addr);
} else if (!ram_bulk_stage && migrate_use_xbzrle()) {
current_addr = block->offset + offset;
bytes_sent = save_xbzrle_page(f, p, current_addr, block,
offset, cont, last_stage);
if (!last_stage) {
/* We must send exactly what's in the xbzrle cache
* even if the page wasn't xbzrle compressed, so that
* it's right next time.
*/
p = get_cached_data(XBZRLE.cache, current_addr);
/* Can't send this cached data async, since the cache page
* might get updated before it gets to the wire
*/
send_async = false;
}
}
/* XBZRLE overflow or normal page */
if (bytes_sent == -1) {
bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
if (send_async) {
qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
} else {
qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
}
bytes_sent += TARGET_PAGE_SIZE;
acct_info.norm_pages++;
}
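
The send_async flag above addresses the second issue. qemu_put_buffer_async() queues a reference to the caller's buffer and only reads it when the stream is flushed, whereas qemu_put_buffer() copies the data immediately; so a buffer handed to the async variant must not change until the flush. When p points into the XBZRLE cache, that cached page can be rewritten before the flush, changing what actually goes on the wire. A minimal stand-alone model of the hazard (illustration only; the toy_* names are invented, not the QEMUFile API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE 8

struct toy_file {
    const uint8_t *queued;           /* async path: remembers only the pointer */
    uint8_t wire[PAGE];              /* what eventually goes on the wire */
};

void toy_put_buffer(struct toy_file *f, const uint8_t *buf)
{
    memcpy(f->wire, buf, PAGE);      /* synchronous: copies the data now */
}

void toy_put_buffer_async(struct toy_file *f, const uint8_t *buf)
{
    f->queued = buf;                 /* asynchronous: data is read at flush time */
}

void toy_flush(struct toy_file *f)
{
    if (f->queued) {
        memcpy(f->wire, f->queued, PAGE);
        f->queued = NULL;
    }
}

int main(void)
{
    struct toy_file f = { 0 };
    uint8_t cache_page[PAGE];

    memset(cache_page, 'A', PAGE);   /* contents we intend to send */
    toy_put_buffer_async(&f, cache_page);
    memset(cache_page, 'B', PAGE);   /* cache page updated before the flush */
    toy_flush(&f);

    printf("wire got '%c', wanted 'A'\n", f.wire[0]);   /* prints 'B' */
    return 0;
}

Using the synchronous toy_put_buffer() would have copied the 'A' bytes before the cache page changed, which is why the patch falls back to qemu_put_buffer() whenever p comes from the XBZRLE cache.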

include/migration/page_cache.h

@@ -66,7 +66,7 @@ uint8_t *get_cached_data(const PageCache *cache, uint64_t addr);
* @addr: page address
* @pdata: pointer to the page
*/
int cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata);
int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata);
/**
* cache_resize: resize the page cache. In case of size reduction the extra

page_cache.c

@@ -150,7 +150,7 @@ uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
return cache_get_by_addr(cache, addr)->it_data;
}
int cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata)
int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata)
{
CacheItem *it = NULL;
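
The cache_insert() change in the last two hunks goes hand in hand with the new ZERO_TARGET_PAGE array: the zero page is declared const, so the insert function must take a pointer to const data, which is accurate anyway since the cache only reads the incoming page and copies it into its own storage. A brief sketch of the point (toy names, not the QEMU implementation):

#include <stdint.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

static uint8_t slot_data[TOY_PAGE_SIZE];     /* the cache's own copy of a page */

/* cache_insert()-style code only reads pdata, so "const uint8_t *" is the
 * honest type and lets callers pass read-only buffers without a cast. */
int toy_cache_insert(uint64_t addr, const uint8_t *pdata)
{
    (void)addr;                              /* slot selection elided */
    memcpy(slot_data, pdata, TOY_PAGE_SIZE); /* copy; never write through pdata */
    return 0;
}

/* Like ZERO_TARGET_PAGE in the first hunk: a read-only, all-zero page. */
static const uint8_t toy_zero_page[TOY_PAGE_SIZE];

int main(void)
{
    /* With a non-const pdata parameter this call would need a cast or would
     * draw a warning, because toy_zero_page is const. */
    return toy_cache_insert(0, toy_zero_page);
}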