migration: Make dirty_sync_count atomic

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>

Author: Juan Quintela <quintela@redhat.com>  2023-04-11 18:02:34 +02:00
parent 296a4ac2aa
commit 536b5a4e56
3 changed files with 10 additions and 8 deletions
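
For context: this conversion swaps a plain int64_t counter for QEMU's
lock-free Stat64 type, so the bare increment becomes stat64_add() and
every bare read becomes stat64_get(). The stand-alone sketch below
mimics that pattern with C11 atomics; Counter64 and its helpers are
simplified stand-ins for the real Stat64 implementation in
include/qemu/stats64.h, not QEMU code.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for QEMU's Stat64: a lock-free 64-bit counter. */
    typedef struct {
        _Atomic uint64_t value;
    } Counter64;

    /* Mirrors stat64_add(): atomically add to the counter, no lock held. */
    static void counter64_add(Counter64 *c, uint64_t v)
    {
        atomic_fetch_add_explicit(&c->value, v, memory_order_relaxed);
    }

    /* Mirrors stat64_get(): atomically read the current value. */
    static uint64_t counter64_get(Counter64 *c)
    {
        return atomic_load_explicit(&c->value, memory_order_relaxed);
    }

    int main(void)
    {
        Counter64 dirty_sync_count = { 0 };

        /* What used to be "dirty_sync_count++" becomes an atomic add... */
        counter64_add(&dirty_sync_count, 1);

        /* ...and every plain read becomes an explicit atomic get. */
        printf("generation %" PRIu64 "\n", counter64_get(&dirty_sync_count));
        return 0;
    }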

@@ -1148,7 +1148,8 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->normal = stat64_get(&ram_counters.normal);
     info->ram->normal_bytes = info->ram->normal * page_size;
     info->ram->mbps = s->mbps;
-    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
+    info->ram->dirty_sync_count =
+        stat64_get(&ram_counters.dirty_sync_count);
     info->ram->dirty_sync_missed_zero_copy =
         stat64_get(&ram_counters.dirty_sync_missed_zero_copy);
     info->ram->postcopy_requests = ram_counters.postcopy_requests;

@@ -764,7 +764,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
-                 ram_counters.dirty_sync_count);
+                 stat64_get(&ram_counters.dirty_sync_count));
 }

 #define ENCODING_FLAG_XBZRLE 0x1
@@ -790,13 +790,13 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
     QEMUFile *file = pss->pss_channel;
+    uint64_t generation = stat64_get(&ram_counters.dirty_sync_count);

-    if (!cache_is_cached(XBZRLE.cache, current_addr,
-                         ram_counters.dirty_sync_count)) {
+    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
         xbzrle_counters.cache_miss++;
         if (!rs->last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
-                             ram_counters.dirty_sync_count) == -1) {
+                             generation) == -1) {
                 return -1;
             } else {
                 /* update *current_data when the page has been
@@ -1209,7 +1209,7 @@ static void migration_bitmap_sync(RAMState *rs)
     RAMBlock *block;
     int64_t end_time;

-    ram_counters.dirty_sync_count++;
+    stat64_add(&ram_counters.dirty_sync_count, 1);

     if (!rs->time_last_bitmap_sync) {
         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -1246,7 +1246,8 @@ static void migration_bitmap_sync(RAMState *rs)
         rs->bytes_xfer_prev = stat64_get(&ram_counters.transferred);
     }

     if (migrate_use_events()) {
-        qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
+        uint64_t generation = stat64_get(&ram_counters.dirty_sync_count);
+        qapi_event_send_migration_pass(generation);
     }
 }
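
One detail worth flagging in the save_xbzrle_page hunk above: the
counter is read once into a local "generation" and that single snapshot
feeds both cache_is_cached() and cache_insert(). With an atomic counter
that another thread may bump at any moment, two separate stat64_get()
calls inside one function could observe different generations. A
minimal sketch of that read-once pattern, reusing the Counter64 helpers
from the sketch above (the cache type and names here are hypothetical,
not QEMU's):

    #include <stdbool.h>

    /* Hypothetical one-entry cache tagged with the generation in which
     * it was filled; stands in for QEMU's XBZRLE page cache. */
    typedef struct {
        uint64_t generation;
        bool     valid;
    } PageEntry;

    static void handle_page(Counter64 *dirty_sync_count, PageEntry *entry)
    {
        /* Snapshot the atomic counter ONCE per operation... */
        uint64_t generation = counter64_get(dirty_sync_count);

        /* ...so the hit test and the insert agree on the generation,
         * even if another thread increments it between the two uses. */
        if (!entry->valid || entry->generation != generation) {
            entry->generation = generation;  /* insert under same snapshot */
            entry->valid = true;
        }
    }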

@@ -42,7 +42,7 @@
  */
 typedef struct {
     int64_t dirty_pages_rate;
-    int64_t dirty_sync_count;
+    Stat64 dirty_sync_count;
     Stat64 dirty_sync_missed_zero_copy;
     Stat64 downtime_bytes;
     Stat64 duplicate;