migration: Rate limit inside host pages

When using hugepages, rate limiting also needs to happen within each
huge page: a 1G huge page can take a significant time to send, so
limiting only at huge-page boundaries results in bursty behaviour.

Fixes: 4c011c37ec ("postcopy: Send whole huge pages")
Reported-by: Lin Ma <LMa@suse.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Author: Dr. David Alan Gilbert <dgilbert@redhat.com>, 2019-12-05 10:29:18 +00:00
Committer: Juan Quintela <quintela@redhat.com>
parent 03acb4e94d
commit 97e1e06780
4 changed files with 37 additions and 27 deletions
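
Reviewer's note on the motivation: QEMU throttles migration bandwidth in BUFFER_DELAY-sized windows (100 ms), and before this patch the limiter was only consulted between host pages, so a whole 1G huge page goes out inside a single window. A back-of-envelope sketch of the overshoot (standalone C, not from the commit; the 100 MiB/s cap is an assumed example value):

/* Standalone sketch, not QEMU code: how far one 1G huge page
 * overshoots a single rate-limit window. The bandwidth cap is an
 * assumed example value. */
#include <stdio.h>

int main(void)
{
    const double huge_page = 1024.0 * 1024.0 * 1024.0; /* 1G huge page */
    const double limit_bps = 100.0 * 1024.0 * 1024.0;  /* assumed 100 MiB/s cap */
    const double window_ms = 100.0;                    /* BUFFER_DELAY window */

    double quota = limit_bps * window_ms / 1000.0;     /* bytes allowed per window */

    printf("per-window quota: %.0f MiB\n", quota / (1024 * 1024));
    printf("a 1G page sent in one burst is %.0fx the window quota\n",
           huge_page / quota);
    return 0;
}

At these example numbers the quota is 10 MiB per window, so one huge page is roughly a 100x burst.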

migration/migration.c

@@ -3224,6 +3224,37 @@ void migration_consume_urgent_request(void)
     qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
 }
 
+/* Returns true if the rate limiting was broken by an urgent request */
+bool migration_rate_limit(void)
+{
+    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    MigrationState *s = migrate_get_current();
+    bool urgent = false;
+
+    migration_update_counters(s, now);
+    if (qemu_file_rate_limit(s->to_dst_file)) {
+        /*
+         * Wait for a delay to do rate limiting OR
+         * something urgent to post the semaphore.
+         */
+        int ms = s->iteration_start_time + BUFFER_DELAY - now;
+        trace_migration_rate_limit_pre(ms);
+        if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
+            /*
+             * We were woken by one or more urgent things but
+             * the timedwait will have consumed one of them.
+             * The service routine for the urgent wake will dec
+             * the semaphore itself for each item it consumes,
+             * so add this one we just eat back.
+             */
+            qemu_sem_post(&s->rate_limit_sem);
+            urgent = true;
+        }
+        trace_migration_rate_limit_post(urgent);
+    }
+    return urgent;
+}
+
 /*
  * Master migration thread on the source VM.
  * It drives the migration and pumps the data down the outgoing channel.
@@ -3290,8 +3321,6 @@ static void *migration_thread(void *opaque)
     trace_migration_thread_setup_complete();
 
     while (migration_is_active(s)) {
-        int64_t current_time;
-
         if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
             MigIterateState iter_state = migration_iteration_run(s);
             if (iter_state == MIG_ITERATE_SKIP) {
@@ -3318,29 +3347,7 @@ static void *migration_thread(void *opaque)
             update_iteration_initial_status(s);
         }
 
-        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-
-        migration_update_counters(s, current_time);
-
-        urgent = false;
-        if (qemu_file_rate_limit(s->to_dst_file)) {
-            /* Wait for a delay to do rate limiting OR
-             * something urgent to post the semaphore.
-             */
-            int ms = s->iteration_start_time + BUFFER_DELAY - current_time;
-            trace_migration_thread_ratelimit_pre(ms);
-            if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
-                /* We were worken by one or more urgent things but
-                 * the timedwait will have consumed one of them.
-                 * The service routine for the urgent wake will dec
-                 * the semaphore itself for each item it consumes,
-                 * so add this one we just eat back.
-                 */
-                qemu_sem_post(&s->rate_limit_sem);
-                urgent = true;
-            }
-            trace_migration_thread_ratelimit_post(urgent);
-        }
+        urgent = migration_rate_limit();
     }
 
     trace_migration_thread_after_loop();
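
The subtle part of migration_rate_limit() is the qemu_sem_post() after a successful timedwait: the wait consumes one urgent-request post, but migration_consume_urgent_request() will also decrement the semaphore for each request it services, so the eaten post has to be put back. A standalone POSIX-semaphore analogue of the pattern (a sketch, not QEMU code; qemu_sem_timedwait() takes a relative ms timeout, while sem_timedwait() wants an absolute timespec, hence the conversion):

/* Sketch of the "eat one, put it back" pattern, using POSIX semaphores. */
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool rate_limit_wait(sem_t *urgent_sem, int ms)
{
    struct timespec ts;

    if (ms < 0) {
        ms = 0;
    }
    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec  += ms / 1000;
    ts.tv_nsec += (long)(ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }

    if (sem_timedwait(urgent_sem, &ts) == 0) {
        /*
         * Woken early by an urgent request. The wait consumed one
         * post, but the service routine decrements the semaphore for
         * each item it handles, so return the one we just ate.
         */
        sem_post(urgent_sem);
        return true;   /* urgent: skip the rest of the delay */
    }
    return false;      /* timed out: the delay did its rate limiting */
}

int main(void)
{
    sem_t urgent_sem;
    int value;

    sem_init(&urgent_sem, 0, 0);
    sem_post(&urgent_sem);          /* pretend an urgent request arrived */

    bool urgent = rate_limit_wait(&urgent_sem, 100);
    sem_getvalue(&urgent_sem, &value);
    /* urgent == 1, and the post is still there for the service routine */
    printf("urgent=%d, semaphore value=%d\n", urgent, value);
    return 0;
}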

migration/migration.h

@@ -341,5 +341,6 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
 
 void migration_make_urgent_request(void);
 void migration_consume_urgent_request(void);
+bool migration_rate_limit(void);
 
 #endif

migration/ram.c

@@ -2639,6 +2639,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
 
         pages += tmppages;
         pss->page++;
+        /* Allow rate limiting to happen in the middle of huge pages */
+        migration_rate_limit();
     } while ((pss->page & (pagesize_bits - 1)) &&
              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
 
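
The loop condition shows why the new call sits where it does: pagesize_bits is the number of target pages per host page, a power of two, so pss->page & (pagesize_bits - 1) stays non-zero until the huge-page boundary, and every target page inside the huge page now passes through the limiter. A minimal standalone illustration of that boundary test (assumed example sizes: 4K target pages inside a 2M huge page):

/* Illustrative sketch with assumed sizes, not QEMU code: walk 4K
 * target pages inside one 2M host page, stopping at the boundary the
 * same way ram_save_host_page() does. */
#include <stdio.h>

int main(void)
{
    const unsigned long target_page_bits = 12;        /* 4K target pages */
    const unsigned long host_page_size   = 2UL << 20; /* 2M huge page */
    /* Target pages per host page; a power of two, so the mask below
     * detects the huge-page boundary. */
    const unsigned long pagesize_bits = host_page_size >> target_page_bits;
    unsigned long page = 512;  /* start of the second huge page */
    unsigned long sent = 0;

    do {
        sent++;            /* stand-in for actually sending the page */
        page++;
        /* the commit inserts migration_rate_limit() here */
    } while (page & (pagesize_bits - 1));

    printf("sent %lu target pages before the huge-page boundary\n", sent);
    return 0;
}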

migration/trace-events

@@ -138,12 +138,12 @@ migrate_send_rp_recv_bitmap(char *name, int64_t size) "block '%s' size 0x%"PRIi64
 migration_completion_file_err(void) ""
 migration_completion_postcopy_end(void) ""
 migration_completion_postcopy_end_after_complete(void) ""
+migration_rate_limit_pre(int ms) "%d ms"
+migration_rate_limit_post(int urgent) "urgent: %d"
 migration_return_path_end_before(void) ""
 migration_return_path_end_after(int rp_error) "%d"
 migration_thread_after_loop(void) ""
 migration_thread_file_err(void) ""
-migration_thread_ratelimit_pre(int ms) "%d ms"
-migration_thread_ratelimit_post(int urgent) "urgent: %d"
 migration_thread_setup_complete(void) ""
 open_return_path_on_source(void) ""
 open_return_path_on_source_continue(void) ""