ram.c: Call update_compress_thread_counts from compress_send_queued_data

This makes the core compress code more independent of ram.c.

Signed-off-by: Lukas Straub <lukasstraub2@web.de>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Lukas Straub 2023-04-20 11:48:10 +02:00 committed by Juan Quintela
parent 3e81763e4c
commit 680628d200
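
For orientation before reading the diff: after this change, send_queued_data() does the counter accounting itself via update_compress_thread_counts(), so its callers in ram.c no longer need the returned length for that purpose. A minimal, self-contained sketch of the pattern (simplified stand-in types, counters, and bodies, not QEMU's real definitions):

#include <stdio.h>

/* Simplified stand-in for QEMU's compression accounting state. */
typedef struct {
    long pages;
    long compressed_size;
} CompressCounters;

static CompressCounters counters;

/* Stand-in for update_compress_thread_counts(): fold one send result
 * into the global counters. */
static void update_counts(int bytes_sent)
{
    counters.pages++;
    counters.compressed_size += bytes_sent;
}

/* Stand-in for send_queued_data(): after this commit it updates the
 * counters itself before returning the length. */
static int send_queued_data(int queued_bytes)
{
    /* ... the real code writes the queued page to the migration stream ... */
    update_counts(queued_bytes);
    return queued_bytes;
}

int main(void)
{
    /* Callers (flush_compressed_data() and compress_page_with_multi_thread()
     * in ram.c) now just call send_queued_data() and drop the length. */
    send_queued_data(4096);
    send_queued_data(2048);
    printf("pages=%ld compressed_size=%ld\n",
           counters.pages, counters.compressed_size);
    return 0;
}

With the accounting moved inside send_queued_data(), the call sites in the diff below shrink to a plain send_queued_data(param); followed by compress_reset_result(param);.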


@@ -1540,12 +1540,14 @@ static int send_queued_data(CompressParam *param)
         abort();
     }
 
+    update_compress_thread_counts(param, len);
+
     return len;
 }
 
 static void flush_compressed_data(RAMState *rs)
 {
-    int idx, len, thread_count;
+    int idx, thread_count;
 
     if (!save_page_use_compression(rs)) {
         return;
@@ -1564,15 +1566,8 @@ static void flush_compressed_data(RAMState *rs)
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
             CompressParam *param = &comp_param[idx];
-            len = send_queued_data(param);
+            send_queued_data(param);
             compress_reset_result(param);
-
-            /*
-             * it's safe to fetch zero_page without holding comp_done_lock
-             * as there is no further request submitted to the thread,
-             * i.e, the thread should be waiting for a request at this point.
-             */
-            update_compress_thread_counts(param, len);
         }
         qemu_mutex_unlock(&comp_param[idx].mutex);
     }
@@ -1588,7 +1583,7 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block,
 
 static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset)
 {
-    int idx, thread_count, bytes_xmit = -1, pages = -1;
+    int idx, thread_count, pages = -1;
     bool wait = migrate_compress_wait_thread();
 
     thread_count = migrate_compress_threads();
@@ -1599,11 +1594,10 @@ retry:
             CompressParam *param = &comp_param[idx];
             qemu_mutex_lock(&param->mutex);
             param->done = false;
-            bytes_xmit = send_queued_data(param);
+            send_queued_data(param);
             compress_reset_result(param);
             set_compress_params(param, block, offset);
 
-            update_compress_thread_counts(param, bytes_xmit);
             qemu_cond_signal(&param->cond);
             qemu_mutex_unlock(&param->mutex);
             pages = 1;