util/oslib-posix: Introduce and use MemsetContext for touch_all_pages()
Let's minimize the number of global variables to prepare for
os_mem_prealloc() getting called concurrently and make the code a bit
easier to read.

The only consumer that really needs a global variable is the sigbus
handler, which will require protection via a mutex in the future either
way, as we cannot concurrently mess with the SIGBUS handler.

Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20211217134611.31172-4-david@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit dba506788b
parent a384bfa32e
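The commit message argues for replacing the global memset_thread / memset_num_threads / threads_created_flag state with a per-call context that only the SIGBUS handler still needs to see. Below is a minimal, standalone sketch of that shape in plain pthreads, not the QEMU code itself: all names (PrefaultContext, prefault_worker, sig_context, ...) are illustrative, and the condition-variable gating and error handling from the real patch are omitted. The actual implementation follows in the diff.

/*
 * Sketch only: per-invocation state lives in a caller-owned context on the
 * stack, each worker holds a back-pointer to it, and the sole remaining
 * global is the pointer a (hypothetical) SIGBUS handler would consult.
 */
#include <pthread.h>
#include <stdlib.h>

struct PrefaultThread;

typedef struct PrefaultContext {
    struct PrefaultThread *threads;
    int num_threads;
} PrefaultContext;

typedef struct PrefaultThread {
    pthread_t tid;
    char *addr;
    size_t numpages;
    size_t pagesize;
    PrefaultContext *context;   /* back-pointer instead of global state */
} PrefaultThread;

/* The only global left: what a SIGBUS handler would look at. */
static PrefaultContext *sig_context;

static void *prefault_worker(void *arg)
{
    PrefaultThread *t = arg;

    for (size_t i = 0; i < t->numpages; i++) {
        /* Read-modify-write one byte per page to fault it in. */
        volatile char *p = t->addr + i * t->pagesize;
        *p = *p;
    }
    return NULL;
}

static int prefault_all(char *area, size_t pagesize, size_t numpages,
                        int num_threads)
{
    /* Per-call context on the stack: concurrent callers cannot collide. */
    PrefaultContext ctx = { .num_threads = num_threads };
    size_t per_thread = numpages / num_threads;
    size_t leftover = numpages % num_threads;
    int i;

    ctx.threads = calloc(num_threads, sizeof(*ctx.threads));
    for (i = 0; i < num_threads; i++) {
        ctx.threads[i].addr = area;
        ctx.threads[i].numpages = per_thread + ((size_t)i < leftover);
        ctx.threads[i].pagesize = pagesize;
        ctx.threads[i].context = &ctx;
        pthread_create(&ctx.threads[i].tid, NULL, prefault_worker,
                       &ctx.threads[i]);
        area += ctx.threads[i].numpages * pagesize;
    }

    sig_context = &ctx;                 /* publish for the signal handler */
    for (i = 0; i < num_threads; i++) {
        pthread_join(ctx.threads[i].tid, NULL);
    }
    sig_context = NULL;                 /* context dies with this call */

    free(ctx.threads);
    return 0;
}

int main(void)
{
    size_t pagesize = 4096, numpages = 256;
    char *area = malloc(pagesize * numpages);

    prefault_all(area, pagesize, numpages, 4);
    free(area);
    return 0;
}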
@@ -73,21 +73,30 @@
 
 #define MAX_MEM_PREALLOC_THREAD_COUNT 16
 
+struct MemsetThread;
+
+typedef struct MemsetContext {
+    bool all_threads_created;
+    bool any_thread_failed;
+    struct MemsetThread *threads;
+    int num_threads;
+} MemsetContext;
+
 struct MemsetThread {
     char *addr;
     size_t numpages;
     size_t hpagesize;
     QemuThread pgthread;
     sigjmp_buf env;
+    MemsetContext *context;
 };
 typedef struct MemsetThread MemsetThread;
 
-static MemsetThread *memset_thread;
-static int memset_num_threads;
+/* used by sigbus_handler() */
+static MemsetContext *sigbus_memset_context;
 
 static QemuMutex page_mutex;
 static QemuCond page_cond;
-static bool threads_created_flag;
 
 int qemu_get_thread_id(void)
 {
@@ -438,10 +447,13 @@ const char *qemu_get_exec_dir(void)
 static void sigbus_handler(int signal)
 {
     int i;
-    if (memset_thread) {
-        for (i = 0; i < memset_num_threads; i++) {
-            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
-                siglongjmp(memset_thread[i].env, 1);
+
+    if (sigbus_memset_context) {
+        for (i = 0; i < sigbus_memset_context->num_threads; i++) {
+            MemsetThread *thread = &sigbus_memset_context->threads[i];
+
+            if (qemu_thread_is_self(&thread->pgthread)) {
+                siglongjmp(thread->env, 1);
             }
         }
     }
@@ -459,7 +471,7 @@ static void *do_touch_pages(void *arg)
      * clearing until all threads have been created.
      */
     qemu_mutex_lock(&page_mutex);
-    while(!threads_created_flag){
+    while (!memset_args->context->all_threads_created) {
         qemu_cond_wait(&page_cond, &page_mutex);
     }
     qemu_mutex_unlock(&page_mutex);
@@ -502,7 +514,7 @@ static void *do_madv_populate_write_pages(void *arg)
 
     /* See do_touch_pages(). */
     qemu_mutex_lock(&page_mutex);
-    while (!threads_created_flag) {
+    while (!memset_args->context->all_threads_created) {
         qemu_cond_wait(&page_cond, &page_mutex);
     }
     qemu_mutex_unlock(&page_mutex);
@@ -529,6 +541,9 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                            int smp_cpus, bool use_madv_populate_write)
 {
     static gsize initialized = 0;
+    MemsetContext context = {
+        .num_threads = get_memset_num_threads(smp_cpus),
+    };
     size_t numpages_per_thread, leftover;
     void *(*touch_fn)(void *);
     int ret = 0, i = 0;
@@ -546,35 +561,41 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
         touch_fn = do_touch_pages;
     }
 
-    threads_created_flag = false;
-    memset_num_threads = get_memset_num_threads(smp_cpus);
-    memset_thread = g_new0(MemsetThread, memset_num_threads);
-    numpages_per_thread = numpages / memset_num_threads;
-    leftover = numpages % memset_num_threads;
-    for (i = 0; i < memset_num_threads; i++) {
-        memset_thread[i].addr = addr;
-        memset_thread[i].numpages = numpages_per_thread + (i < leftover);
-        memset_thread[i].hpagesize = hpagesize;
-        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
-                           touch_fn, &memset_thread[i],
+    context.threads = g_new0(MemsetThread, context.num_threads);
+    numpages_per_thread = numpages / context.num_threads;
+    leftover = numpages % context.num_threads;
+    for (i = 0; i < context.num_threads; i++) {
+        context.threads[i].addr = addr;
+        context.threads[i].numpages = numpages_per_thread + (i < leftover);
+        context.threads[i].hpagesize = hpagesize;
+        context.threads[i].context = &context;
+        qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+                           touch_fn, &context.threads[i],
                            QEMU_THREAD_JOINABLE);
-        addr += memset_thread[i].numpages * hpagesize;
+        addr += context.threads[i].numpages * hpagesize;
+    }
+
+    if (!use_madv_populate_write) {
+        sigbus_memset_context = &context;
     }
 
     qemu_mutex_lock(&page_mutex);
-    threads_created_flag = true;
+    context.all_threads_created = true;
     qemu_cond_broadcast(&page_cond);
     qemu_mutex_unlock(&page_mutex);
 
-    for (i = 0; i < memset_num_threads; i++) {
-        int tmp = (uintptr_t)qemu_thread_join(&memset_thread[i].pgthread);
+    for (i = 0; i < context.num_threads; i++) {
+        int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);
 
         if (tmp) {
             ret = tmp;
         }
     }
-    g_free(memset_thread);
-    memset_thread = NULL;
+
+    if (!use_madv_populate_write) {
+        sigbus_memset_context = NULL;
+    }
+    g_free(context.threads);
 
     return ret;
 }
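The commit message also notes that the SIGBUS handler is the one consumer that still needs a global, and that it "will require protection via a mutex in the future" because the handler is installed process-wide. The fragment below is a hedged sketch of what that serialization could look like; it is not a QEMU patch, and the wrapper name prealloc_guarded and its callback parameter are purely illustrative.

/*
 * Sketch only: sigaction() changes process-wide state, so two concurrent
 * preallocation calls must not install/restore the handler at the same
 * time -- hence the mutex the commit message anticipates.
 */
#include <pthread.h>
#include <signal.h>

static pthread_mutex_t sigbus_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct sigaction sigbus_oldact;

static void sigbus_handler(int sig)
{
    (void)sig;
    /* would siglongjmp() back into the faulting worker, as in the diff above */
}

static void prealloc_guarded(void (*touch)(void *), void *arg)
{
    struct sigaction act = { .sa_handler = sigbus_handler };

    sigemptyset(&act.sa_mask);

    pthread_mutex_lock(&sigbus_mutex);
    sigaction(SIGBUS, &act, &sigbus_oldact);

    touch(arg);                     /* e.g. the touch_all_pages() phase */

    sigaction(SIGBUS, &sigbus_oldact, NULL);
    pthread_mutex_unlock(&sigbus_mutex);
}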