2011-01-17 17:08:14 +01:00
|
|
|
/*
|
|
|
|
* ucontext coroutine initialization code
|
|
|
|
*
|
|
|
|
* Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
|
|
|
|
* Copyright (C) 2011 Kevin Wolf <kwolf@redhat.com>
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.0 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* XXX Is there a nicer way to disable glibc's stack check for longjmp? */
|
|
|
|
#ifdef _FORTIFY_SOURCE
|
|
|
|
#undef _FORTIFY_SOURCE
|
|
|
|
#endif
|
2016-01-29 18:49:55 +01:00
|
|
|
#include "qemu/osdep.h"
|
2011-01-17 17:08:14 +01:00
|
|
|
#include <ucontext.h>
|
2015-09-01 15:48:02 +02:00
|
|
|
#include "qemu/coroutine_int.h"
|
2011-01-17 17:08:14 +01:00
|
|
|
|
2012-06-29 13:40:27 +02:00
|
|
|
#ifdef CONFIG_VALGRIND_H
|
|
|
|
#include <valgrind/valgrind.h>
|
|
|
|
#endif
|
|
|
|
|
2018-01-16 16:11:52 +01:00
|
|
|
#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)
|
|
|
|
#ifdef CONFIG_ASAN_IFACE_FIBER
|
|
|
|
#define CONFIG_ASAN 1
|
|
|
|
#include <sanitizer/asan_interface.h>
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2020-06-12 21:02:23 +02:00
|
|
|
#ifdef CONFIG_TSAN
|
|
|
|
#include <sanitizer/tsan_interface.h>
|
|
|
|
#endif
|
|
|
|
|
2011-01-17 17:08:14 +01:00
|
|
|
/*
 * Per-coroutine state for the ucontext backend.  ucontext is only used to
 * set up a fresh stack in qemu_coroutine_new(); all subsequent switching
 * is done with sigsetjmp()/siglongjmp() via the 'env' buffer below.
 */
typedef struct {
    Coroutine base;     /* embedded base; DO_UPCAST() in this file relies on it */
    void *stack;        /* coroutine stack from qemu_alloc_stack() */
    size_t stack_size;  /* actual size returned by qemu_alloc_stack() */
#ifdef CONFIG_SAFESTACK
    /* Need an unsafe stack for each coroutine */
    void *unsafe_stack;
    size_t unsafe_stack_size;
#endif
    sigjmp_buf env;     /* jump target used to enter/resume this coroutine */

#ifdef CONFIG_TSAN
    void *tsan_co_fiber;     /* TSan fiber representing this coroutine */
    void *tsan_caller_fiber; /* TSan fiber of the thread that created it */
#endif

#ifdef CONFIG_VALGRIND_H
    unsigned int valgrind_stack_id;  /* handle from VALGRIND_STACK_REGISTER */
#endif

} CoroutineUContext;
|
|
|
|
|
|
|
|
/**
 * Per-thread coroutine bookkeeping
 */
/* Implicit coroutine for the thread's original stack; qemu_coroutine_self()
 * returns &leader.base before any coroutine has run on this thread. */
static __thread CoroutineUContext leader;
/* Currently running coroutine; NULL until first use on this thread. */
static __thread Coroutine *current;
|
2011-01-17 17:08:14 +01:00
|
|
|
|
|
|
|
/*
 * va_args to makecontext() must be type 'int', so passing
 * the pointer we need may require several int args. This
 * union is a quick hack to let us do that
 */
union cc_arg {
    void *p;    /* the pointer we actually want to pass */
    int i[2];   /* its representation as two makecontext() int arguments */
};

/*
 * coroutine_trampoline() reassembles the pointer from exactly two ints,
 * so fail the build on any ABI where that would silently truncate it.
 */
_Static_assert(sizeof(void *) <= 2 * sizeof(int),
               "void * does not fit in two makecontext() int arguments");
|
|
|
|
|
2020-07-01 15:56:16 +02:00
|
|
|
/*
 * QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it.
 * always_inline is required to avoid TSan runtime fatal errors.
 */
/* Record TSan fiber handles for a newly created coroutine: one fiber for
 * the coroutine itself and one for the creating thread.  No-op when TSan
 * support is not compiled in. */
static inline __attribute__((always_inline))
void on_new_fiber(CoroutineUContext *co)
{
#ifdef CONFIG_TSAN
    co->tsan_co_fiber = __tsan_create_fiber(0); /* flags: sync on switch */
    co->tsan_caller_fiber = __tsan_get_current_fiber();
#endif
}
|
|
|
|
|
2020-07-01 15:56:16 +02:00
|
|
|
/* always_inline is required to avoid TSan runtime fatal errors. */
/*
 * Complete a stack/fiber switch for the sanitizers after the jump has
 * landed.  fake_stack_save is the token stored by the matching
 * start_switch_fiber_*() call on the departing side, or NULL on the very
 * first entry into a new coroutine (see coroutine_trampoline()).
 */
static inline __attribute__((always_inline))
void finish_switch_fiber(void *fake_stack_save)
{
#ifdef CONFIG_ASAN
    const void *bottom_old;
    size_t size_old;

    __sanitizer_finish_switch_fiber(fake_stack_save, &bottom_old, &size_old);

    /* The first switch away from the thread's original stack reports its
     * bounds; remember them as the leader coroutine's stack. */
    if (!leader.stack) {
        leader.stack = (void *)bottom_old;
        leader.stack_size = size_old;
    }
#endif
#ifdef CONFIG_TSAN
    if (fake_stack_save) {
        /* fake_stack_save holds the fiber recorded by
         * start_switch_fiber_tsan() before the jump. */
        __tsan_release(fake_stack_save);
        __tsan_switch_to_fiber(fake_stack_save, 0); /* 0=synchronize */
    }
#endif
}
|
|
|
|
|
2020-07-01 15:56:16 +02:00
|
|
|
/* always_inline is required to avoid TSan runtime fatal errors. */
/*
 * Notify ASan that we are about to jump to the stack [bottom, bottom+size).
 * When the current coroutine is terminating we pass NULL instead of
 * fake_stack_save, which tells ASan this stack will never be returned to
 * and its fake stack can be reclaimed.
 */
static inline __attribute__((always_inline))
void start_switch_fiber_asan(CoroutineAction action, void **fake_stack_save,
                             const void *bottom, size_t size)
{
#ifdef CONFIG_ASAN
    __sanitizer_start_switch_fiber(
        action == COROUTINE_TERMINATE ? NULL : fake_stack_save,
        bottom, size);
#endif
}
|
|
|
|
|
|
|
|
/* always_inline is required to avoid TSan runtime fatal errors. */
/*
 * Notify TSan that we are about to jump to another fiber.  'caller'
 * selects the direction: true switches back to the fiber of the thread
 * that created 'co', false switches into the coroutine's own fiber.
 * The departing fiber is stored in *fake_stack_save so that
 * finish_switch_fiber() can pair up with it after the jump.
 */
static inline __attribute__((always_inline))
void start_switch_fiber_tsan(void **fake_stack_save,
                             CoroutineUContext *co,
                             bool caller)
{
#ifdef CONFIG_TSAN
    void *new_fiber = caller ?
                      co->tsan_caller_fiber :
                      co->tsan_co_fiber;
    void *curr_fiber = __tsan_get_current_fiber();
    __tsan_acquire(curr_fiber);

    *fake_stack_save = curr_fiber;
    __tsan_switch_to_fiber(new_fiber, 0); /* 0=synchronize */
#endif
}
|
|
|
|
|
2011-01-17 17:08:14 +01:00
|
|
|
/*
 * Entry point of every coroutine created by qemu_coroutine_new().
 *
 * makecontext() can only pass 'int' arguments, so the CoroutineUContext
 * pointer arrives split across i0/i1 and is reassembled via union cc_arg.
 *
 * On first entry we prime the coroutine's sigjmp env and immediately jump
 * back to the creating thread, which is parked in sigsetjmp() inside
 * qemu_coroutine_new().  When the coroutine is later resumed through
 * qemu_coroutine_switch(), sigsetjmp() returns non-zero and execution
 * falls through to the entry-function loop below.
 */
static void coroutine_trampoline(int i0, int i1)
{
    union cc_arg arg;
    CoroutineUContext *self;
    Coroutine *co;
    void *fake_stack_save = NULL;

    /* First landing on this brand-new stack: no saved token to pair with */
    finish_switch_fiber(NULL);

    arg.i[0] = i0;
    arg.i[1] = i1;
    self = arg.p;
    co = &self->base;

    /* Initialize longjmp environment and switch back the caller */
    if (!sigsetjmp(self->env, 0)) {
        start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, leader.stack,
                                leader.stack_size);
        start_switch_fiber_tsan(&fake_stack_save, self, true); /* true=caller */
        /* entry_arg still holds the creator's jmp_buf at this point
         * (stashed by qemu_coroutine_new()) */
        siglongjmp(*(sigjmp_buf *)co->entry_arg, 1);
    }

    /* Resumed for real: pair up with the switch that brought us here */
    finish_switch_fiber(fake_stack_save);

    while (true) {
        co->entry(co->entry_arg);
        /* Entry function returned; hand control back to the caller.  If
         * this coroutine is ever switched to again, the loop re-invokes
         * co->entry(). */
        qemu_coroutine_switch(co, co->caller, COROUTINE_TERMINATE);
    }
}
|
|
|
|
|
2013-02-19 11:59:09 +01:00
|
|
|
/*
 * Allocate and initialize a new coroutine.  The returned coroutine has
 * already run coroutine_trampoline() up to its first sigsetjmp(), so the
 * first qemu_coroutine_switch() to it resumes just before co->entry() is
 * invoked.
 */
Coroutine *qemu_coroutine_new(void)
{
    CoroutineUContext *co;
    ucontext_t old_uc, uc;
    sigjmp_buf old_env;
    union cc_arg arg = {0};
    void *fake_stack_save = NULL;

    /* The ucontext functions preserve signal masks which incurs a
     * system call overhead. sigsetjmp(buf, 0)/siglongjmp() does not
     * preserve signal masks but only works on the current stack.
     * Since we need a way to create and switch to a new stack, use
     * the ucontext functions for that but sigsetjmp()/siglongjmp() for
     * everything else.
     */

    if (getcontext(&uc) == -1) {
        abort();
    }

    co = g_malloc0(sizeof(*co));
    /* qemu_alloc_stack() may round the size up; it reports the real size */
    co->stack_size = COROUTINE_STACK_SIZE;
    co->stack = qemu_alloc_stack(&co->stack_size);
#ifdef CONFIG_SAFESTACK
    co->unsafe_stack_size = COROUTINE_STACK_SIZE;
    co->unsafe_stack = qemu_alloc_stack(&co->unsafe_stack_size);
#endif
    /* stash away our jmp_buf; coroutine_trampoline() longjmps back through
     * it (the real entry_arg is assigned by the caller before first run —
     * not visible in this file) */
    co->base.entry_arg = &old_env;

    uc.uc_link = &old_uc;
    uc.uc_stack.ss_sp = co->stack;
    uc.uc_stack.ss_size = co->stack_size;
    uc.uc_stack.ss_flags = 0;

#ifdef CONFIG_VALGRIND_H
    co->valgrind_stack_id =
        VALGRIND_STACK_REGISTER(co->stack, co->stack + co->stack_size);
#endif

    /* Split the pointer into the two int args makecontext() can carry */
    arg.p = co;

    on_new_fiber(co);
    makecontext(&uc, (void (*)(void))coroutine_trampoline,
                2, arg.i[0], arg.i[1]);

    /* swapcontext() in, siglongjmp() back out */
    if (!sigsetjmp(old_env, 0)) {
        start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, co->stack,
                                co->stack_size);
        start_switch_fiber_tsan(&fake_stack_save,
                                co, false); /* false=not caller */

#ifdef CONFIG_SAFESTACK
        /*
         * Before we swap the context, set the new unsafe stack
         * The unsafe stack grows just like the normal stack, so start from
         * the last usable location of the memory area.
         * NOTE: we don't have to re-set the usp afterwards because we are
         * coming back to this context through a siglongjmp.
         * The compiler already wrapped the corresponding sigsetjmp call with
         * code that saves the usp on the (safe) stack before the call, and
         * restores it right after (which is where we return with siglongjmp).
         */
        void *usp = co->unsafe_stack + co->unsafe_stack_size;
        __safestack_unsafe_stack_ptr = usp;
#endif

        swapcontext(&old_uc, &uc);
    }

    /* Back from the trampoline's siglongjmp(): complete the fiber switch */
    finish_switch_fiber(fake_stack_save);

    return &co->base;
}
|
|
|
|
|
2012-06-29 13:40:27 +02:00
|
|
|
#ifdef CONFIG_VALGRIND_H
|
|
|
|
/* Work around an unused variable in the valgrind.h macro... */
|
2020-07-08 20:19:44 +02:00
|
|
|
#if !defined(__clang__)
|
2013-04-16 13:51:06 +02:00
|
|
|
#pragma GCC diagnostic push
|
2012-06-29 13:40:27 +02:00
|
|
|
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
|
2012-07-30 17:13:07 +02:00
|
|
|
#endif
|
2012-06-29 13:40:27 +02:00
|
|
|
static inline void valgrind_stack_deregister(CoroutineUContext *co)
|
|
|
|
{
|
|
|
|
VALGRIND_STACK_DEREGISTER(co->valgrind_stack_id);
|
|
|
|
}
|
2020-07-08 20:19:44 +02:00
|
|
|
#if !defined(__clang__)
|
2013-04-16 13:51:06 +02:00
|
|
|
#pragma GCC diagnostic pop
|
2012-06-29 13:40:27 +02:00
|
|
|
#endif
|
2012-07-30 17:13:07 +02:00
|
|
|
#endif
|
2012-06-29 13:40:27 +02:00
|
|
|
|
2011-01-17 17:08:14 +01:00
|
|
|
/*
 * Release all resources owned by a coroutine: its valgrind registration
 * (if any), its stacks, and the CoroutineUContext itself.  The coroutine
 * must not be running.
 */
void qemu_coroutine_delete(Coroutine *co_)
{
    CoroutineUContext *ucco = DO_UPCAST(CoroutineUContext, base, co_);

#ifdef CONFIG_VALGRIND_H
    valgrind_stack_deregister(ucco);
#endif

#ifdef CONFIG_SAFESTACK
    qemu_free_stack(ucco->unsafe_stack, ucco->unsafe_stack_size);
#endif
    qemu_free_stack(ucco->stack, ucco->stack_size);
    g_free(ucco);
}
|
|
|
|
|
2014-12-02 12:05:44 +01:00
|
|
|
/* This function is marked noinline to prevent GCC from inlining it
 * into coroutine_trampoline(). If we allow it to do that then it
 * hoists the code to get the address of the TLS variable "current"
 * out of the while() loop. This is an invalid transformation because
 * the sigsetjmp() call may be called when running thread A but
 * return in thread B, and so we might be in a different thread
 * context each time round the loop.
 */
/*
 * Transfer control from 'from_' to 'to_'.  Returns the CoroutineAction
 * passed by whichever coroutine eventually switches back to 'from_'.
 */
CoroutineAction __attribute__((noinline))
qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
                      CoroutineAction action)
{
    CoroutineUContext *from = DO_UPCAST(CoroutineUContext, base, from_);
    CoroutineUContext *to = DO_UPCAST(CoroutineUContext, base, to_);
    int ret;
    void *fake_stack_save = NULL;

    current = to_;

    /* 0 means we just saved our context; non-zero is the resumer's action */
    ret = sigsetjmp(from->env, 0);
    if (ret == 0) {
        /* Notify the sanitizers before leaving this stack... */
        start_switch_fiber_asan(action, &fake_stack_save, to->stack,
                                to->stack_size);
        start_switch_fiber_tsan(&fake_stack_save,
                                to, false); /* false=not caller */
        siglongjmp(to->env, action);
    }

    /* ...and complete the switch when control comes back to us */
    finish_switch_fiber(fake_stack_save);

    return ret;
}
|
|
|
|
|
|
|
|
Coroutine *qemu_coroutine_self(void)
|
|
|
|
{
|
2014-12-02 12:05:44 +01:00
|
|
|
if (!current) {
|
|
|
|
current = &leader.base;
|
|
|
|
}
|
2020-06-12 21:02:23 +02:00
|
|
|
#ifdef CONFIG_TSAN
|
|
|
|
if (!leader.tsan_co_fiber) {
|
|
|
|
leader.tsan_co_fiber = __tsan_get_current_fiber();
|
|
|
|
}
|
|
|
|
#endif
|
2014-12-02 12:05:44 +01:00
|
|
|
return current;
|
2011-01-17 17:08:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool qemu_in_coroutine(void)
|
|
|
|
{
|
2014-12-02 12:05:44 +01:00
|
|
|
return current && current->caller;
|
2011-01-17 17:08:14 +01:00
|
|
|
}
|