linux-user/i386: Emulate x86_64 vsyscalls

Notice the magic page during translation, much like we already
do for the arm32 commpage.  At runtime, raise an exception to
return to cpu_loop for emulation.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20200213032223.14643-4-richard.henderson@linaro.org>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
Richard Henderson 2020-02-12 19:22:21 -08:00 committed by Laurent Vivier
parent acf768a904
commit b26491b4d4
3 changed files with 128 additions and 1 deletion
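
For context, the legacy vsyscall page lives at the fixed address 0xffffffffff600000 in every x86_64 process, with gettimeofday, time and getcpu at offsets 0x000, 0x400 and 0x800, as the first hunk below shows. The following guest-side program is an illustrative sketch only (not part of the commit): it calls two of those entry points directly, which is exactly the kind of access that now traps to EXCP_VSYSCALL and gets emulated in cpu_loop.

/*
 * Illustrative guest program: call the legacy x86_64 vsyscall entries
 * directly.  Under qemu-x86_64 with this commit the accesses trap to
 * EXCP_VSYSCALL and are emulated; on bare metal the behaviour depends
 * on the kernel's vsyscall= setting.
 */
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

#define VSYSCALL_PAGE      0xffffffffff600000ULL
#define VSYS_GETTIMEOFDAY  (VSYSCALL_PAGE + 0x000)   /* offset 0x000 */
#define VSYS_TIME          (VSYSCALL_PAGE + 0x400)   /* offset 0x400 */

int main(void)
{
    /* Cast the fixed entry addresses to the expected signatures. */
    int (*vgettimeofday)(struct timeval *, struct timezone *) =
        (int (*)(struct timeval *, struct timezone *))VSYS_GETTIMEOFDAY;
    time_t (*vtime)(time_t *) = (time_t (*)(time_t *))VSYS_TIME;
    struct timeval tv;

    if (vgettimeofday(&tv, NULL) == 0) {
        printf("vsyscall gettimeofday: %lld.%06ld\n",
               (long long)tv.tv_sec, (long)tv.tv_usec);
    }
    printf("vsyscall time: %lld\n", (long long)vtime(NULL));
    return 0;
}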


@@ -92,6 +92,109 @@ static void gen_signal(CPUX86State *env, int sig, int code, abi_ptr addr)
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
#ifdef TARGET_X86_64
static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len)
{
/*
* For all the vsyscalls, NULL means "don't write anything" not
* "write it at address 0".
*/
if (addr == 0 || access_ok(VERIFY_WRITE, addr, len)) {
return true;
}
env->error_code = PG_ERROR_W_MASK | PG_ERROR_U_MASK;
gen_signal(env, TARGET_SIGSEGV, TARGET_SEGV_MAPERR, addr);
return false;
}
/*
* Since v3.1, the kernel traps and emulates the vsyscall page.
* Entry points other than the official ones generate SIGSEGV.
*/
static void emulate_vsyscall(CPUX86State *env)
{
int syscall;
abi_ulong ret;
uint64_t caller;
/*
* Validate the entry point. We have already validated the page
* during translation to get here; now verify the offset.
*/
switch (env->eip & ~TARGET_PAGE_MASK) {
case 0x000:
syscall = TARGET_NR_gettimeofday;
break;
case 0x400:
syscall = TARGET_NR_time;
break;
case 0x800:
syscall = TARGET_NR_getcpu;
break;
default:
goto sigsegv;
}
/*
* Validate the return address.
* Note that the kernel treats this the same as an invalid entry point.
*/
if (get_user_u64(caller, env->regs[R_ESP])) {
goto sigsegv;
}
/*
* Validate the pointer arguments.
*/
switch (syscall) {
case TARGET_NR_gettimeofday:
if (!write_ok_or_segv(env, env->regs[R_EDI],
sizeof(struct target_timeval)) ||
!write_ok_or_segv(env, env->regs[R_ESI],
sizeof(struct target_timezone))) {
return;
}
break;
case TARGET_NR_time:
if (!write_ok_or_segv(env, env->regs[R_EDI], sizeof(abi_long))) {
return;
}
break;
case TARGET_NR_getcpu:
if (!write_ok_or_segv(env, env->regs[R_EDI], sizeof(uint32_t)) ||
!write_ok_or_segv(env, env->regs[R_ESI], sizeof(uint32_t))) {
return;
}
break;
default:
g_assert_not_reached();
}
/*
* Perform the syscall. None of the vsyscalls should need restarting.
*/
ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI],
env->regs[R_EDX], env->regs[10], env->regs[8],
env->regs[9], 0, 0);
g_assert(ret != -TARGET_ERESTARTSYS);
g_assert(ret != -TARGET_QEMU_ESIGRETURN);
if (ret == -TARGET_EFAULT) {
goto sigsegv;
}
env->regs[R_EAX] = ret;
/* Emulate a ret instruction to leave the vsyscall page. */
env->eip = caller;
env->regs[R_ESP] += 8;
return;
sigsegv:
/* Like force_sig(SIGSEGV). */
gen_signal(env, TARGET_SIGSEGV, TARGET_SI_KERNEL, 0);
}
#endif
void cpu_loop(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
@@ -141,6 +244,11 @@ void cpu_loop(CPUX86State *env)
env->regs[R_EAX] = ret;
}
break;
#endif
#ifdef TARGET_X86_64
case EXCP_VSYSCALL:
emulate_vsyscall(env);
break;
#endif
case EXCP0B_NOSEG:
case EXCP0C_STACK:


@@ -1003,6 +1003,7 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
#define EXCP_VMEXIT 0x100 /* only for system emulation */
#define EXCP_SYSCALL 0x101 /* only for user emulation */
#define EXCP_VSYSCALL 0x102 /* only for user emulation */
/* i386-specific interrupt pending bits. */
#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
@@ -2218,4 +2219,10 @@ static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
return !!(cpu->hyperv_features & BIT(feat));
}
#if defined(TARGET_X86_64) && \
defined(CONFIG_USER_ONLY) && \
defined(CONFIG_LINUX)
# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
#endif
#endif /* I386_CPU_H */
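
As a side note on the TARGET_VSYSCALL_PAGE value above: UINT64_C(-10) wraps to 0xfffffffffffffff6, and shifting it left by 20 bits yields 0xffffffffff600000, i.e. 10 MiB below the top of the 64-bit address space, which is the canonical vsyscall address the translator checks against below. A standalone compile-time check (a sketch, not part of the commit) confirming the arithmetic:

/* Standalone sketch: confirm TARGET_VSYSCALL_PAGE == 0xffffffffff600000. */
#include <assert.h>
#include <stdint.h>

#define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)

int main(void)
{
    /* -10 as uint64_t is 0xfffffffffffffff6; << 20 appends 20 zero bits. */
    static_assert(TARGET_VSYSCALL_PAGE == UINT64_C(0xffffffffff600000),
                  "unexpected vsyscall page address");
    return 0;
}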


@@ -8555,7 +8555,19 @@ static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- target_ulong pc_next = disas_insn(dc, cpu);
+ target_ulong pc_next;
#ifdef TARGET_VSYSCALL_PAGE
/*
* Detect entry into the vsyscall page and invoke the syscall.
*/
if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
gen_exception(dc, EXCP_VSYSCALL, dc->base.pc_next);
return;
}
#endif
pc_next = disas_insn(dc, cpu);
if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) {
/* if single step mode, we generate only one instruction and