x86, selftests: Add a test for the "sysret_ss_attrs" bug

On AMD CPUs, SYSRET can return with a valid SS descriptor with
the hidden attributes set to an unusable state.  Make sure
the kernel doesn't let this happen.  This detects an
as-yet-unfixed regression.

Note that the 64-bit version of this test fails on AMD CPUs on
all kernel versions, although the issue in the 64-bit case is
much less severe than in the 32-bit case.

Reported-by: Brian Gerst <brgerst@gmail.com>
Tested-by: Denys Vlasenko <dvlasenk@redhat.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Tests: e7d6eefaaa ("x86/vdso32/syscall.S: Do not load __USER32_DS to %ss")
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/resend_4d740841bac383742949e2fefb03982736595087.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Andy Lutomirski 2015-04-24 15:09:19 -07:00 committed by Ingo Molnar
parent 7ae383be81
commit e22438f8e9
4 changed files with 185 additions and 1 deletions

View File

@ -1,6 +1,6 @@
.PHONY: all all_32 all_64 check_build32 clean run_tests
TARGETS_C_BOTHBITS := sigreturn single_step_syscall
TARGETS_C_BOTHBITS := sigreturn single_step_syscall sysret_ss_attrs
BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32)
BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
@ -46,3 +46,6 @@ check_build32:
echo " yum install glibc-devel.*i686"; \
exit 1; \
fi
# Some tests have additional dependencies.
sysret_ss_attrs_64: thunks.S

View File

@ -4,10 +4,12 @@
# script here.
./sigreturn_32 || exit 1
./single_step_syscall_32 || exit 1
./sysret_ss_attrs_32 || exit 1
if [[ "$uname -p" -eq "x86_64" ]]; then
./sigreturn_64 || exit 1
./single_step_syscall_64 || exit 1
./sysret_ss_attrs_64 || exit 1
fi
exit 0

View File

@ -0,0 +1,112 @@
/*
* sysret_ss_attrs.c - test that syscalls return valid hidden SS attributes
* Copyright (c) 2015 Andrew Lutomirski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
 * On AMD CPUs, SYSRET can return with a valid SS descriptor with
* the hidden attributes set to an unusable state. Make sure the kernel
* doesn't let this happen.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <pthread.h>
static void *threadproc(void *ctx)
{
/*
* Do our best to cause sleeps on this CPU to exit the kernel and
* re-enter with SS = 0.
*/
while (true)
;
return NULL;
}
#ifdef __x86_64__
extern unsigned long call32_from_64(void *stack, void (*function)(void));
asm (".pushsection .text\n\t"
".code32\n\t"
"test_ss:\n\t"
"pushl $0\n\t"
"popl %eax\n\t"
"ret\n\t"
".code64");
extern void test_ss(void);
#endif
int main()
{
/*
* Start a busy-looping thread on the same CPU we're on.
* For simplicity, just stick everything to CPU 0. This will
* fail in some containers, but that's probably okay.
*/
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
printf("[WARN]\tsched_setaffinity failed\n");
pthread_t thread;
if (pthread_create(&thread, 0, threadproc, 0) != 0)
err(1, "pthread_create");
#ifdef __x86_64__
unsigned char *stack32 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
if (stack32 == MAP_FAILED)
err(1, "mmap");
#endif
printf("[RUN]\tSyscalls followed by SS validation\n");
for (int i = 0; i < 1000; i++) {
/*
* Go to sleep and return using sysret (if we're 64-bit
* or we're 32-bit on AMD on a 64-bit kernel). On AMD CPUs,
* SYSRET doesn't fix up the cached SS descriptor, so the
* kernel needs some kind of workaround to make sure that we
* end the system call with a valid stack segment. This
* can be a confusing failure because the SS *selector*
* is the same regardless.
*/
usleep(2);
#ifdef __x86_64__
/*
* On 32-bit, just doing a syscall through glibc is enough
* to cause a crash if our cached SS descriptor is invalid.
* On 64-bit, it's not, so try extra hard.
*/
call32_from_64(stack32 + 4088, test_ss);
#endif
}
printf("[OK]\tWe survived\n");
#ifdef __x86_64__
munmap(stack32, 4096);
#endif
return 0;
}

View File

@ -0,0 +1,67 @@
/*
* thunks.S - assembly helpers for mixed-bitness code
* Copyright (c) 2015 Andrew Lutomirski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* These are little helpers that make it easier to switch bitness on
* the fly.
*/
.text
.global call32_from_64
.type call32_from_64, @function
call32_from_64:
// rdi: stack to use
// esi: function to call
// Save registers
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushfq
// Switch stacks
mov %rsp,(%rdi)
mov %rdi,%rsp
// Switch to compatibility mode
pushq $0x23 /* USER32_CS */
pushq $1f
lretq
1:
.code32
// Call the function
call *%esi
// Switch back to long mode
jmp $0x33,$1f
.code64
1:
// Restore the stack
mov (%rsp),%rsp
// Restore registers
popfq
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
ret
.size call32_from_64, .-call32_from_64