/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};
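
/*
 * The two lookup helpers below translate between register names and
 * pt_regs offsets using regoffset_table; this is what lets callers such
 * as the kprobe-based event code refer to saved registers by name
 * ("ax", "ip", "sp", ...).  Explanatory note only; see the callers for
 * the authoritative list of users.
 */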
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
	[0] = offsetof(struct pt_regs, ax),
	[1] = offsetof(struct pt_regs, dx),
	[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
	[0] = offsetof(struct pt_regs, di),
	[1] = offsetof(struct pt_regs, si),
	[2] = offsetof(struct pt_regs, dx),
	[3] = offsetof(struct pt_regs, cx),
	[4] = offsetof(struct pt_regs, r8),
	[5] = offsetof(struct pt_regs, r9)
#endif
};

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range. In this
 * case we try to take the previous stack. To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	struct thread_info *tinfo;

	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	tinfo = (struct thread_info *)context;
	if (tinfo->previous_esp)
		return tinfo->previous_esp;

	return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);
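
/*
 * Note on the 32-bit pt_regs_access() below: its "regno" argument is a
 * byte offset into struct user_regs_struct, whose entries line up with
 * the start of pt_regs (the BUILD_BUG_ON checks that bx sits at offset
 * 0).  Every slot is 4 bytes wide on 32-bit, so "regno >> 2" turns the
 * byte offset into an array index; the 64-bit variant spells the same
 * conversion out as a division by sizeof(regs->r15).  Explanatory note
 * only.
 */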
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoints for this thread and
 * build the dr7 value on top of their attributes.
 *
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
					int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
				unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
}

static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
					int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp = thread->ptrace_bps[n];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}

static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->debugreg6 = val;
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area.  Only allow
	   to return the lower 32bits of segment and debug registers.  */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area.  Only allow
	   to update segment and debug registers with the upper 32bits
	   zero-extended. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

#ifdef CONFIG_X86_X32_ABI
	if (!is_ia32_task())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
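
/*
 * Note: update_regset_xstate_info() below fixes up the variable-sized
 * REGSET_XSTATE entries once the xsave area size and feature mask are
 * known; the caller is presumably the FPU/xsave initialization code, so
 * treat that attribution as an informal note rather than a guarantee.
 */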
void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

static void fill_sigtrap_info(struct task_struct *tsk,
				struct pt_regs *regs,
				int error_code, int si_code,
				struct siginfo *info)
{
	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = si_code;
	info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	user_exit();

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	if (secure_computing(regs->orig_ax)) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1L;
		goto out;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (IS_IA32)
		audit_syscall_entry(AUDIT_ARCH_I386,
				    regs->orig_ax,
				    regs->bx, regs->cx,
				    regs->dx, regs->si);
#ifdef CONFIG_X86_64
	else
		audit_syscall_entry(AUDIT_ARCH_X86_64,
				    regs->orig_ax,
				    regs->di, regs->si,
				    regs->dx, regs->r10);
#endif

out:
	return ret ?: regs->orig_ax;
}

void syscall_trace_leave(struct pt_regs *regs)
{
	bool step;

	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
			!test_thread_flag(TIF_SYSCALL_EMU);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);

	user_enter();
}