hexagon: kernel_thread()/kernel_execve() conversion

introduce sane current_pt_regs(), use it in syscalls where needed.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro 2012-10-18 22:45:24 -04:00
parent ddffeb8c4d
commit 9952185554
7 changed files with 46 additions and 98 deletions

arch/hexagon/Kconfig

@@ -31,6 +31,8 @@ config HEXAGON
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
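
Selecting GENERIC_KERNEL_THREAD and GENERIC_KERNEL_EXECVE means the architecture stops carrying its own kernel_thread()/kernel_execve() and relies on the generic helpers instead. For context only, a sketch of roughly what that era's generic kernel_thread() in kernel/fork.c looked like (treat the exact signature as an assumption; it is not part of this patch): the function pointer and argument are simply funneled through do_fork(), and copy_thread() below stashes them for ret_from_fork.

pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	/* fn rides in the "stack start" slot and arg in the "stack size"
	 * slot of do_fork(); hexagon's copy_thread() receives them back
	 * as usp and arg. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, (unsigned long)fn,
		       NULL, (unsigned long)arg, NULL, NULL);
}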

arch/hexagon/include/asm/processor.h

@@ -34,7 +34,6 @@
struct task_struct;
/* this is defined in arch/process.c */
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

arch/hexagon/include/asm/ptrace.h

@@ -32,4 +32,8 @@
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define current_pt_regs() \
((struct pt_regs *) \
((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
#endif
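
The new macro relies on the fixed stack layout: the pt_regs frame saved on kernel entry lives in the last sizeof(struct pt_regs) bytes of the task's THREAD_SIZE kernel stack, whose base is the thread_info. Syscalls can therefore reach the saved user state by pointer arithmetic alone rather than through current_thread_info()->regs. A two-line illustration (old_way/new_way are invented names, not patch content):

/* Both expressions name the same frame: the pt_regs slot at the top of
 * the current task's kernel stack. */
struct pt_regs *old_way = current_thread_info()->regs;	/* field maintained by copy_thread() */
struct pt_regs *new_way = current_pt_regs();		/* pure pointer arithmetic */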

arch/hexagon/kernel/process.c

@@ -25,33 +25,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
/*
* Kernel thread creation. The desired kernel function is "wrapped"
* in the kernel_thread_helper function, which does cleanup
* afterwards.
*/
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
do_exit(fn(arg));
}
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
/*
* Yes, we're exploiting illicit knowledge of the ABI here.
*/
regs.r00 = (unsigned long) arg;
regs.r01 = (unsigned long) fn;
pt_set_elr(&regs, (unsigned long)kernel_thread_helper);
pt_set_kmode(&regs);
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
/*
* Program thread launch. Often defined as a macro in processor.h,
* but we're shooting for a small footprint and it's not an inner-loop
@@ -114,7 +87,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
* Copy architecture-specific thread state
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused, struct task_struct *p,
unsigned long arg, struct task_struct *p,
struct pt_regs *regs)
{
struct thread_info *ti = task_thread_info(p);
@@ -125,19 +98,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
sizeof(*childregs));
memcpy(childregs, regs, sizeof(*childregs));
ti->regs = childregs;
/*
* Establish kernel stack pointer and initial PC for new thread
* Note that unlike the usual situation, we do not copy the
* parent's callee-saved here; those are in pt_regs and whatever
* we leave here will be overridden on return to userland.
*/
ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
ss->r2524 = usp | ((u64)arg << 32);
pt_set_kmode(childregs);
return 0;
}
memcpy(childregs, regs, sizeof(*childregs));
ss->r2524 = 0;
/* If User mode thread, set pt_reg stack pointer as per parameter */
if (user_mode(childregs)) {
pt_set_rte_sp(childregs, usp);
/* Child sees zero return value */
@@ -160,26 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
* this point in the fork process
* Might also want to set things like ti->addr_limit
*/
} else {
/*
* If kernel thread, resume stack is kernel stack base.
* Note that this is pointer arithmetic on pt_regs *
*/
pt_set_rte_sp(childregs, (unsigned long)(childregs + 1));
/*
* We need the current thread_info fast path pointer
* set up in pt_regs. The register to be used is
* parametric for assembler code, but the mechanism
* doesn't drop neatly into C. Needs to be fixed.
*/
childregs->THREADINFO_REG = (unsigned long) ti;
}
/*
* thread_info pointer is pulled out of task_struct "stack"
* field on switch_to.
*/
p->stack = (void *)ti;
return 0;
}
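
For a kernel thread there is no parent user frame worth copying: the child's pt_regs are zeroed and flagged kernel-mode, and the thread function plus its argument are parked in the saved R25:R24 pair of the switch stack, where ret_from_fork (see the vm_entry.S hunk below) picks them up on the first switch into the child. A hypothetical helper, not in the patch, spelling out the packing done by ss->r2524 = usp | ((u64)arg << 32):

/* usp carries the thread function and arg its argument, as handed down
 * from the generic kernel_thread() through do_fork(). */
static u64 pack_r2524(unsigned long fn, unsigned long arg)
{
	return (u64)fn | ((u64)arg << 32);	/* R24 = fn (low half), R25 = arg (high half) */
}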

arch/hexagon/kernel/signal.c

@@ -249,14 +249,14 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
*/
asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
struct pt_regs *regs = current_thread_info()->regs;
struct pt_regs *regs = current_pt_regs();
return do_sigaltstack(uss, uoss, regs->r29);
}
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_thread_info()->regs;
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t blocked;

arch/hexagon/kernel/syscall.c

@@ -39,7 +39,7 @@ asmlinkage int sys_execve(char __user *ufilename,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
struct pt_regs *pregs = current_thread_info()->regs;
struct pt_regs *pregs = current_pt_regs();
struct filename *filename;
int retval;
@@ -57,33 +57,10 @@ asmlinkage int sys_execve(char __user *ufilename,
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidp, unsigned long child_tidp)
{
struct pt_regs *pregs = current_thread_info()->regs;
struct pt_regs *pregs = current_pt_regs();
if (!newsp)
newsp = pregs->SP;
return do_fork(clone_flags, newsp, pregs, 0, (int __user *)parent_tidp,
(int __user *)child_tidp);
}
/*
* Do a system call from the kernel, so as to have a proper pt_regs
* and recycle the sys_execve infrastructure.
*/
int kernel_execve(const char *filename,
const char *const argv[], const char *const envp[])
{
register unsigned long __a0 asm("r0") = (unsigned long) filename;
register unsigned long __a1 asm("r1") = (unsigned long) argv;
register unsigned long __a2 asm("r2") = (unsigned long) envp;
int retval;
__asm__ volatile(
" R6 = #%4;\n"
" trap0(#1);\n"
" %0 = R0;\n"
: "=r" (retval)
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
);
return retval;
}
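
With the generic kernel_execve() there is no need to trap back into the kernel just to obtain a usable register frame: every task now has a well-defined pt_regs at the top of its kernel stack, reachable via current_pt_regs(). Conceptually (a sketch only, under the assumption of that era's do_execve() signature, not the actual generic fs/exec.c code) the replacement reduces to:

int kernel_execve(const char *filename,
		  const char *const argv[], const char *const envp[])
{
	/* Fills in the pt_regs at the top of this kernel stack; when the
	 * calling kernel thread's function then returns, ret_from_fork
	 * falls through to return_from_syscall and loads that user state. */
	return do_execve(filename,
			 (const char __user *const __user *)argv,
			 (const char __user *const __user *)envp,
			 current_pt_regs());
}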

arch/hexagon/kernel/vm_entry.S

@@ -266,4 +266,8 @@ _K_enter_machcheck:
.globl ret_from_fork
ret_from_fork:
call schedule_tail
P0 = cmp.eq(R24, #0);
if P0 jump return_from_syscall
R0 = R25;
callr R24
jump return_from_syscall
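
Read together with copy_thread(): a forked user task has R24 == 0 in its switch stack and exits straight through return_from_syscall, while a kernel thread has its function called with the argument in R0, and it only falls through to the syscall exit if that function returns, which is how an execve issued from a kernel thread finally reaches userspace. A rough, self-contained C rendering of the control flow; every name below is invented for illustration:

#include <stddef.h>

typedef int (*kthread_fn)(void *);

/* Illustrative only; none of these symbols exist in the kernel. */
static void ret_from_fork_sketch(kthread_fn r24, void *r25,
				 void (*schedule_tail_stub)(void),
				 void (*return_from_syscall_stub)(void))
{
	schedule_tail_stub();			/* call schedule_tail */
	if (r24 == NULL) {			/* P0 = cmp.eq(R24, #0) */
		return_from_syscall_stub();	/* plain fork: return with the copied user state */
		return;
	}
	r24(r25);				/* R0 = R25; callr R24 */
	return_from_syscall_stub();		/* if fn returns, leave via the normal syscall exit */
}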