diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 23592eff67ad..b40989308775 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -18,7 +18,7 @@
 
 #ifdef __KERNEL__
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 000000000000..55707a8009d3
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#define smp_read_barrier_depends() do { } while(0)
+#define read_barrier_depends() do { } while(0)
+
+#define mb()  barrier()
+#define rmb() mb()
+#define wmb() mb()
+
+#ifdef CONFIG_SMP
+#error smp_* not defined
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#endif
+
+#define set_mb(var, value)	do { var = value; mb(); } while (0)
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 40aa7fe77f66..5270197ddd36 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -21,7 +21,6 @@
 
 #include <asm/processor.h>
 #include <asm/byteorder.h>
-#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 # error SMP not supported on this architecture
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..e32149063d83
--- /dev/null
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -0,0 +1,131 @@
+/*
+ * Atomic xchg and cmpxchg operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+/*
+ * cmpxchg
+ */
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
+			     "l32i    %0, %1, 0              \n\t"
+			     "bne     %0, %2, 1f             \n\t"
+			     "s32i    %3, %1, 0              \n\t"
+			     "1:                             \n\t"
+			     "wsr     a15, "__stringify(PS)" \n\t"
+			     "rsync                          \n\t"
+			     : "=&a" (old)
+			     : "a" (p), "a" (old), "r" (new)
+			     : "a15", "memory");
+	return old;
+}
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+	case 4:  return __cmpxchg_u32(ptr, old, new);
+	default: __cmpxchg_called_with_bad_pointer();
+		 return old;
+	}
+}
+
+#define cmpxchg(ptr,o,n)						      \
+	({ __typeof__(*(ptr)) _o_ = (o);				      \
+	   __typeof__(*(ptr)) _n_ = (n);				      \
+	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	      \
+				(unsigned long)_n_, sizeof (*(ptr)));	      \
+	})
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+				      unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new, size);
+	}
+
+	return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					       \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * xchg_u32
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+	unsigned long tmp;
+	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
+			     "l32i    %0, %1, 0              \n\t"
+			     "s32i    %2, %1, 0              \n\t"
+			     "wsr     a15, "__stringify(PS)" \n\t"
+			     "rsync                          \n\t"
+			     : "=&a" (tmp)
+			     : "a" (m), "a" (val)
+			     : "a15", "memory");
+	return tmp;
+}
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 4:
+		return xchg_u32(ptr, x);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_CMPXCHG_H */
diff --git a/arch/xtensa/include/asm/exec.h b/arch/xtensa/include/asm/exec.h
new file mode 100644
index 000000000000..af949e28cb32
--- /dev/null
+++ b/arch/xtensa/include/asm/exec.h
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_EXEC_H
+#define _XTENSA_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _XTENSA_EXEC_H */
diff --git a/arch/xtensa/include/asm/setup.h b/arch/xtensa/include/asm/setup.h
index e3636520d8cc..9fa8ad979361 100644
--- a/arch/xtensa/include/asm/setup.h
+++ b/arch/xtensa/include/asm/setup.h
@@ -13,4 +13,6 @@
 
 #define COMMAND_LINE_SIZE	256
 
+extern void set_except_vector(int n, void *addr);
+
 #endif
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644
index 000000000000..6b73bf0eb1ff
--- /dev/null
+++ b/arch/xtensa/include/asm/switch_to.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/* * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev,next,last)		\
+do {						\
+	(last) = _switch_to(prev, next);	\
+} while(0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
diff --git a/arch/xtensa/include/asm/system.h b/arch/xtensa/include/asm/system.h
index 1e7e09ab6cd7..a7f40578587c 100644
--- a/arch/xtensa/include/asm/system.h
+++ b/arch/xtensa/include/asm/system.h
@@ -1,184 +1,5 @@
-/*
- * include/asm-xtensa/system.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SYSTEM_H
-#define _XTENSA_SYSTEM_H
-
-#include <linux/stringify.h>
-#include <linux/irqflags.h>
-
-#include <asm/processor.h>
-
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
-#define mb()  barrier()
-#define rmb() mb()
-#define wmb() mb()
-
-#ifdef CONFIG_SMP
-#error smp_* not defined
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#endif
-
-#define set_mb(var, value)	do { var = value; mb(); } while (0)
-
-#if !defined (__ASSEMBLY__)
-
-/* * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- */
-extern void *_switch_to(void *last, void *next);
-
-#endif	/* __ASSEMBLY__ */
-
-#define switch_to(prev,next,last)		\
-do {						\
-	(last) = _switch_to(prev, next);	\
-} while(0)
-
-/*
- * cmpxchg
- */
-
-static inline unsigned long
-__cmpxchg_u32(volatile int *p, int old, int new)
-{
-	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-			     "l32i    %0, %1, 0              \n\t"
-			     "bne     %0, %2, 1f             \n\t"
-			     "s32i    %3, %1, 0              \n\t"
-			     "1:                             \n\t"
-			     "wsr     a15, "__stringify(PS)" \n\t"
-			     "rsync                          \n\t"
-			     : "=&a" (old)
-			     : "a" (p), "a" (old), "r" (new)
-			     : "a15", "memory");
-	return old;
-}
-/* This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg(). */
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-	switch (size) {
-	case 4:  return __cmpxchg_u32(ptr, old, new);
-	default: __cmpxchg_called_with_bad_pointer();
-		 return old;
-	}
-}
-
-#define cmpxchg(ptr,o,n)						      \
-	({ __typeof__(*(ptr)) _o_ = (o);				      \
-	   __typeof__(*(ptr)) _n_ = (n);				      \
-	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	      \
-				(unsigned long)_n_, sizeof (*(ptr)));	      \
-	})
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-				      unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32(ptr, old, new);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new, size);
-	}
-
-	return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					       \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * xchg_u32
- *
- * Note that a15 is used here because the register allocation
- * done by the compiler is not guaranteed and a window overflow
- * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
- * where no register reference will cause an overflow.
- */
-
-static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
-{
-	unsigned long tmp;
-	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
-			     "l32i    %0, %1, 0              \n\t"
-			     "s32i    %2, %1, 0              \n\t"
-			     "wsr     a15, "__stringify(PS)" \n\t"
-			     "rsync                          \n\t"
-			     : "=&a" (tmp)
-			     : "a" (m), "a" (val)
-			     : "a15", "memory");
-	return tmp;
-}
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-/*
- * This only works if the compiler isn't horribly bad at optimizing.
- * gcc-2.5.8 reportedly can't handle this, but I define that one to
- * be dead anyway.
- */
-
-extern void __xchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 4:
-		return xchg_u32(ptr, x);
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-extern void set_except_vector(int n, void *addr);
-
-static inline void spill_registers(void)
-{
-	unsigned int a0, ps;
-
-	__asm__ __volatile__ (
-		"movi	a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
-		"mov	a12, a0\n\t"
-		"rsr	a13," __stringify(SAR) "\n\t"
-		"xsr	a14," __stringify(PS) "\n\t"
-		"movi	a0, _spill_registers\n\t"
-		"rsync\n\t"
-		"callx0 a0\n\t"
-		"mov	a0, a12\n\t"
-		"wsr	a13," __stringify(SAR) "\n\t"
-		"wsr	a14," __stringify(PS) "\n\t"
-		:: "a" (&a0), "a" (&ps)
-		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
-}
-
-#define arch_align_stack(x) (x)
-
-#endif	/* _XTENSA_SYSTEM_H */
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 3fa526fd3c99..6e4bb3b791ab 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -17,7 +17,9 @@
 #define _XTENSA_UACCESS_H
 
 #include <linux/errno.h>
+#ifndef __ASSEMBLY__
 #include <linux/sched.h>
+#endif
 #include <asm/types.h>
 
 #define VERIFY_READ    0
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 2c9004770c4e..6a2d6edf8f72 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -34,7 +34,6 @@
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
-#include <asm/system.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/platform.h>
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 2dff698ab02e..33eea4c16f12 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -24,7 +24,6 @@
 
 #include <asm/pgtable.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/elf.h>
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 1e5a034fe011..17e746f7be60 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -34,7 +34,6 @@
 # include <linux/seq_file.h>
 #endif
 
-#include <asm/system.h>
 #include <asm/bootparam.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index e64efac3b9db..bc1e14cf9369 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -381,6 +381,25 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task)
 	return sp;
 }
 
+static inline void spill_registers(void)
+{
+	unsigned int a0, ps;
+
+	__asm__ __volatile__ (
+		"movi	a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
+		"mov	a12, a0\n\t"
+		"rsr	a13," __stringify(SAR) "\n\t"
+		"xsr	a14," __stringify(PS) "\n\t"
+		"movi	a0, _spill_registers\n\t"
+		"rsync\n\t"
+		"callx0 a0\n\t"
+		"mov	a0, a12\n\t"
+		"wsr	a13," __stringify(SAR) "\n\t"
+		"wsr	a14," __stringify(PS) "\n\t"
+		:: "a" (&a0), "a" (&ps)
+		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
+}
+
 void show_trace(struct task_struct *task, unsigned long *sp)
 {
 	unsigned long a0, a1, pc;
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index e367e3026436..b17885a0b508 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -19,7 +19,6 @@
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
 #include <asm/uaccess.h>
-#include <asm/system.h>
 #include <asm/pgalloc.h>
 
 unsigned long asid_cache = ASID_USER_FIRST;
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 239461d8ea88..e2700b21395b 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -18,7 +18,6 @@
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>
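
Note (not part of the patch, added for illustration): the operations that move into asm/cmpxchg.h keep their existing single-CPU contract. cmpxchg(ptr, old, new) writes new only when *ptr still holds old and always returns the value that was there before, while xchg(ptr, x) swaps unconditionally; on this non-SMP port both rely on raising the interrupt level with rsil rather than on a hardware compare-and-swap. Below is a minimal user-space sketch of that contract, using hypothetical demo_* stand-ins rather than the kernel macros.

#include <assert.h>
#include <stdio.h>

/* Stand-in for cmpxchg(): store "newval" only if "*p" still holds "old";
 * always return the value found in "*p" beforehand. */
static int demo_cmpxchg(volatile int *p, int old, int newval)
{
	int prev = *p;

	if (prev == old)
		*p = newval;
	return prev;
}

/* Stand-in for xchg(): unconditionally store "val", return the old value. */
static int demo_xchg(volatile int *p, int val)
{
	int prev = *p;

	*p = val;
	return prev;
}

int main(void)
{
	volatile int v = 1;

	assert(demo_cmpxchg(&v, 1, 2) == 1 && v == 2);	/* old matched: updated */
	assert(demo_cmpxchg(&v, 1, 3) == 2 && v == 2);	/* no match: unchanged  */
	assert(demo_xchg(&v, 7) == 2 && v == 7);	/* always swaps         */

	printf("cmpxchg/xchg contract holds, v = %d\n", v);
	return 0;
}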