MIPS: uaccess: Switch lock annotations to might_fault().

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Author: Ralf Baechle
Date:   2009-04-28 14:17:54 +02:00
Commit: ef41f4600f (parent: 005076a16b)

2 changed files with 23 additions and 18 deletions
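
For context: might_fault() still performs the might_sleep() debug check these paths used before, and under CONFIG_PROVE_LOCKING it additionally tells lockdep that the user access may take a page fault and therefore acquire current->mm's mmap_sem for reading. The sketch below is only a paraphrase of the generic helper as it looked in kernels of this era (see include/linux/kernel.h and mm/memory.c); it is not part of this diff and the exact definition varies between versions. Note also that in the copy_{to,from}_user() wrappers further down, the annotation moves inside the access_ok() check, so a rejected range no longer trips the debug check.

/*
 * Rough sketch only -- paraphrased from the generic definitions of the
 * time, not taken from this commit; details vary by kernel version.
 */
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
	might_sleep();		/* same debug check the old annotation gave */
	if (current->mm)	/* a fault may take mmap_sem for reading */
		might_lock_read(&current->mm->mmap_sem);
}
#else
static inline void might_fault(void)
{
	might_sleep();		/* without lockdep this is plain might_sleep() */
}
#endif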

arch/mips/include/asm/checksum.h

@@ -40,7 +40,7 @@ static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
-might_sleep();
+might_fault();
return __csum_partial_copy_user((__force void *)src, dst,
len, sum, err_ptr);
}
@@ -53,7 +53,7 @@ static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
-might_sleep();
+might_fault();
if (access_ok(VERIFY_WRITE, dst, len))
return __csum_partial_copy_user(src, (__force void *)dst,
len, sum, err_ptr);

arch/mips/include/asm/uaccess.h

@@ -245,6 +245,7 @@ do { \
int __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
+might_fault(); \
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
\
@@ -334,6 +335,7 @@ do { \
__typeof__(*(ptr)) __pu_val = (x); \
int __pu_err = -EFAULT; \
\
+might_fault(); \
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
switch (size) { \
case 1: __put_user_asm("sb", __pu_addr); break; \
@@ -708,10 +710,10 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
const void *__cu_from; \
long __cu_len; \
\
-might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@@ -764,13 +766,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void *__cu_from; \
long __cu_len; \
\
-might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
-if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
+if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
+might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
__cu_len); \
+} \
__cu_len; \
})
@@ -843,10 +846,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
-might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
@@ -874,13 +877,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
-might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
-if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
+if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
+might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
+} \
__cu_len; \
})
@@ -890,10 +894,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
-might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
@@ -905,14 +909,15 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
-might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
-access_ok(VERIFY_WRITE, __cu_to, __cu_len))) \
+access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
+} \
__cu_len; \
})
@@ -932,7 +937,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
@@ -981,7 +986,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
@@ -1018,7 +1023,7 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
@@ -1037,7 +1042,7 @@ static inline long __strlen_user(const char __user *s)
{
long res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_nocheck_asm)
@@ -1067,7 +1072,7 @@ static inline long strlen_user(const char __user *s)
{
long res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_asm)
@@ -1084,7 +1089,7 @@ static inline long __strnlen_user(const char __user *s, long n)
{
long res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
@@ -1115,7 +1120,7 @@ static inline long strnlen_user(const char __user *s, long n)
{
long res;
-might_sleep();
+might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"