4ae73f2d53
The generic strncpy_from_user() is not really optimal, since it is designed to work on both little-endian and big-endian. And on little-endian you can simplify much of the logic to find the first zero byte, since little-endian arithmetic doesn't have to worry about the carry bit propagating into earlier bytes (only later bytes, which we don't care about).

But I have patches to make the generic routines use the architecture-specific <asm/word-at-a-time.h> infrastructure, so that we can regain the little-endian optimizations. But before we do that, switch over to the generic routines to make the patches each do just one well-defined thing.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
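For reference, the little-endian shortcut described above is the classic word-at-a-time zero-byte test: subtract 0x01 from every byte and mask with the byte high bits, and every zero byte lights up its high bit; false positives caused by borrow propagation can only appear in more-significant bytes, which on little-endian lie later in memory, so the lowest set bit always marks the first zero byte. Below is a minimal stand-alone sketch of that idea; the names (first_zero_byte_le, ONES, HIGHS) are invented for illustration and this is not the kernel's <asm/word-at-a-time.h> implementation, which is organized differently.

#include <stddef.h>
#include <stdint.h>

#define ONES	0x0101010101010101ULL
#define HIGHS	0x8080808080808080ULL

/*
 * Return the index (0..7) of the first zero byte of a 64-bit word loaded
 * from memory on a little-endian machine, or 8 if there is none.  Zero
 * bytes always set their high bit in 'mask'; borrows from the subtraction
 * can only disturb bytes *after* the first zero (later in memory on
 * little-endian), so the lowest set bit is always the right one.
 */
static size_t first_zero_byte_le(uint64_t word)
{
	uint64_t mask = (word - ONES) & ~word & HIGHS;

	if (!mask)
		return 8;
	/* __builtin_ctzll() is a GCC/Clang builtin (count trailing zeros). */
	return (size_t)(__builtin_ctzll(mask) / 8);
}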
46 lines
848 B
C
/*
 * User address space access functions.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/word-at-a-time.h>

/*
 * best effort, GUP based copy_from_user() that is NMI-safe
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
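For context, copy_from_user_nmi() exists because an NMI handler (most notably the perf user-stack/callchain sampling code) cannot take page faults or sleep, so it cannot use the ordinary copy_from_user(). Below is a hedged sketch of what a caller might look like; user_frame and read_user_frame are invented names for illustration, not the actual perf code.

#include <linux/errno.h>
#include <linux/uaccess.h>	/* copy_from_user_nmi() is assumed to be declared via this header */

/* Invented example type: one saved frame of a user-space call chain. */
struct user_frame {
	void __user *next_fp;
	unsigned long return_address;
};

static int read_user_frame(const void __user *fp, struct user_frame *frame)
{
	unsigned long bytes;

	/*
	 * copy_from_user_nmi() pins the page with __get_user_pages_fast(),
	 * copies through a temporary kmap_atomic() mapping, and returns the
	 * number of bytes it managed to copy (which may be short if the
	 * user page is not present).
	 */
	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame) ? 0 : -EFAULT;
}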