Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull more misc uaccess and vfs updates from Al Viro:
 "The rest of the stuff from -next (more uaccess work) + assorted fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  score: traps: Add missing include file to fix build error
  fs/super.c: don't fool lockdep in freeze_super() and thaw_super() paths
  fs/super.c: fix race between freeze_super() and thaw_super()
  overlayfs: Fix setting IOP_XATTR flag
  iov_iter: kernel-doc import_iovec() and rw_copy_check_uvector()
  blackfin: no access_ok() for __copy_{to,from}_user()
  arm64: don't zero in __copy_from_user{,_inatomic}
  arm: don't zero in __copy_from_user_inatomic()/__copy_from_user()
  arc: don't leak bits of kernel stack into coredump
  alpha: get rid of tail-zeroing in __copy_user()
commit b26b5ef5ec
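A recurring pattern in the arch diffs below: on a partial fault, the checked copy_from_user() should zero only the tail that was left uncopied, while the raw __copy_from_user() primitives stop zeroing anything. A minimal sketch of the checked wrapper, assuming a raw_copy() primitive that returns the number of bytes NOT copied (copy_from_user_sketch and raw_copy are illustrative names, not the kernel's):

	static inline unsigned long
	copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;	/* assume total failure until checked */

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy(to, from, n);	/* bytes NOT copied */
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero only the tail */
		return res;
	}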
arch/alpha/include/asm/uaccess.h
@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
+	long res = n;
 	if (likely(__access_ok((unsigned long)from, n, get_fs())))
-		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
-	else
-		memset(to, 0, n);
-	return n;
+		res = __copy_from_user_inatomic(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 extern void __do_clear_user(void);
arch/alpha/lib/copy_user.S
@@ -126,22 +126,8 @@ $65:
 	bis $31,$31,$0
 $41:
 $35:
-$exitout:
-	ret $31,($28),1
-
 $exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving
-	   random kernel data around to be copied elsewhere.  */
-
-	mov $0,$1
-$101:
-	EXO ( ldq_u $2,0($6) )
-	subq $1,1,$1
-	mskbl $2,$6,$2
-	EXO ( stq_u $2,0($6) )
-	addq $6,1,$6
-	bgt $1,$101
-
+$exitout:
 	ret $31,($28),1
 
 	.end __copy_user
arch/alpha/lib/ev6-copy_user.S
@@ -228,33 +228,12 @@ $dirtyentry:
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 
 $zerolength:
+$exitin:
 $exitout:			# Destination for exception recovery(?)
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. E  .. ..
 	ret $31,($28),1		# L0 .. .. ..	: L U L U
 
-$exitin:
-
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving
-	   random kernel data around to be copied elsewhere. */
-
-	nop
-	nop
-	nop
-	mov	$0,$1
-
-$101:
-	EXO ( stb $31,0($6) )	# L
-	subq $1,1,$1		# E
-	addq $6,1,$6		# E
-	bgt $1,$101		# U
-
-	nop
-	nop
-	nop
-	ret $31,($28),1		# L0
-
 	.end __copy_user
 	EXPORT_SYMBOL(__copy_user)
arch/arc/kernel/signal.c
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	struct user_regs_struct uregs;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (!err)
-		set_current_blocked(&set);
-
 	err |= __copy_from_user(&uregs.scratch,
 				&(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+	if (err)
+		return err;
 
+	set_current_blocked(&set);
 	regs->bta	= uregs.scratch.bta;
 	regs->lp_start	= uregs.scratch.lp_start;
 	regs->lp_end	= uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	regs->r0	= uregs.scratch.r0;
 	regs->sp	= uregs.scratch.sp;
 
-	return err;
+	return 0;
 }
 
 static inline int is_do_ss_needed(unsigned int magic)
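The arc bug shape: the old code wrote uregs into regs before checking err, so a faulting __copy_from_user() left partially uninitialized stack data in the registers (visible later in a coredump). The fix's shape, as a hedged sketch: gather everything from userspace first, bail on any fault, and only then commit to task state (commit_regs() below is a hypothetical helper standing in for the field-by-field copy-out):

	static int restore_sketch(struct pt_regs *regs,
				  struct rt_sigframe __user *sf)
	{
		struct user_regs_struct uregs;
		sigset_t set;
		int err;

		err  = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
		err |= __copy_from_user(&uregs.scratch,
					&sf->uc.uc_mcontext.regs.scratch,
					sizeof(uregs.scratch));
		if (err)
			return err;	/* nothing half-filled reaches regs */

		set_current_blocked(&set);
		commit_regs(regs, &uregs);	/* hypothetical helper */
		return 0;
	}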
arch/arm/include/asm/uaccess.h
@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
arch/arm/lib/copy_from_user.S
@@ -100,12 +100,9 @@ EXPORT_SYMBOL(arm_copy_from_user)
 	.pushsection .fixup,"ax"
 	.align 0
 	copy_abort_preamble
-	ldmfd	sp!, {r1, r2}
-	sub	r3, r0, r1
-	rsb	r1, r3, r2
-	str	r1, [sp]
-	bl	__memzero
-	ldr	r0, [sp], #4
+	ldmfd	sp!, {r1, r2, r3}
+	sub	r0, r0, r1
+	rsb	r0, r0, r2
 	copy_abort_end
 	.popsection
 
arch/arm64/include/asm/uaccess.h
@@ -278,14 +278,16 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	kasan_check_write(to, n);
 
 	if (access_ok(VERIFY_READ, from, n)) {
 		check_object_size(to, n, false);
-		n = __arch_copy_from_user(to, from, n);
-	} else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+		res = __arch_copy_from_user(to, from, n);
+	}
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
arch/arm64/lib/copy_from_user.S
@@ -79,11 +79,6 @@ ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
-9998:
-	sub	x0, end, dst
-9999:
-	strb	wzr, [dst], #1			// zero remaining buffer space
-	cmp	dst, end
-	b.lo	9999b
+9998:	sub	x0, end, dst			// bytes not copied
 	ret
 	.previous
arch/blackfin/include/asm/uaccess.h
@@ -163,18 +163,29 @@ static inline int bad_user_access_length(void)
 		: "a" (__ptr(ptr)));		\
 })
 
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	memcpy((void __force *)to, from, n);
+	SSYNC();
+	return 0;
+}
+
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (likely(access_ok(VERIFY_READ, from, n))) {
-		memcpy(to, (const void __force *)from, n);
-		return 0;
-	}
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		return __copy_from_user(to, from, n);
 	memset(to, 0, n);
 	return n;
 }
@@ -182,12 +193,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		memcpy((void __force *)to, from, n);
-	else
-		return n;
-	SSYNC();
-	return 0;
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		return __copy_to_user(to, from, n);
+	return n;
 }
 
 /*
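The blackfin change restores the usual uaccess layering: the double-underscore primitives assume access_ok() was already done and never zero anything, while the checked wrappers add the range check and the security memset. A hedged sketch of when a caller may use the raw form (read_pair and its parameters are illustrative, not kernel API):

	static int read_pair(void *a, void *b, const void __user *u, size_t sz)
	{
		if (!access_ok(VERIFY_READ, u, 2 * sz))
			return -EFAULT;
		/* range already validated, so the raw primitive is fine here */
		if (__copy_from_user(a, u, sz) ||
		    __copy_from_user(b, (const char __user *)u + sz, sz))
			return -EFAULT;
		return 0;
	}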
arch/score/kernel/traps.c
@@ -29,6 +29,7 @@
 #include <asm/cacheflush.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
+#include <asm/uaccess.h>
 
 unsigned long exception_handlers[32];
 
fs/overlayfs/super.c
@@ -1303,6 +1303,12 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	if (!oe)
 		goto out_put_cred;
 
+	sb->s_magic = OVERLAYFS_SUPER_MAGIC;
+	sb->s_op = &ovl_super_operations;
+	sb->s_xattr = ovl_xattr_handlers;
+	sb->s_fs_info = ufs;
+	sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+
 	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR));
 	if (!root_dentry)
 		goto out_free_oe;
@@ -1326,12 +1332,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	ovl_inode_init(d_inode(root_dentry), realinode, !!upperpath.dentry);
 	ovl_copyattr(realinode, d_inode(root_dentry));
 
-	sb->s_magic = OVERLAYFS_SUPER_MAGIC;
-	sb->s_op = &ovl_super_operations;
-	sb->s_xattr = ovl_xattr_handlers;
 	sb->s_root = root_dentry;
-	sb->s_fs_info = ufs;
-	sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
 
 	return 0;
 
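Why moving the assignments matters: the root inode is allocated by ovl_new_inode() inside d_make_root(), and inode initialization latches per-inode flags from the superblock at that moment. If sb->s_xattr is still NULL, the root inode never gets IOP_XATTR and xattr/acl lookups on it are skipped. Roughly the dependency, paraphrasing inode_init_always() in fs/inode.c rather than quoting it:

	/* paraphrase: inode flags are derived from the superblock at
	 * creation time, so s_xattr must be set before the first inode */
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;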
fs/read_write.c
@@ -730,6 +730,35 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
 /* A write operation does a read from user space and vice versa */
 #define vrfy_dir(type)  ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
 
+/**
+ * rw_copy_check_uvector() - Copy an array of &struct iovec from userspace
+ *     into the kernel and check that it is valid.
+ *
+ * @type: One of %CHECK_IOVEC_ONLY, %READ, or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @fast_pointer.
+ * @fast_pointer: Pointer to (usually small on-stack) kernel array.
+ * @ret_pointer: (output parameter) Pointer to a variable that will point to
+ *     either @fast_pointer, a newly allocated kernel array, or NULL,
+ *     depending on which array was used.
+ *
+ * This function copies an array of &struct iovec of @nr_segs from
+ * userspace into the kernel and checks that each element is valid (e.g.
+ * it does not point to a kernel address or cause overflow by being too
+ * large, etc.).
+ *
+ * As an optimization, the caller may provide a pointer to a small
+ * on-stack array in @fast_pointer, typically %UIO_FASTIOV elements long
+ * (the size of this array, or 0 if unused, should be given in @fast_segs).
+ *
+ * @ret_pointer will always point to the array that was used, so the
+ * caller must take care not to call kfree() on it e.g. in case the
+ * @fast_pointer array was used and it was allocated on the stack.
+ *
+ * Return: The total number of bytes covered by the iovec array on success
+ *     or a negative error code on error.
+ */
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
 			      unsigned long nr_segs, unsigned long fast_segs,
 			      struct iovec *fast_pointer,
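A hedged usage sketch of the call pattern the new kernel-doc describes: a small on-stack array, with @ret_pointer telling the caller whether anything must be freed (count_bytes_sketch and its locals are illustrative names):

	static ssize_t count_bytes_sketch(const struct iovec __user *uvector,
					  unsigned long nr_segs)
	{
		struct iovec iovstack[UIO_FASTIOV];
		struct iovec *iov = NULL;
		ssize_t len;

		len = rw_copy_check_uvector(READ, uvector, nr_segs,
					    ARRAY_SIZE(iovstack), iovstack, &iov);
		/* iov now points at iovstack or a heap array; len is the
		 * total byte count (or a negative error) */
		if (iov != iovstack)	/* never kfree() the on-stack array */
			kfree(iov);
		return len;
	}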
fs/super.c
@@ -1269,25 +1269,34 @@ EXPORT_SYMBOL(__sb_start_write);
 static void sb_wait_write(struct super_block *sb, int level)
 {
 	percpu_down_write(sb->s_writers.rw_sem + level-1);
-	/*
-	 * We are going to return to userspace and forget about this lock, the
-	 * ownership goes to the caller of thaw_super() which does unlock.
-	 *
-	 * FIXME: we should do this before return from freeze_super() after we
-	 * called sync_filesystem(sb) and s_op->freeze_fs(sb), and thaw_super()
-	 * should re-acquire these locks before s_op->unfreeze_fs(sb). However
-	 * this leads to lockdep false-positives, so currently we do the early
-	 * release right after acquire.
-	 */
-	percpu_rwsem_release(sb->s_writers.rw_sem + level-1, 0, _THIS_IP_);
 }
 
-static void sb_freeze_unlock(struct super_block *sb)
+/*
+ * We are going to return to userspace and forget about these locks, the
+ * ownership goes to the caller of thaw_super() which does unlock().
+ */
+static void lockdep_sb_freeze_release(struct super_block *sb)
+{
+	int level;
+
+	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+}
+
+/*
+ * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
+ */
+static void lockdep_sb_freeze_acquire(struct super_block *sb)
 {
 	int level;
 
 	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
 		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+}
+
+static void sb_freeze_unlock(struct super_block *sb)
+{
+	int level;
 
 	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
 		percpu_up_write(sb->s_writers.rw_sem + level);
@@ -1379,10 +1388,11 @@ int freeze_super(struct super_block *sb)
 		}
 	}
 	/*
-	 * This is just for debugging purposes so that fs can warn if it
-	 * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
+	 * For debugging purposes so that fs can warn if it sees write activity
+	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
 	 */
 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
+	lockdep_sb_freeze_release(sb);
 	up_write(&sb->s_umount);
 	return 0;
 }
@@ -1399,7 +1409,7 @@ int thaw_super(struct super_block *sb)
 	int error;
 
 	down_write(&sb->s_umount);
-	if (sb->s_writers.frozen == SB_UNFROZEN) {
+	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
 		up_write(&sb->s_umount);
 		return -EINVAL;
 	}
@@ -1409,11 +1419,14 @@ int thaw_super(struct super_block *sb)
 		goto out;
 	}
 
+	lockdep_sb_freeze_acquire(sb);
+
 	if (sb->s_op->unfreeze_fs) {
 		error = sb->s_op->unfreeze_fs(sb);
 		if (error) {
 			printk(KERN_ERR
 				"VFS:Filesystem thaw failed\n");
+			lockdep_sb_freeze_release(sb);
 			up_write(&sb->s_umount);
 			return error;
 		}
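The pattern being fixed here is lock ownership that crosses a system call boundary: freeze_super() returns to userspace still holding the s_writers locks, and a different task later releases them in thaw_super(). percpu_rwsem_release()/percpu_rwsem_acquire() update only lockdep's bookkeeping, not the lock itself. A condensed, hedged sketch of the flow (the two sketch functions are illustrative, not fs/super.c):

	static void freeze_path_sketch(struct percpu_rw_semaphore *sem)
	{
		percpu_down_write(sem);			 /* really take it */
		percpu_rwsem_release(sem, 0, _THIS_IP_); /* lockdep-only */
		/* return to userspace still holding sem */
	}

	static void thaw_path_sketch(struct percpu_rw_semaphore *sem)
	{
		percpu_rwsem_acquire(sem, 0, _THIS_IP_); /* lockdep-only */
		percpu_up_write(sem);			 /* really drop it */
	}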
lib/iov_iter.c
@@ -1139,6 +1139,28 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 }
 EXPORT_SYMBOL(dup_iter);
 
+/**
+ * import_iovec() - Copy an array of &struct iovec from userspace
+ *     into the kernel, check that it is valid, and initialize a new
+ *     &struct iov_iter iterator to access it.
+ *
+ * @type: One of %READ or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @iov.
+ * @iov: (input and output parameter) Pointer to pointer to (usually small
+ *     on-stack) kernel array.
+ * @i: Pointer to iterator that will be initialized on success.
+ *
+ * If the array pointed to by *@iov is large enough to hold all @nr_segs,
+ * then this function places %NULL in *@iov on return. Otherwise, a new
+ * array will be allocated and the result placed in *@iov. This means that
+ * the caller may call kfree() on *@iov regardless of whether the small
+ * on-stack array was used or not (and regardless of whether this function
+ * returns an error or not).
+ *
+ * Return: 0 on success or negative error code on error.
+ */
 int import_iovec(int type, const struct iovec __user * uvector,
 		 unsigned nr_segs, unsigned fast_segs,
 		 struct iovec **iov, struct iov_iter *i)
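A hedged usage sketch matching the kernel-doc above; note the unconditional kfree(), which is safe because *iov is set to NULL whenever the on-stack array was used (read_iov_sketch and its locals are illustrative names):

	static ssize_t read_iov_sketch(struct file *file,
				       const struct iovec __user *uvector,
				       unsigned nr_segs, loff_t *pos)
	{
		struct iovec iovstack[UIO_FASTIOV];
		struct iovec *iov = iovstack;
		struct iov_iter iter;
		ssize_t ret;

		ret = import_iovec(READ, uvector, nr_segs,
				   ARRAY_SIZE(iovstack), &iov, &iter);
		if (ret < 0)
			return ret;
		ret = vfs_iter_read(file, &iter, pos);	/* consume the data */
		kfree(iov);	/* NULL when iovstack was used, always safe */
		return ret;
	}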