kill generic_segment_checks()

all callers of ->aio_read() and ->aio_write() have iov/nr_segs already
checked - generic_segment_checks() done after that is just an odd way
to spell iov_length().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro
Date:   2014-03-04 15:24:06 -05:00
Commit: cb66a7a1f1
Parent: 0ae5e4d370

10 changed files with 16 additions and 104 deletions
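For context, a minimal userspace sketch (not part of the commit) of the equivalence the message describes: once the callers have already validated iov/nr_segs, all generic_segment_checks() still produced was the total byte count of the vector, i.e. the sum of the segment lengths that iov_length() returns. The helper below is re-declared locally for illustration and uses the standard struct iovec from <sys/uio.h> rather than the kernel's types.

	#include <stdio.h>
	#include <sys/uio.h>		/* struct iovec */

	/* Mirrors the kernel helper of the same name: just sum the segment lengths. */
	static size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
	{
		size_t len = 0;
		unsigned long seg;

		for (seg = 0; seg < nr_segs; seg++)
			len += iov[seg].iov_len;
		return len;
	}

	int main(void)
	{
		char a[16], b[32];
		const struct iovec iov[2] = {
			{ .iov_base = a, .iov_len = sizeof(a) },
			{ .iov_base = b, .iov_len = sizeof(b) },
		};

		printf("%zu\n", iov_length(iov, 2));	/* prints 48 */
		return 0;
	}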

drivers/staging/lustre/lustre/llite/file.c

@@ -1180,9 +1180,7 @@ static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	ssize_t result;
 	int refcheck;
-	result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-	if (result)
-		return result;
+	count = iov_length(iov, nr_segs);
 	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
@@ -1235,14 +1233,10 @@ static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
 	struct lu_env *env;
 	struct vvp_io_args *args;
-	size_t count = 0;
+	size_t count = iov_length(iov, nr_segs);
 	ssize_t result;
 	int refcheck;
-	result = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
-	if (result)
-		return result;
 	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		return PTR_ERR(env);

fs/btrfs/file.c

@@ -1726,12 +1726,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 	mutex_lock(&inode->i_mutex);
-	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-	if (err) {
-		mutex_unlock(&inode->i_mutex);
-		goto out;
-	}
-	count = ocount;
+	count = ocount = iov_length(iov, nr_segs);
 	current->backing_dev_info = inode->i_mapping->backing_dev_info;
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));

fs/ceph/file.c

@@ -828,12 +828,8 @@ again:
 	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
 	     ceph_cap_string(got));
-	if (!read) {
-		ret = generic_segment_checks(iov, &nr_segs,
-					     &len, VERIFY_WRITE);
-		if (ret)
-			goto out;
-	}
+	if (!read)
+		len = iov_length(iov, nr_segs);
 	iov_iter_init(&i, iov, nr_segs, len, read);
@@ -855,7 +851,6 @@ again:
 		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
 	}
-out:
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
 	ceph_put_cap_refs(ci, got);
@@ -911,9 +906,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	mutex_lock(&inode->i_mutex);
-	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
-	if (err)
-		goto out;
+	count = iov_length(iov, nr_segs);
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = file->f_mapping->backing_dev_info;

fs/fuse/file.c

@@ -1208,12 +1208,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	WARN_ON(iocb->ki_pos != pos);
-	ocount = 0;
-	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-	if (err)
-		return err;
-	count = ocount;
+	count = ocount = iov_length(iov, nr_segs);
 	mutex_lock(&inode->i_mutex);
 	/* We can write back this queue in page reclaim */

fs/ntfs/file.c

@@ -2091,10 +2091,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
 	size_t count;		/* after file limit checks */
 	ssize_t written, err;
-	count = 0;
-	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
-	if (err)
-		return err;
+	count = iov_length(iov, nr_segs);
 	pos = *ppos;
 	/* We can write back this queue in page reclaim. */
 	current->backing_dev_info = mapping->backing_dev_info;

fs/ocfs2/file.c

@@ -2355,12 +2355,7 @@ relock:
 	/* communicate with ocfs2_dio_end_io */
 	ocfs2_iocb_set_rw_locked(iocb, rw_level);
-	ret = generic_segment_checks(iov, &nr_segs, &ocount,
-				     VERIFY_READ);
-	if (ret)
-		goto out_dio;
-	count = ocount;
+	count = ocount = iov_length(iov, nr_segs);
 	ret = generic_write_checks(file, ppos, &count,
 				   S_ISBLK(inode->i_mode));
 	if (ret)

fs/xfs/xfs_file.c

@@ -253,9 +253,7 @@ xfs_file_aio_read(
 	if (file->f_mode & FMODE_NOCMTIME)
 		ioflags |= IO_INVIS;
-	ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
-	if (ret < 0)
-		return ret;
+	size = iov_length(iovp, nr_segs);
 	if (unlikely(ioflags & IO_ISDIRECT)) {
 		xfs_buftarg_t	*target =
@@ -777,10 +775,7 @@ xfs_file_aio_write(
 	BUG_ON(iocb->ki_pos != pos);
-	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-	if (ret)
-		return ret;
+	ocount = iov_length(iovp, nr_segs);
 	if (ocount == 0)
 		return 0;

include/linux/fs.h

@@ -2412,8 +2412,6 @@ extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *,
 extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-extern int generic_segment_checks(const struct iovec *iov,
-		unsigned long *nr_segs, size_t *count, int access_flags);
 /* fs/block_dev.c */
 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,

mm/filemap.c

@@ -1663,45 +1663,6 @@ out:
 	return written ? written : error;
 }
-/*
- * Performs necessary checks before doing a write
- * @iov:	io vector request
- * @nr_segs:	number of segments in the iovec
- * @count:	number of bytes to write
- * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
- *
- * Adjust number of segments and amount of bytes to write (nr_segs should be
- * properly initialized first). Returns appropriate error code that caller
- * should return or zero in case that write should be allowed.
- */
-int generic_segment_checks(const struct iovec *iov,
-			unsigned long *nr_segs, size_t *count, int access_flags)
-{
-	unsigned long seg;
-	size_t cnt = 0;
-	for (seg = 0; seg < *nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		cnt += iv->iov_len;
-		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		*nr_segs = seg;
-		cnt -= iv->iov_len;	/* This segment is no good */
-		break;
-	}
-	*count = cnt;
-	return 0;
-}
-EXPORT_SYMBOL(generic_segment_checks);
 /**
  * generic_file_aio_read - generic filesystem read routine
  * @iocb:	kernel I/O control block
@@ -1717,15 +1678,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
 	struct file *filp = iocb->ki_filp;
-	ssize_t retval;
+	ssize_t retval = 0;
 	size_t count;
 	loff_t *ppos = &iocb->ki_pos;
 	struct iov_iter i;
-	count = 0;
-	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-	if (retval)
-		return retval;
+	count = iov_length(iov, nr_segs);
 	iov_iter_init(&i, iov, nr_segs, count, 0);
 	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
@@ -2615,12 +2573,7 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	ssize_t		status;
 	struct iov_iter from;
-	ocount = 0;
-	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-	if (err)
-		return err;
-	count = ocount;
+	count = ocount = iov_length(iov, nr_segs);
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;

mm/shmem.c

@@ -1412,14 +1412,11 @@ static ssize_t shmem_file_aio_read(struct kiocb *iocb,
 	unsigned long offset;
 	enum sgp_type sgp = SGP_READ;
 	int error = 0;
-	ssize_t retval;
-	size_t count;
+	ssize_t retval = 0;
+	size_t count = iov_length(iov, nr_segs);
 	loff_t *ppos = &iocb->ki_pos;
 	struct iov_iter iter;
-	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-	if (retval)
-		return retval;
 	iov_iter_init(&iter, iov, nr_segs, count, 0);
 	/*