Merge tag 'nfs-rdma-for-5.1-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

NFSoRDMA client updates for 5.1

New features:
- Convert rpc auth layer to use xdr_streams
- Config option to disable insecure enctypes
- Reduce size of RPC receive buffers

Bugfixes and cleanups:
- Fix sparse warnings
- Check inline size before providing a write chunk
- Reduce the receive doorbell rate
- Various tracepoint improvements

[Trond: Fix up merge conflicts]
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Trond Myklebust 2019-02-25 08:39:26 -05:00
commit 06b5fc3ad9
47 changed files with 2078 additions and 1581 deletions
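
The pattern repeated across the decoder diffs below: the per-caller print_overflow_msg() helpers are deleted because xdr_inline_decode() already returns NULL on a short receive buffer, and the xdr_stream now carries an rpc_rqst back pointer that lets the generic code emit the new rpc_xdr_overflow tracepoint. A minimal before/after sketch of that conversion, using decode_uint32() from the nfs3xdr.c hunk below as the example:

/* Before: every decoder carried its own out-of-line overflow dprintk. */
static int decode_uint32_old(struct xdr_stream *xdr, u32 *value)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	*value = be32_to_cpup(p);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/* After: xdr_inline_decode() reports the overflow itself through
 * xdr->rqst and the rpc_xdr_overflow tracepoint, so the caller only
 * returns an error (the in-tree function keeps the name decode_uint32).
 */
static int decode_uint32_new(struct xdr_stream *xdr, u32 *value)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -EIO;
	*value = be32_to_cpup(p);
	return 0;
}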

View File

@ -74,17 +74,6 @@ static void nlm4_compute_offsets(const struct nlm_lock *lock,
*l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
}
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("lockd: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
/*
* Encode/decode NLMv4 basic data types
*
@ -176,7 +165,6 @@ out_size:
dprintk("NFS: returned cookie was too long: %u\n", length);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
@ -236,7 +224,6 @@ out_bad_xdr:
__func__, be32_to_cpup(p));
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
@ -309,7 +296,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
out:
return error;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}

View File

@ -70,17 +70,6 @@ static void nlm_compute_offsets(const struct nlm_lock *lock,
*l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
}
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("lockd: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
/*
* Encode/decode NLMv3 basic data types
*
@ -173,7 +162,6 @@ out_size:
dprintk("NFS: returned cookie was too long: %u\n", length);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
@ -231,7 +219,6 @@ out_enum:
__func__, be32_to_cpup(p));
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
@ -303,7 +290,6 @@ static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
out:
return error;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}

View File

@ -72,16 +72,6 @@ static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p)
return xdr_ressize_check(rqstp, p);
}
static __be32 *read_buf(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p;
p = xdr_inline_decode(xdr, nbytes);
if (unlikely(p == NULL))
printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
return p;
}
static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len,
const char **str, size_t maxlen)
{
@ -98,13 +88,13 @@ static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p;
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
fh->size = ntohl(*p);
if (fh->size > NFS4_FHSIZE)
return htonl(NFS4ERR_BADHANDLE);
p = read_buf(xdr, fh->size);
p = xdr_inline_decode(xdr, fh->size);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
memcpy(&fh->data[0], p, fh->size);
@ -117,11 +107,11 @@ static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
__be32 *p;
unsigned int attrlen;
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
attrlen = ntohl(*p);
p = read_buf(xdr, attrlen << 2);
p = xdr_inline_decode(xdr, attrlen << 2);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
if (likely(attrlen > 0))
@ -135,7 +125,7 @@ static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
__be32 *p;
p = read_buf(xdr, NFS4_STATEID_SIZE);
p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
memcpy(stateid->data, p, NFS4_STATEID_SIZE);
@ -156,7 +146,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ);
if (unlikely(status != 0))
return status;
p = read_buf(xdr, 12);
p = xdr_inline_decode(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
hdr->minorversion = ntohl(*p++);
@ -176,7 +166,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
{
__be32 *p;
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE_HDR);
*op = ntohl(*p);
@ -205,7 +195,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp,
status = decode_delegation_stateid(xdr, &args->stateid);
if (unlikely(status != 0))
return status;
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
args->truncate = ntohl(*p);
@ -227,7 +217,7 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
__be32 status = 0;
uint32_t iomode;
p = read_buf(xdr, 4 * sizeof(uint32_t));
p = xdr_inline_decode(xdr, 4 * sizeof(uint32_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
@ -245,14 +235,14 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
if (unlikely(status != 0))
return status;
p = read_buf(xdr, 2 * sizeof(uint64_t));
p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
p = xdr_decode_hyper(p, &args->cbl_range.offset);
p = xdr_decode_hyper(p, &args->cbl_range.length);
return decode_layout_stateid(xdr, &args->cbl_stateid);
} else if (args->cbl_recall_type == RETURN_FSID) {
p = read_buf(xdr, 2 * sizeof(uint64_t));
p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
p = xdr_decode_hyper(p, &args->cbl_fsid.major);
@ -275,7 +265,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
args->ndevs = 0;
/* Num of device notifications */
p = read_buf(xdr, sizeof(uint32_t));
p = xdr_inline_decode(xdr, sizeof(uint32_t));
if (unlikely(p == NULL)) {
status = htonl(NFS4ERR_BADXDR);
goto out;
@ -298,7 +288,8 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
for (i = 0; i < n; i++) {
struct cb_devicenotifyitem *dev = &args->devs[i];
p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE);
p = xdr_inline_decode(xdr, (4 * sizeof(uint32_t)) +
NFS4_DEVICEID4_SIZE);
if (unlikely(p == NULL)) {
status = htonl(NFS4ERR_BADXDR);
goto err;
@ -329,7 +320,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) {
p = read_buf(xdr, sizeof(uint32_t));
p = xdr_inline_decode(xdr, sizeof(uint32_t));
if (unlikely(p == NULL)) {
status = htonl(NFS4ERR_BADXDR);
goto err;
@ -359,7 +350,7 @@ static __be32 decode_sessionid(struct xdr_stream *xdr,
{
__be32 *p;
p = read_buf(xdr, NFS4_MAX_SESSIONID_LEN);
p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
@ -379,13 +370,13 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
goto out;
status = htonl(NFS4ERR_RESOURCE);
p = read_buf(xdr, sizeof(uint32_t));
p = xdr_inline_decode(xdr, sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
rc_list->rcl_nrefcalls = ntohl(*p++);
if (rc_list->rcl_nrefcalls) {
p = read_buf(xdr,
p = xdr_inline_decode(xdr,
rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
if (unlikely(p == NULL))
goto out;
@ -418,7 +409,7 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
if (status)
return status;
p = read_buf(xdr, 5 * sizeof(uint32_t));
p = xdr_inline_decode(xdr, 5 * sizeof(uint32_t));
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
@ -461,7 +452,7 @@ static __be32 decode_recallany_args(struct svc_rqst *rqstp,
uint32_t bitmap[2];
__be32 *p, status;
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
args->craa_objs_to_keep = ntohl(*p++);
@ -480,7 +471,7 @@ static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
struct cb_recallslotargs *args = argp;
__be32 *p;
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
args->crsa_target_highest_slotid = ntohl(*p++);
@ -492,14 +483,14 @@ static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_arg
__be32 *p;
unsigned int len;
p = read_buf(xdr, 12);
p = xdr_inline_decode(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
len = be32_to_cpu(*p);
p = read_buf(xdr, len);
p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
@ -537,7 +528,7 @@ static __be32 decode_write_response(struct xdr_stream *xdr,
__be32 *p;
/* skip the always zero field */
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out;
p++;
@ -577,7 +568,7 @@ static __be32 decode_offload_args(struct svc_rqst *rqstp,
return status;
/* decode status */
p = read_buf(xdr, 4);
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out;
args->error = ntohl(*p++);
@ -943,10 +934,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
};
unsigned int nops = 0;
xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
xdr_init_decode(&xdr_in, &rqstp->rq_arg,
rqstp->rq_arg.head[0].iov_base, NULL);
p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
xdr_init_encode(&xdr_out, &rqstp->rq_res, p, NULL);
status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
if (status == htonl(NFS4ERR_RESOURCE))

View File

@ -2036,7 +2036,7 @@ ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
dprintk("%s: Begin\n", __func__);
xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);
xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

View File

@ -22,6 +22,7 @@
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@ -55,41 +56,15 @@
#define NFS_attrstat_sz (1+NFS_fattr_sz)
#define NFS_diropres_sz (1+NFS_fhandle_sz+NFS_fattr_sz)
#define NFS_readlinkres_sz (2)
#define NFS_readres_sz (1+NFS_fattr_sz+1)
#define NFS_readlinkres_sz (2+1)
#define NFS_readres_sz (1+NFS_fattr_sz+1+1)
#define NFS_writeres_sz (NFS_attrstat_sz)
#define NFS_stat_sz (1)
#define NFS_readdirres_sz (1)
#define NFS_readdirres_sz (1+1)
#define NFS_statfsres_sz (1+NFS_info_sz)
static int nfs_stat_to_errno(enum nfs_stat);
/*
* While encoding arguments, set up the reply buffer in advance to
* receive reply data directly into the page cache.
*/
static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
unsigned int base, unsigned int len,
unsigned int bufsize)
{
struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("NFS: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
/*
* Encode/decode NFSv2 basic data types
*
@ -110,8 +85,8 @@ static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_pgio_res *result)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
count = be32_to_cpup(p);
recvd = xdr_read_pages(xdr, count);
if (unlikely(count > recvd))
@ -125,9 +100,6 @@ out_cheating:
"count %u > recvd %u\n", count, recvd);
count = recvd;
goto out;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -157,13 +129,16 @@ static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
*status = be32_to_cpup(p);
if (unlikely(!p))
return -EIO;
if (unlikely(*p != cpu_to_be32(NFS_OK)))
goto out_status;
*status = 0;
return 0;
out_status:
*status = be32_to_cpup(p);
trace_nfs_xdr_status((int)*status);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -205,14 +180,11 @@ static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
__be32 *p;
p = xdr_inline_decode(xdr, NFS2_FHSIZE);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
fh->size = NFS2_FHSIZE;
memcpy(fh->data, p, NFS2_FHSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -282,8 +254,8 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
fattr->valid |= NFS_ATTR_FATTR_V2;
@ -325,9 +297,6 @@ out_uid:
out_gid:
dprintk("NFS: returned invalid gid\n");
return -EINVAL;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -416,23 +385,20 @@ static int decode_filename_inline(struct xdr_stream *xdr,
u32 count;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
count = be32_to_cpup(p);
if (count > NFS3_MAXNAMLEN)
goto out_nametoolong;
p = xdr_inline_decode(xdr, count);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
*name = (const char *)p;
*length = count;
return 0;
out_nametoolong:
dprintk("NFS: returned filename too long: %u\n", count);
return -ENAMETOOLONG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -455,8 +421,8 @@ static int decode_path(struct xdr_stream *xdr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
length = be32_to_cpup(p);
if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
goto out_size;
@ -472,9 +438,6 @@ out_cheating:
dprintk("NFS: server cheating in pathname result: "
"length %u > received %u\n", length, recvd);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -615,8 +578,8 @@ static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
const struct nfs_readlinkargs *args = data;
encode_fhandle(xdr, args->fh);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->pglen, NFS_readlinkres_sz);
rpc_prepare_reply_pages(req, args->pages, args->pgbase,
args->pglen, NFS_readlinkres_sz);
}
/*
@ -651,8 +614,8 @@ static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
const struct nfs_pgio_args *args = data;
encode_readargs(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->count, NFS_readres_sz);
rpc_prepare_reply_pages(req, args->pages, args->pgbase,
args->count, NFS_readres_sz);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
@ -809,8 +772,8 @@ static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
const struct nfs_readdirargs *args = data;
encode_readdirargs(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
args->count, NFS_readdirres_sz);
rpc_prepare_reply_pages(req, args->pages, 0,
args->count, NFS_readdirres_sz);
}
/*
@ -951,12 +914,12 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int error;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
if (*p++ == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
if (*p++ == xdr_zero)
return -EAGAIN;
entry->eof = 1;
@ -964,8 +927,8 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
}
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
entry->ino = be32_to_cpup(p);
error = decode_filename_inline(xdr, &entry->name, &entry->len);
@ -978,17 +941,13 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
*/
entry->prev_cookie = entry->cookie;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
entry->cookie = be32_to_cpup(p);
entry->d_type = DT_UNKNOWN;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EAGAIN;
}
/*
@ -1052,17 +1011,14 @@ static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
__be32 *p;
p = xdr_inline_decode(xdr, NFS_info_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
result->tsize = be32_to_cpup(p++);
result->bsize = be32_to_cpup(p++);
result->blocks = be32_to_cpup(p++);
result->bfree = be32_to_cpup(p++);
result->bavail = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
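
The private prepare_reply_buffer() helper removed from nfs2xdr.c above (and from nfs3xdr.c below) is replaced by a shared rpc_prepare_reply_pages() exported by the RPC client; its prototype appears in the include/linux/sunrpc/clnt.h hunk further down. A rough sketch only, modelled on the removed NFSv2 copy; the actual net/sunrpc/clnt.c implementation may account for the reply verifier and data alignment differently:

/* Sketch: set up the reply buffer so bulk data lands in the page cache. */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	unsigned int replen;

	/* Words that precede the payload: RPC reply header, the
	 * credential/verifier estimate, then the protocol-level header. */
	replen = RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_rslack + hdrsize;

	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}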

View File

@ -21,6 +21,7 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@ -68,13 +69,13 @@
#define NFS3_removeres_sz (NFS3_setattrres_sz)
#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+1)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+1)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+1)
#define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13)
#define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12)
#define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6)
@ -84,7 +85,7 @@
#define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)+1)
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
static int nfs3_stat_to_errno(enum nfs_stat);
@ -103,32 +104,6 @@ static const umode_t nfs_type2fmt[] = {
[NF3FIFO] = S_IFIFO,
};
/*
* While encoding arguments, set up the reply buffer in advance to
* receive reply data directly into the page cache.
*/
static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
unsigned int base, unsigned int len,
unsigned int bufsize)
{
struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
}
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("NFS: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
/*
* Encode/decode NFSv3 basic data types
*
@ -151,13 +126,10 @@ static int decode_uint32(struct xdr_stream *xdr, u32 *value)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
*value = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_uint64(struct xdr_stream *xdr, u64 *value)
@ -165,13 +137,10 @@ static int decode_uint64(struct xdr_stream *xdr, u64 *value)
__be32 *p;
p = xdr_inline_decode(xdr, 8);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
xdr_decode_hyper(p, value);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -211,14 +180,14 @@ static int decode_inline_filename3(struct xdr_stream *xdr,
u32 count;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
count = be32_to_cpup(p);
if (count > NFS3_MAXNAMLEN)
goto out_nametoolong;
p = xdr_inline_decode(xdr, count);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
*name = (const char *)p;
*length = count;
return 0;
@ -226,9 +195,6 @@ static int decode_inline_filename3(struct xdr_stream *xdr,
out_nametoolong:
dprintk("NFS: returned filename too long: %u\n", count);
return -ENAMETOOLONG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -249,8 +215,8 @@ static int decode_nfspath3(struct xdr_stream *xdr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
count = be32_to_cpup(p);
if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
goto out_nametoolong;
@ -267,9 +233,6 @@ out_cheating:
dprintk("NFS: server cheating in pathname result: "
"count %u > recvd %u\n", count, recvd);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -303,13 +266,10 @@ static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -330,13 +290,10 @@ static int decode_writeverf3(struct xdr_stream *xdr, struct nfs_write_verifier *
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
memcpy(verifier->data, p, NFS3_WRITEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -364,13 +321,16 @@ static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
*status = be32_to_cpup(p);
if (unlikely(!p))
return -EIO;
if (unlikely(*p != cpu_to_be32(NFS3_OK)))
goto out_status;
*status = 0;
return 0;
out_status:
*status = be32_to_cpup(p);
trace_nfs_xdr_status((int)*status);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -453,23 +413,20 @@ static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
length = be32_to_cpup(p++);
if (unlikely(length > NFS3_FHSIZE))
goto out_toobig;
p = xdr_inline_decode(xdr, length);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
fh->size = length;
memcpy(fh->data, p, length);
return 0;
out_toobig:
dprintk("NFS: file handle size (%u) too big\n", length);
return -E2BIG;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static void zero_nfs_fh3(struct nfs_fh *fh)
@ -655,8 +612,8 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
p = xdr_decode_ftype3(p, &fmode);
@ -690,9 +647,6 @@ out_uid:
out_gid:
dprintk("NFS: returned invalid gid\n");
return -EINVAL;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -710,14 +664,11 @@ static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
if (*p != xdr_zero)
return decode_fattr3(xdr, fattr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -733,8 +684,8 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PRECHANGE
@ -747,9 +698,6 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -773,14 +721,11 @@ static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
if (*p != xdr_zero)
return decode_wcc_attr(xdr, fattr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
@ -808,15 +753,12 @@ out:
static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
if (*p != xdr_zero)
return decode_nfs_fh3(xdr, fh);
zero_nfs_fh3(fh);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
@ -953,8 +895,8 @@ static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
const struct nfs3_readlinkargs *args = data;
encode_nfs_fh3(xdr, args->fh);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->pglen, NFS3_readlinkres_sz);
rpc_prepare_reply_pages(req, args->pages, args->pgbase,
args->pglen, NFS3_readlinkres_sz);
}
/*
@ -986,8 +928,8 @@ static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
unsigned int replen = args->replen ? args->replen : NFS3_readres_sz;
encode_read3args(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
args->count, replen);
rpc_prepare_reply_pages(req, args->pages, args->pgbase,
args->count, replen);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
@ -1279,7 +1221,7 @@ static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
const struct nfs3_readdirargs *args = data;
encode_readdir3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
rpc_prepare_reply_pages(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
@ -1321,7 +1263,7 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
const struct nfs3_readdirargs *args = data;
encode_readdirplus3args(xdr, args);
prepare_reply_buffer(req, args->pages, 0,
rpc_prepare_reply_pages(req, args->pages, 0,
args->count, NFS3_readdirres_sz);
}
@ -1366,7 +1308,7 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->mask);
if (args->mask & (NFS_ACL | NFS_DFACL)) {
prepare_reply_buffer(req, args->pages, 0,
rpc_prepare_reply_pages(req, args->pages, 0,
NFSACL_MAXPAGES << PAGE_SHIFT,
ACL3_getaclres_sz);
req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
@ -1643,8 +1585,8 @@ static int decode_read3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
count = be32_to_cpup(p++);
eof = be32_to_cpup(p++);
ocount = be32_to_cpup(p++);
@ -1667,9 +1609,6 @@ out_cheating:
count = recvd;
eof = 0;
goto out;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
@ -1690,7 +1629,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
result->op_status = status;
if (status != NFS3_OK)
goto out_status;
result->replen = 3 + ((xdr_stream_pos(xdr) - pos) >> 2);
result->replen = 4 + ((xdr_stream_pos(xdr) - pos) >> 2);
error = decode_read3resok(xdr, result);
out:
return error;
@ -1731,22 +1670,18 @@ static int decode_write3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
result->count = be32_to_cpup(p++);
result->verf->committed = be32_to_cpup(p++);
if (unlikely(result->verf->committed > NFS_FILE_SYNC))
goto out_badvalue;
if (decode_writeverf3(xdr, &result->verf->verifier))
goto out_eio;
return -EIO;
return result->count;
out_badvalue:
dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
out_eio:
return -EIO;
}
static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
@ -2010,12 +1945,12 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
u64 new_cookie;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
@ -2051,8 +1986,8 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
/* In fact, a post_op_fh3: */
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EAGAIN;
if (*p != xdr_zero) {
error = decode_nfs_fh3(xdr, entry->fh);
if (unlikely(error)) {
@ -2069,9 +2004,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EAGAIN;
out_truncated:
dprintk("NFS: directory entry contains invalid file handle\n");
*entry = old;
@ -2183,8 +2115,8 @@ static int decode_fsstat3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 8 * 6 + 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
p = xdr_decode_size3(p, &result->tbytes);
p = xdr_decode_size3(p, &result->fbytes);
p = xdr_decode_size3(p, &result->abytes);
@ -2193,9 +2125,6 @@ static int decode_fsstat3resok(struct xdr_stream *xdr,
xdr_decode_size3(p, &result->afiles);
/* ignore invarsec */
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
@ -2255,8 +2184,8 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
result->rtmax = be32_to_cpup(p++);
result->rtpref = be32_to_cpup(p++);
result->rtmult = be32_to_cpup(p++);
@ -2270,9 +2199,6 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr,
/* ignore properties */
result->lease_time = 0;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
@ -2328,15 +2254,12 @@ static int decode_pathconf3resok(struct xdr_stream *xdr,
__be32 *p;
p = xdr_inline_decode(xdr, 4 * 6);
if (unlikely(p == NULL))
goto out_overflow;
if (unlikely(!p))
return -EIO;
result->max_link = be32_to_cpup(p++);
result->max_namelen = be32_to_cpup(p);
/* ignore remaining fields */
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,

View File

@ -394,7 +394,7 @@ static int decode_write_response(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
return -EIO;
count = be32_to_cpup(p);
if (count > 1)
return -EREMOTEIO;
@ -402,18 +402,14 @@ static int decode_write_response(struct xdr_stream *xdr,
status = decode_opaque_fixed(xdr, &res->stateid,
NFS4_STATEID_SIZE);
if (unlikely(status))
goto out_overflow;
return -EIO;
}
p = xdr_inline_decode(xdr, 8 + 4);
if (unlikely(!p))
goto out_overflow;
return -EIO;
p = xdr_decode_hyper(p, &res->count);
res->verifier.committed = be32_to_cpup(p);
return decode_verifier(xdr, &res->verifier.verifier);
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_copy_requirements(struct xdr_stream *xdr,
@ -422,14 +418,11 @@ static int decode_copy_requirements(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(!p))
goto out_overflow;
return -EIO;
res->consecutive = be32_to_cpup(p++);
res->synchronous = be32_to_cpup(p++);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_copy(struct xdr_stream *xdr, struct nfs42_copy_res *res)
@ -474,15 +467,11 @@ static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
p = xdr_inline_decode(xdr, 4 + 8);
if (unlikely(!p))
goto out_overflow;
return -EIO;
res->sr_eof = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &res->sr_offset);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_layoutstats(struct xdr_stream *xdr)

View File

@ -524,6 +524,31 @@ TRACE_EVENT(nfs4_setup_sequence,
)
);
TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
u32 op,
int error
),
TP_ARGS(op, error),
TP_STRUCT__entry(
__field(u32, op)
__field(int, error)
),
TP_fast_assign(
__entry->op = op;
__entry->error = -error;
),
TP_printk(
"operation %d: nfs status %d (%s)",
__entry->op,
__entry->error, show_nfsv4_errors(__entry->error)
)
);
DECLARE_EVENT_CLASS(nfs4_open_event,
TP_PROTO(
const struct nfs_open_context *ctx,

File diff suppressed because it is too large

View File

@ -11,3 +11,4 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_fsync_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs_xdr_status);

View File

@ -969,6 +969,91 @@ TRACE_EVENT(nfs_commit_done,
)
);
TRACE_DEFINE_ENUM(NFS_OK);
TRACE_DEFINE_ENUM(NFSERR_PERM);
TRACE_DEFINE_ENUM(NFSERR_NOENT);
TRACE_DEFINE_ENUM(NFSERR_IO);
TRACE_DEFINE_ENUM(NFSERR_NXIO);
TRACE_DEFINE_ENUM(NFSERR_ACCES);
TRACE_DEFINE_ENUM(NFSERR_EXIST);
TRACE_DEFINE_ENUM(NFSERR_XDEV);
TRACE_DEFINE_ENUM(NFSERR_NODEV);
TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
TRACE_DEFINE_ENUM(NFSERR_ISDIR);
TRACE_DEFINE_ENUM(NFSERR_INVAL);
TRACE_DEFINE_ENUM(NFSERR_FBIG);
TRACE_DEFINE_ENUM(NFSERR_NOSPC);
TRACE_DEFINE_ENUM(NFSERR_ROFS);
TRACE_DEFINE_ENUM(NFSERR_MLINK);
TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
TRACE_DEFINE_ENUM(NFSERR_DQUOT);
TRACE_DEFINE_ENUM(NFSERR_STALE);
TRACE_DEFINE_ENUM(NFSERR_REMOTE);
TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
#define nfs_show_status(x) \
__print_symbolic(x, \
{ NFS_OK, "OK" }, \
{ NFSERR_PERM, "PERM" }, \
{ NFSERR_NOENT, "NOENT" }, \
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
{ NFSERR_XDEV, "XDEV" }, \
{ NFSERR_NODEV, "NODEV" }, \
{ NFSERR_NOTDIR, "NOTDIR" }, \
{ NFSERR_ISDIR, "ISDIR" }, \
{ NFSERR_INVAL, "INVAL" }, \
{ NFSERR_FBIG, "FBIG" }, \
{ NFSERR_NOSPC, "NOSPC" }, \
{ NFSERR_ROFS, "ROFS" }, \
{ NFSERR_MLINK, "MLINK" }, \
{ NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
{ NFSERR_NOTEMPTY, "NOTEMPTY" }, \
{ NFSERR_DQUOT, "DQUOT" }, \
{ NFSERR_STALE, "STALE" }, \
{ NFSERR_REMOTE, "REMOTE" }, \
{ NFSERR_WFLUSH, "WFLUSH" }, \
{ NFSERR_BADHANDLE, "BADHANDLE" }, \
{ NFSERR_NOT_SYNC, "NOTSYNC" }, \
{ NFSERR_BAD_COOKIE, "BADCOOKIE" }, \
{ NFSERR_NOTSUPP, "NOTSUPP" }, \
{ NFSERR_TOOSMALL, "TOOSMALL" }, \
{ NFSERR_SERVERFAULT, "REMOTEIO" }, \
{ NFSERR_BADTYPE, "BADTYPE" }, \
{ NFSERR_JUKEBOX, "JUKEBOX" })
TRACE_EVENT(nfs_xdr_status,
TP_PROTO(
int error
),
TP_ARGS(error),
TP_STRUCT__entry(
__field(int, error)
),
TP_fast_assign(
__entry->error = error;
),
TP_printk(
"error=%d (%s)",
__entry->error, nfs_show_status(__entry->error)
)
);
#endif /* _TRACE_NFS_H */
#undef TRACE_INCLUDE_PATH

View File

@ -60,16 +60,6 @@ struct nfs4_cb_compound_hdr {
int status;
};
/*
* Handle decode buffer overflows out-of-line.
*/
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("NFS: %s prematurely hit the end of our receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
static __be32 *xdr_encode_empty_array(__be32 *p)
{
*p++ = xdr_zero;
@ -240,7 +230,6 @@ static int decode_cb_op_status(struct xdr_stream *xdr,
*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
out_unexpected:
dprintk("NFSD: Callback server returned operation %d but "
@ -309,7 +298,6 @@ static int decode_cb_compound4res(struct xdr_stream *xdr,
hdr->nops = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
@ -437,7 +425,6 @@ out:
cb->cb_seq_status = status;
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
status = -EIO;
goto out;
}

View File

@ -74,14 +74,12 @@ struct rpc_cred_cache;
struct rpc_authops;
struct rpc_auth {
unsigned int au_cslack; /* call cred size estimate */
/* guess at number of u32's auth adds before
* reply data; normally the verifier size: */
unsigned int au_rslack;
/* for gss, used to calculate au_rslack: */
unsigned int au_verfsize;
unsigned int au_rslack; /* reply cred size estimate */
unsigned int au_verfsize; /* size of reply verifier */
unsigned int au_ralign; /* words before UL header */
unsigned int au_flags; /* various flags */
const struct rpc_authops *au_ops; /* operations */
unsigned int au_flags;
const struct rpc_authops *au_ops;
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
* differ from the flavor in
* au_ops->au_flavor in gss
@ -131,13 +129,15 @@ struct rpc_credops {
void (*crdestroy)(struct rpc_cred *);
int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
__be32 * (*crmarshal)(struct rpc_task *, __be32 *);
int (*crmarshal)(struct rpc_task *task,
struct xdr_stream *xdr);
int (*crrefresh)(struct rpc_task *);
__be32 * (*crvalidate)(struct rpc_task *, __be32 *);
int (*crwrap_req)(struct rpc_task *, kxdreproc_t,
void *, __be32 *, void *);
int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
void *, __be32 *, void *);
int (*crvalidate)(struct rpc_task *task,
struct xdr_stream *xdr);
int (*crwrap_req)(struct rpc_task *task,
struct xdr_stream *xdr);
int (*crunwrap_resp)(struct rpc_task *task,
struct xdr_stream *xdr);
int (*crkey_timeout)(struct rpc_cred *);
char * (*crstringify_acceptor)(struct rpc_cred *);
bool (*crneed_reencode)(struct rpc_task *);
@ -165,10 +165,18 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
void put_rpccred(struct rpc_cred *);
__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
int rpcauth_marshcred(struct rpc_task *task,
struct xdr_stream *xdr);
int rpcauth_checkverf(struct rpc_task *task,
struct xdr_stream *xdr);
int rpcauth_wrap_req_encode(struct rpc_task *task,
struct xdr_stream *xdr);
int rpcauth_wrap_req(struct rpc_task *task,
struct xdr_stream *xdr);
int rpcauth_unwrap_resp_decode(struct rpc_task *task,
struct xdr_stream *xdr);
int rpcauth_unwrap_resp(struct rpc_task *task,
struct xdr_stream *xdr);
bool rpcauth_xmit_need_reencode(struct rpc_task *task);
int rpcauth_refreshcred(struct rpc_task *);
void rpcauth_invalcred(struct rpc_task *);
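
With the credops conversion above, ->crmarshal(), ->crvalidate(), ->crwrap_req() and ->crunwrap_resp() now operate on an xdr_stream and return an int status instead of passing raw __be32 pointers around. Purely as an illustration of the new shape (this is not the in-tree AUTH_NULL or AUTH_UNIX code), a trivial validate hook might look like:

/* Illustration only: a minimal ->crvalidate() under the new signature. */
static int demo_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
	if (!p)
		return -EIO;
	/* Expect a NULL verifier: flavor RPC_AUTH_NULL, zero length. */
	if (*p++ != rpc_auth_null || *p != xdr_zero)
		return -EPROTONOSUPPORT;
	return 0;
}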

View File

@ -169,6 +169,9 @@ int rpcb_v4_register(struct net *net, const u32 program,
const char *netid);
void rpcb_getport_async(struct rpc_task *);
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
unsigned int base, unsigned int len,
unsigned int hdrsize);
void rpc_call_start(struct rpc_task *);
int rpc_call_async(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags,

View File

@ -1,4 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Dumb way to share this static piece of information with nfsd
* Define the string that exports the set of kernel-supported
* Kerberos enctypes. This list is sent via upcall to gssd, and
* is also exposed via the nfsd /proc API. The consumers generally
* treat this as an ordered list, where the first item in the list
* is the most preferred.
*/
#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
/*
* NB: This list includes encryption types that were deprecated
* by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC).
*
* ENCTYPE_AES256_CTS_HMAC_SHA1_96
* ENCTYPE_AES128_CTS_HMAC_SHA1_96
* ENCTYPE_DES3_CBC_SHA1
* ENCTYPE_ARCFOUR_HMAC
*/
#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23"
#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
/*
* NB: This list includes encryption types that were deprecated
* by RFC 8429 and RFC 6649.
*
* ENCTYPE_AES256_CTS_HMAC_SHA1_96
* ENCTYPE_AES128_CTS_HMAC_SHA1_96
* ENCTYPE_DES3_CBC_SHA1
* ENCTYPE_ARCFOUR_HMAC
* ENCTYPE_DES_CBC_MD5
* ENCTYPE_DES_CBC_CRC
* ENCTYPE_DES_CBC_MD4
*/
#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */
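
KRB5_SUPPORTED_ENCTYPES expands to an ordered, comma-separated list of enctype numbers, most preferred first. A hedged sketch of how a consumer might split that string; the real consumers (gssd via the upcall, the nfsd /proc file) live elsewhere, and this helper is purely hypothetical:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: split the enctype string into numeric ids. */
static int demo_parse_enctypes(const char *list, u32 *ids, int max)
{
	char *dup, *rest, *tok;
	int n = 0;

	dup = kstrdup(list, GFP_KERNEL);
	if (!dup)
		return -ENOMEM;
	rest = dup;
	while (n < max && (tok = strsep(&rest, ",")) != NULL) {
		if (kstrtou32(tok, 10, &ids[n]))
			break;		/* stop at a malformed entry */
		n++;			/* preference order is preserved */
	}
	kfree(dup);
	return n;
}

/* With CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES=y this yields 18, 17, 16, 23. */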

View File

@ -87,6 +87,16 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define xdr_one cpu_to_be32(1)
#define xdr_two cpu_to_be32(2)
#define rpc_auth_null cpu_to_be32(RPC_AUTH_NULL)
#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX)
#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT)
#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS)
#define rpc_call cpu_to_be32(RPC_CALL)
#define rpc_reply cpu_to_be32(RPC_REPLY)
#define rpc_msg_accepted cpu_to_be32(RPC_MSG_ACCEPTED)
#define rpc_success cpu_to_be32(RPC_SUCCESS)
#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
@ -95,6 +105,9 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
#define rpc_mismatch cpu_to_be32(RPC_MISMATCH)
#define rpc_auth_error cpu_to_be32(RPC_AUTH_ERROR)
#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
@ -103,7 +116,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
#define rpc_autherr_oldseqnum cpu_to_be32(101)
/*
* Miscellaneous XDR helper functions
@ -167,7 +179,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
@ -217,6 +228,8 @@ struct xdr_stream {
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
unsigned int nwords; /* Remaining decode buffer length */
struct rpc_rqst *rqst; /* For debugging */
};
/*
@ -227,7 +240,8 @@ typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
void *obj);
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
@ -235,7 +249,8 @@ extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
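
Note the extra struct rpc_rqst * argument on xdr_init_encode() and xdr_init_decode(): it fills the new xdr->rqst field used for tracing, and callers with no associated request pass NULL (as the NFSv4 callback and flexfiles hunks above do). A hedged sketch of a client-side caller that does have a request:

/* Illustration only: decode a received reply with the new initializer. */
static int demo_decode_reply(struct rpc_rqst *req, kxdrdproc_t decode, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &req->rq_rcv_buf,
			req->rq_rcv_buf.head[0].iov_base, req);
	return decode(req, &xdr, obj);
}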

View File

@ -196,8 +196,6 @@ struct rpc_xprt {
size_t max_payload; /* largest RPC payload size,
in bytes */
unsigned int tsh_size; /* size of transport specific
header */
struct rpc_wait_queue binding; /* requests waiting on rpcbind */
struct rpc_wait_queue sending; /* requests waiting to send */
@ -362,11 +360,6 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
{
return p + xprt->tsh_size;
}
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
{

View File

@ -0,0 +1,361 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018 Oracle. All rights reserved.
*
* Trace point definitions for the "rpcgss" subsystem.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcgss
#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCGSS_H
#include <linux/tracepoint.h>
/**
** GSS-API related trace events
**/
TRACE_DEFINE_ENUM(GSS_S_BAD_MECH);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAME);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE);
TRACE_DEFINE_ENUM(GSS_S_BAD_BINDINGS);
TRACE_DEFINE_ENUM(GSS_S_BAD_STATUS);
TRACE_DEFINE_ENUM(GSS_S_BAD_SIG);
TRACE_DEFINE_ENUM(GSS_S_NO_CRED);
TRACE_DEFINE_ENUM(GSS_S_NO_CONTEXT);
TRACE_DEFINE_ENUM(GSS_S_DEFECTIVE_TOKEN);
TRACE_DEFINE_ENUM(GSS_S_DEFECTIVE_CREDENTIAL);
TRACE_DEFINE_ENUM(GSS_S_CREDENTIALS_EXPIRED);
TRACE_DEFINE_ENUM(GSS_S_CONTEXT_EXPIRED);
TRACE_DEFINE_ENUM(GSS_S_FAILURE);
TRACE_DEFINE_ENUM(GSS_S_BAD_QOP);
TRACE_DEFINE_ENUM(GSS_S_UNAUTHORIZED);
TRACE_DEFINE_ENUM(GSS_S_UNAVAILABLE);
TRACE_DEFINE_ENUM(GSS_S_DUPLICATE_ELEMENT);
TRACE_DEFINE_ENUM(GSS_S_NAME_NOT_MN);
TRACE_DEFINE_ENUM(GSS_S_CONTINUE_NEEDED);
TRACE_DEFINE_ENUM(GSS_S_DUPLICATE_TOKEN);
TRACE_DEFINE_ENUM(GSS_S_OLD_TOKEN);
TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN);
TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN);
#define show_gss_status(x) \
__print_flags(x, "|", \
{ GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \
{ GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \
{ GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \
{ GSS_S_BAD_BINDINGS, "GSS_S_BAD_BINDINGS" }, \
{ GSS_S_BAD_STATUS, "GSS_S_BAD_STATUS" }, \
{ GSS_S_BAD_SIG, "GSS_S_BAD_SIG" }, \
{ GSS_S_NO_CRED, "GSS_S_NO_CRED" }, \
{ GSS_S_NO_CONTEXT, "GSS_S_NO_CONTEXT" }, \
{ GSS_S_DEFECTIVE_TOKEN, "GSS_S_DEFECTIVE_TOKEN" }, \
{ GSS_S_DEFECTIVE_CREDENTIAL, "GSS_S_DEFECTIVE_CREDENTIAL" }, \
{ GSS_S_CREDENTIALS_EXPIRED, "GSS_S_CREDENTIALS_EXPIRED" }, \
{ GSS_S_CONTEXT_EXPIRED, "GSS_S_CONTEXT_EXPIRED" }, \
{ GSS_S_FAILURE, "GSS_S_FAILURE" }, \
{ GSS_S_BAD_QOP, "GSS_S_BAD_QOP" }, \
{ GSS_S_UNAUTHORIZED, "GSS_S_UNAUTHORIZED" }, \
{ GSS_S_UNAVAILABLE, "GSS_S_UNAVAILABLE" }, \
{ GSS_S_DUPLICATE_ELEMENT, "GSS_S_DUPLICATE_ELEMENT" }, \
{ GSS_S_NAME_NOT_MN, "GSS_S_NAME_NOT_MN" }, \
{ GSS_S_CONTINUE_NEEDED, "GSS_S_CONTINUE_NEEDED" }, \
{ GSS_S_DUPLICATE_TOKEN, "GSS_S_DUPLICATE_TOKEN" }, \
{ GSS_S_OLD_TOKEN, "GSS_S_OLD_TOKEN" }, \
{ GSS_S_UNSEQ_TOKEN, "GSS_S_UNSEQ_TOKEN" }, \
{ GSS_S_GAP_TOKEN, "GSS_S_GAP_TOKEN" })
DECLARE_EVENT_CLASS(rpcgss_gssapi_event,
TP_PROTO(
const struct rpc_task *task,
u32 maj_stat
),
TP_ARGS(task, maj_stat),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, maj_stat)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->maj_stat = maj_stat;
),
TP_printk("task:%u@%u maj_stat=%s",
__entry->task_id, __entry->client_id,
__entry->maj_stat == 0 ?
"GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
);
#define DEFINE_GSSAPI_EVENT(name) \
DEFINE_EVENT(rpcgss_gssapi_event, rpcgss_##name, \
TP_PROTO( \
const struct rpc_task *task, \
u32 maj_stat \
), \
TP_ARGS(task, maj_stat))
TRACE_EVENT(rpcgss_import_ctx,
TP_PROTO(
int status
),
TP_ARGS(status),
TP_STRUCT__entry(
__field(int, status)
),
TP_fast_assign(
__entry->status = status;
),
TP_printk("status=%d", __entry->status)
);
DEFINE_GSSAPI_EVENT(get_mic);
DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);
/**
** GSS auth unwrap failures
**/
TRACE_EVENT(rpcgss_unwrap_failed,
TP_PROTO(
const struct rpc_task *task
),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
),
TP_printk("task:%u@%u", __entry->task_id, __entry->client_id)
);
TRACE_EVENT(rpcgss_bad_seqno,
TP_PROTO(
const struct rpc_task *task,
u32 expected,
u32 received
),
TP_ARGS(task, expected, received),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, expected)
__field(u32, received)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->expected = expected;
__entry->received = received;
),
TP_printk("task:%u@%u expected seqno %u, received seqno %u",
__entry->task_id, __entry->client_id,
__entry->expected, __entry->received)
);
TRACE_EVENT(rpcgss_seqno,
TP_PROTO(
const struct rpc_task *task
),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, seqno)
),
TP_fast_assign(
const struct rpc_rqst *rqst = task->tk_rqstp;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->seqno = rqst->rq_seqno;
),
TP_printk("task:%u@%u xid=0x%08x seqno=%u",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno)
);
TRACE_EVENT(rpcgss_need_reencode,
TP_PROTO(
const struct rpc_task *task,
u32 seq_xmit,
bool ret
),
TP_ARGS(task, seq_xmit, ret),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, seq_xmit)
__field(u32, seqno)
__field(bool, ret)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->seq_xmit = seq_xmit;
__entry->seqno = task->tk_rqstp->rq_seqno;
__entry->ret = ret;
),
TP_printk("task:%u@%u xid=0x%08x rq_seqno=%u seq_xmit=%u reencode %sneeded",
__entry->task_id, __entry->client_id,
__entry->xid, __entry->seqno, __entry->seq_xmit,
__entry->ret ? "" : "un")
);
/**
** gssd upcall related trace events
**/
TRACE_EVENT(rpcgss_upcall_msg,
TP_PROTO(
const char *buf
),
TP_ARGS(buf),
TP_STRUCT__entry(
__string(msg, buf)
),
TP_fast_assign(
__assign_str(msg, buf)
),
TP_printk("msg='%s'", __get_str(msg))
);
TRACE_EVENT(rpcgss_upcall_result,
TP_PROTO(
u32 uid,
int result
),
TP_ARGS(uid, result),
TP_STRUCT__entry(
__field(u32, uid)
__field(int, result)
),
TP_fast_assign(
__entry->uid = uid;
__entry->result = result;
),
TP_printk("for uid %u, result=%d", __entry->uid, __entry->result)
);
TRACE_EVENT(rpcgss_context,
TP_PROTO(
unsigned long expiry,
unsigned long now,
unsigned int timeout,
unsigned int len,
const u8 *data
),
TP_ARGS(expiry, now, timeout, len, data),
TP_STRUCT__entry(
__field(unsigned long, expiry)
__field(unsigned long, now)
__field(unsigned int, timeout)
__field(int, len)
__string(acceptor, data)
),
TP_fast_assign(
__entry->expiry = expiry;
__entry->now = now;
__entry->timeout = timeout;
__entry->len = len;
strncpy(__get_str(acceptor), data, len);
),
TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s",
__entry->expiry, __entry->now, __entry->timeout,
__entry->len, __get_str(acceptor))
);
/**
** Miscellaneous events
*/
TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5);
TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5I);
TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5P);
#define show_pseudoflavor(x) \
__print_symbolic(x, \
{ RPC_AUTH_GSS_KRB5, "RPC_AUTH_GSS_KRB5" }, \
{ RPC_AUTH_GSS_KRB5I, "RPC_AUTH_GSS_KRB5I" }, \
{ RPC_AUTH_GSS_KRB5P, "RPC_AUTH_GSS_KRB5P" })
TRACE_EVENT(rpcgss_createauth,
TP_PROTO(
unsigned int flavor,
int error
),
TP_ARGS(flavor, error),
TP_STRUCT__entry(
__field(unsigned int, flavor)
__field(int, error)
),
TP_fast_assign(
__entry->flavor = flavor;
__entry->error = error;
),
TP_printk("flavor=%s error=%d",
show_pseudoflavor(__entry->flavor), __entry->error)
);
#endif /* _TRACE_RPCGSS_H */
#include <trace/define_trace.h>
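
Each DEFINE_GSSAPI_EVENT(name) above generates a trace_rpcgss_<name>(task, maj_stat) helper. A hypothetical call site, shown only to illustrate the generated API (the real hooks sit in the RPCSEC_GSS client code under net/sunrpc/auth_gss/):

/* Hypothetical wrapper around one of the events defined above. */
static void demo_report_get_mic(const struct rpc_task *task, u32 maj_stat)
{
	trace_rpcgss_get_mic(task, maj_stat);
}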

View File

@ -521,12 +521,18 @@ TRACE_EVENT(xprtrdma_post_send,
TP_STRUCT__entry(
__field(const void *, req)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
__field(int, signaled)
__field(int, status)
),
TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->req = req;
__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
@ -534,9 +540,11 @@ TRACE_EVENT(xprtrdma_post_send,
__entry->status = status;
),
TP_printk("req=%p, %d SGEs%s, status=%d",
TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
__entry->task_id, __entry->client_id,
__entry->req, __entry->num_sge,
(__entry->signaled ? ", signaled" : ""),
(__entry->num_sge == 1 ? "" : "s"),
(__entry->signaled ? "signaled " : ""),
__entry->status
)
);

View File

@ -77,6 +77,50 @@ TRACE_EVENT(rpc_request,
)
);
TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
TRACE_DEFINE_ENUM(RPC_TASK_KILLED);
TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
TRACE_DEFINE_ENUM(RPC_TASK_SENT);
TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
#define rpc_show_task_flags(flags) \
__print_flags(flags, "|", \
{ RPC_TASK_ASYNC, "ASYNC" }, \
{ RPC_TASK_SWAPPER, "SWAPPER" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
{ RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
{ RPC_TASK_KILLED, "KILLED" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
{ RPC_TASK_SOFTCONN, "SOFTCONN" }, \
{ RPC_TASK_SENT, "SENT" }, \
{ RPC_TASK_TIMEOUT, "TIMEOUT" }, \
{ RPC_TASK_NOCONNECT, "NOCONNECT" }, \
{ RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" })
TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
#define rpc_show_runstate(flags) \
__print_flags(flags, "|", \
{ (1UL << RPC_TASK_RUNNING), "RUNNING" }, \
{ (1UL << RPC_TASK_QUEUED), "QUEUED" }, \
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
DECLARE_EVENT_CLASS(rpc_task_running,
TP_PROTO(const struct rpc_task *task, const void *action),
@ -102,10 +146,10 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%pf",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
__entry->status,
__entry->action
)
@ -149,10 +193,10 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
TP_printk("task:%u@%d flags=%s runstate=%s status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
__entry->status,
__entry->timeout,
__get_str(q_name)
@ -169,6 +213,87 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
DEFINE_RPC_QUEUED_EVENT(sleep);
DEFINE_RPC_QUEUED_EVENT(wakeup);
DECLARE_EVENT_CLASS(rpc_failure,
TP_PROTO(const struct rpc_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
),
TP_printk("task:%u@%u",
__entry->task_id, __entry->client_id)
);
#define DEFINE_RPC_FAILURE(name) \
DEFINE_EVENT(rpc_failure, rpc_bad_##name, \
TP_PROTO( \
const struct rpc_task *task \
), \
TP_ARGS(task))
DEFINE_RPC_FAILURE(callhdr);
DEFINE_RPC_FAILURE(verifier);
DECLARE_EVENT_CLASS(rpc_reply_event,
TP_PROTO(
const struct rpc_task *task
),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__string(progname, task->tk_client->cl_program->name)
__field(u32, version)
__string(procname, rpc_proc_name(task))
__string(servername, task->tk_xprt->servername)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__assign_str(progname, task->tk_client->cl_program->name)
__entry->version = task->tk_client->cl_vers;
__assign_str(procname, rpc_proc_name(task))
__assign_str(servername, task->tk_xprt->servername)
),
TP_printk("task:%u@%d server=%s xid=0x%08x %sv%d %s",
__entry->task_id, __entry->client_id, __get_str(servername),
__entry->xid, __get_str(progname), __entry->version,
__get_str(procname))
)
#define DEFINE_RPC_REPLY_EVENT(name) \
DEFINE_EVENT(rpc_reply_event, rpc__##name, \
TP_PROTO( \
const struct rpc_task *task \
), \
TP_ARGS(task))
DEFINE_RPC_REPLY_EVENT(prog_unavail);
DEFINE_RPC_REPLY_EVENT(prog_mismatch);
DEFINE_RPC_REPLY_EVENT(proc_unavail);
DEFINE_RPC_REPLY_EVENT(garbage_args);
DEFINE_RPC_REPLY_EVENT(unparsable);
DEFINE_RPC_REPLY_EVENT(mismatch);
DEFINE_RPC_REPLY_EVENT(stale_creds);
DEFINE_RPC_REPLY_EVENT(bad_creds);
DEFINE_RPC_REPLY_EVENT(auth_tooweak);
TRACE_EVENT(rpc_stats_latency,
TP_PROTO(
@ -210,6 +335,169 @@ TRACE_EVENT(rpc_stats_latency,
__entry->backlog, __entry->rtt, __entry->execute)
);
TRACE_EVENT(rpc_xdr_overflow,
TP_PROTO(
const struct xdr_stream *xdr,
size_t requested
),
TP_ARGS(xdr, requested),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, version)
__field(size_t, requested)
__field(const void *, end)
__field(const void *, p)
__field(const void *, head_base)
__field(size_t, head_len)
__field(const void *, tail_base)
__field(size_t, tail_len)
__field(unsigned int, page_len)
__field(unsigned int, len)
__string(progname,
xdr->rqst->rq_task->tk_client->cl_program->name)
__string(procedure,
xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
),
TP_fast_assign(
if (xdr->rqst) {
const struct rpc_task *task = xdr->rqst->rq_task;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
task->tk_client->cl_program->name)
__entry->version = task->tk_client->cl_vers;
__assign_str(procedure, task->tk_msg.rpc_proc->p_name)
} else {
__entry->task_id = 0;
__entry->client_id = 0;
__assign_str(progname, "unknown")
__entry->version = 0;
__assign_str(procedure, "unknown")
}
__entry->requested = requested;
__entry->end = xdr->end;
__entry->p = xdr->p;
__entry->head_base = xdr->buf->head[0].iov_base,
__entry->head_len = xdr->buf->head[0].iov_len,
__entry->page_len = xdr->buf->page_len,
__entry->tail_base = xdr->buf->tail[0].iov_base,
__entry->tail_len = xdr->buf->tail[0].iov_len,
__entry->len = xdr->buf->len;
),
TP_printk(
"task:%u@%u %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->requested, __entry->p, __entry->end,
__entry->head_base, __entry->head_len,
__entry->page_len,
__entry->tail_base, __entry->tail_len,
__entry->len
)
);
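The xdr=[%p,%zu]/%u/[%p,%zu]/%u portion of this event summarizes the three-part layout of an RPC buffer: a head kvec, a run of pages carrying the bulk payload, and a tail kvec, plus the total length. For illustration, a small stand-alone model of such a buffer and its length; the struct and field names here are simplified stand-ins, not the kernel's struct xdr_buf:

#include <stdio.h>
#include <stddef.h>

/* Illustrative model of a three-part receive buffer. */
struct ex_kvec { void *base; size_t len; };
struct ex_xdr_buf {
	struct ex_kvec head;	/* RPC and upper-layer headers */
	size_t page_len;	/* bulk payload carried in pages */
	struct ex_kvec tail;	/* trailing items, e.g. padding or a MIC */
};

static size_t ex_buf_len(const struct ex_xdr_buf *buf)
{
	return buf->head.len + buf->page_len + buf->tail.len;
}

int main(void)
{
	struct ex_xdr_buf buf = { { NULL, 148 }, 4096, { NULL, 24 } };

	/* Mirrors the tracepoint's xdr=[head]/pages/[tail] summary. */
	printf("xdr=[%p,%zu]/%zu/[%p,%zu] len=%zu\n",
	       buf.head.base, buf.head.len, buf.page_len,
	       buf.tail.base, buf.tail.len, ex_buf_len(&buf));
	return 0;
}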
TRACE_EVENT(rpc_xdr_alignment,
TP_PROTO(
const struct xdr_stream *xdr,
size_t offset,
unsigned int copied
),
TP_ARGS(xdr, offset, copied),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, version)
__field(size_t, offset)
__field(unsigned int, copied)
__field(const void *, head_base)
__field(size_t, head_len)
__field(const void *, tail_base)
__field(size_t, tail_len)
__field(unsigned int, page_len)
__field(unsigned int, len)
__string(progname,
xdr->rqst->rq_task->tk_client->cl_program->name)
__string(procedure,
xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
),
TP_fast_assign(
const struct rpc_task *task = xdr->rqst->rq_task;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__assign_str(progname,
task->tk_client->cl_program->name)
__entry->version = task->tk_client->cl_vers;
__assign_str(procedure, task->tk_msg.rpc_proc->p_name)
__entry->offset = offset;
__entry->copied = copied;
__entry->head_base = xdr->buf->head[0].iov_base,
__entry->head_len = xdr->buf->head[0].iov_len,
__entry->page_len = xdr->buf->page_len,
__entry->tail_base = xdr->buf->tail[0].iov_base,
__entry->tail_len = xdr->buf->tail[0].iov_len,
__entry->len = xdr->buf->len;
),
TP_printk(
"task:%u@%u %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->offset, __entry->copied,
__entry->head_base, __entry->head_len,
__entry->page_len,
__entry->tail_base, __entry->tail_len,
__entry->len
)
);
TRACE_EVENT(rpc_reply_pages,
TP_PROTO(
const struct rpc_rqst *req
),
TP_ARGS(req),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(const void *, head_base)
__field(size_t, head_len)
__field(const void *, tail_base)
__field(size_t, tail_len)
__field(unsigned int, page_len)
),
TP_fast_assign(
__entry->task_id = req->rq_task->tk_pid;
__entry->client_id = req->rq_task->tk_client->cl_clid;
__entry->head_base = req->rq_rcv_buf.head[0].iov_base;
__entry->head_len = req->rq_rcv_buf.head[0].iov_len;
__entry->page_len = req->rq_rcv_buf.page_len;
__entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
__entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
),
TP_printk(
"task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n",
__entry->task_id, __entry->client_id,
__entry->head_base, __entry->head_len,
__entry->page_len,
__entry->tail_base, __entry->tail_len
)
);
/*
* First define the enums in the below macros to be exported to userspace
* via TRACE_DEFINE_ENUM().
@ -404,9 +692,68 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
DEFINE_RPC_XPRT_EVENT(timer);
DEFINE_RPC_XPRT_EVENT(lookup_rqst);
DEFINE_RPC_XPRT_EVENT(transmit);
DEFINE_RPC_XPRT_EVENT(complete_rqst);
TRACE_EVENT(xprt_transmit,
TP_PROTO(
const struct rpc_rqst *rqst,
int status
),
TP_ARGS(rqst, status),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, seqno)
__field(int, status)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->seqno = rqst->rq_seqno;
__entry->status = status;
),
TP_printk(
"task:%u@%u xid=0x%08x seqno=%u status=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->seqno, __entry->status)
);
TRACE_EVENT(xprt_enq_xmit,
TP_PROTO(
const struct rpc_task *task,
int stage
),
TP_ARGS(task, stage),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, seqno)
__field(int, stage)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->seqno = task->tk_rqstp->rq_seqno;
__entry->stage = stage;
),
TP_printk(
"task:%u@%u xid=0x%08x seqno=%u stage=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->seqno, __entry->stage)
);
TRACE_EVENT(xprt_ping,
TP_PROTO(const struct rpc_xprt *xprt, int status),


@ -34,6 +34,22 @@ config RPCSEC_GSS_KRB5
If unsure, say Y.
config SUNRPC_DISABLE_INSECURE_ENCTYPES
bool "Secure RPC: Disable insecure Kerberos encryption types"
depends on RPCSEC_GSS_KRB5
default n
help
Choose Y here to disable the use of deprecated encryption types
with the Kerberos version 5 GSS-API mechanism (RFC 1964). The
deprecated encryption types include DES-CBC-MD5, DES-CBC-CRC,
and DES-CBC-MD4. These types were deprecated by RFC 6649 because
they were found to be insecure.
N is the default because many sites have deployed KDCs and
keytabs that contain only these deprecated encryption types.
Choosing Y prevents the use of known-insecure encryption types
but might result in compatibility problems.
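The option acts purely at build time: when it is selected, the preprocessor symbol CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is defined and the DES entries are compiled out of the supported-enctype table, as the gss_krb5_mech.c hunk further down shows. A stand-alone sketch of the same gating pattern; the enctype strings and the table below are illustrative, not the kernel's:

#include <stdio.h>

/* Build with -DCONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES to drop DES. */
static const char * const supported_enctypes[] = {
#ifndef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
	"des-cbc-md5",			/* deprecated by RFC 6649 */
#endif
	"aes128-cts-hmac-sha1-96",
	"aes256-cts-hmac-sha1-96",
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(supported_enctypes) / sizeof(supported_enctypes[0]); i++)
		printf("%s\n", supported_enctypes[i]);
	return 0;
}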
config SUNRPC_DEBUG
bool "RPC: Enable dprintk debugging"
depends on SUNRPC && SYSCTL


@ -17,9 +17,7 @@
#include <linux/sunrpc/gss_api.h>
#include <linux/spinlock.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
#include <trace/events/sunrpc.h>
#define RPC_CREDCACHE_DEFAULT_HASHBITS (4)
struct rpc_cred_cache {
@ -267,8 +265,6 @@ rpcauth_list_flavors(rpc_authflavor_t *array, int size)
}
}
rcu_read_unlock();
dprintk("RPC: %s returns %d\n", __func__, result);
return result;
}
EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
@ -636,9 +632,6 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
struct rpc_cred *ret;
const struct cred *cred = current_cred();
dprintk("RPC: looking up %s cred\n",
auth->au_ops->au_name);
memset(&acred, 0, sizeof(acred));
acred.cred = cred;
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@ -670,8 +663,6 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
};
struct rpc_cred *ret;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
put_cred(acred.cred);
return ret;
@ -688,8 +679,6 @@ rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags)
if (!acred.principal)
return NULL;
dprintk("RPC: %5u looking up %s machine cred\n",
task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
return auth->au_ops->lookup_cred(auth, &acred, lookupflags);
}
@ -698,8 +687,6 @@ rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, auth->au_ops->au_name);
return rpcauth_lookupcred(auth, lookupflags);
}
@ -771,75 +758,102 @@ destroy:
}
EXPORT_SYMBOL_GPL(put_rpccred);
__be32 *
rpcauth_marshcred(struct rpc_task *task, __be32 *p)
/**
* rpcauth_marshcred - Append RPC credential to end of @xdr
* @task: controlling RPC task
* @xdr: xdr_stream containing initial portion of RPC Call header
*
* On success, an appropriate verifier is added to @xdr, @xdr is
* updated to point past the verifier, and zero is returned.
* Otherwise, @xdr is in an undefined state and a negative errno
* is returned.
*/
int rpcauth_marshcred(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
dprintk("RPC: %5u marshaling %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
return cred->cr_ops->crmarshal(task, p);
return ops->crmarshal(task, xdr);
}
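All of the converted entry points follow the same shape: fetch the flavor's rpc_credops from the request's credential and hand the xdr_stream straight to the per-flavor hook, which reserves stream space, fills it in, and returns a negative errno on failure. For illustration, a minimal reserve-then-fill encoder in plain C; the ex_* stream type and helpers are simplified stand-ins for the xdr_stream API, and the example encodes an AUTH_NULL-style credential (flavor 0, zero-length body):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl() */

/* Simplified stand-in for an XDR encode stream. */
struct ex_stream {
	uint32_t *p;		/* next free 32-bit word */
	uint32_t *end;		/* one past the last usable word */
};

/* Reserve nwords words, or return NULL if the buffer is too small. */
static uint32_t *ex_reserve(struct ex_stream *xdr, size_t nwords)
{
	uint32_t *p = xdr->p;

	if (xdr->end - p < (ptrdiff_t)nwords)
		return NULL;
	xdr->p += nwords;
	return p;
}

/* Encode an AUTH_NULL-style credential: flavor 0, body length 0. */
static int ex_marshal_null(struct ex_stream *xdr)
{
	uint32_t *p = ex_reserve(xdr, 2);

	if (!p)
		return -1;	/* a caller would map this to -EMSGSIZE */
	*p++ = htonl(0);	/* AUTH_NULL */
	*p = htonl(0);		/* zero-length body */
	return 0;
}

int main(void)
{
	uint32_t buf[8];
	struct ex_stream xdr = { buf, buf + 8 };

	if (ex_marshal_null(&xdr))
		return 1;
	printf("encoded %ld bytes\n", (long)((char *)xdr.p - (char *)buf));
	return 0;
}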
__be32 *
rpcauth_checkverf(struct rpc_task *task, __be32 *p)
/**
* rpcauth_wrap_req_encode - XDR encode the RPC procedure
* @task: controlling RPC task
* @xdr: stream where on-the-wire bytes are to be marshalled
*
* On success, @xdr contains the encoded and wrapped message.
* Otherwise, @xdr is in an undefined state.
*/
int rpcauth_wrap_req_encode(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
kxdreproc_t encode = task->tk_msg.rpc_proc->p_encode;
dprintk("RPC: %5u validating %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
return cred->cr_ops->crvalidate(task, p);
}
static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
__be32 *data, void *obj)
{
struct xdr_stream xdr;
xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
encode(rqstp, &xdr, obj);
}
int
rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
__be32 *data, void *obj)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u using %s cred %p to wrap rpc data\n",
task->tk_pid, cred->cr_ops->cr_name, cred);
if (cred->cr_ops->crwrap_req)
return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
/* By default, we encode the arguments normally. */
rpcauth_wrap_req_encode(encode, rqstp, data, obj);
encode(task->tk_rqstp, xdr, task->tk_msg.rpc_argp);
return 0;
}
EXPORT_SYMBOL_GPL(rpcauth_wrap_req_encode);
static int
rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
__be32 *data, void *obj)
/**
* rpcauth_wrap_req - XDR encode and wrap the RPC procedure
* @task: controlling RPC task
* @xdr: stream where on-the-wire bytes are to be marshalled
*
* On success, @xdr contains the encoded and wrapped message,
* and zero is returned. Otherwise, @xdr is in an undefined
* state and a negative errno is returned.
*/
int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
{
struct xdr_stream xdr;
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
return decode(rqstp, &xdr, obj);
return ops->crwrap_req(task, xdr);
}
/**
* rpcauth_checkverf - Validate verifier in RPC Reply header
* @task: controlling RPC task
* @xdr: xdr_stream containing RPC Reply header
*
* On success, @xdr is updated to point past the verifier and
* zero is returned. Otherwise, @xdr is in an undefined state
* and a negative errno is returned.
*/
int
rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
__be32 *data, void *obj)
rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n",
task->tk_pid, cred->cr_ops->cr_name, cred);
if (cred->cr_ops->crunwrap_resp)
return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
data, obj);
/* By default, we decode the arguments normally. */
return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
return ops->crvalidate(task, xdr);
}
/**
* rpcauth_unwrap_resp_decode - Invoke XDR decode function
* @task: controlling RPC task
* @xdr: stream where the Reply message resides
*
* Returns zero on success; otherwise a negative errno is returned.
*/
int
rpcauth_unwrap_resp_decode(struct rpc_task *task, struct xdr_stream *xdr)
{
kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
return decode(task->tk_rqstp, xdr, task->tk_msg.rpc_resp);
}
EXPORT_SYMBOL_GPL(rpcauth_unwrap_resp_decode);
/**
* rpcauth_unwrap_resp - Invoke unwrap and decode function for the cred
* @task: controlling RPC task
* @xdr: stream where the Reply message resides
*
* Returns zero on success; otherwise a negative errno is returned.
*/
int
rpcauth_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
{
const struct rpc_credops *ops = task->tk_rqstp->rq_cred->cr_ops;
return ops->crunwrap_resp(task, xdr);
}
bool
@ -865,8 +879,6 @@ rpcauth_refreshcred(struct rpc_task *task)
goto out;
cred = task->tk_rqstp->rq_cred;
}
dprintk("RPC: %5u refreshing %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
err = cred->cr_ops->crrefresh(task);
out:
@ -880,8 +892,6 @@ rpcauth_invalcred(struct rpc_task *task)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u invalidating %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
if (cred)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
}


@ -7,7 +7,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
auth_rpcgss-y := auth_gss.o gss_generic_token.o \
gss_mech_switch.o svcauth_gss.o \
gss_rpc_upcall.o gss_rpc_xdr.o
gss_rpc_upcall.o gss_rpc_xdr.o trace.o
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o


@ -1,3 +1,4 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
* linux/net/sunrpc/auth_gss/auth_gss.c
*
@ -8,34 +9,8 @@
*
* Dug Song <dugsong@monkey.org>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@ -55,6 +30,8 @@
#include "../netns.h"
#include <trace/events/rpcgss.h>
static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
@ -260,6 +237,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
}
ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
if (ret < 0) {
trace_rpcgss_import_ctx(ret);
p = ERR_PTR(ret);
goto err;
}
@ -275,12 +253,9 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
if (IS_ERR(p))
goto err;
done:
dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u acceptor %.*s\n",
__func__, ctx->gc_expiry, now, timeout, ctx->gc_acceptor.len,
ctx->gc_acceptor.data);
return p;
trace_rpcgss_context(ctx->gc_expiry, now, timeout,
ctx->gc_acceptor.len, ctx->gc_acceptor.data);
err:
dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
return p;
}
@ -354,10 +329,8 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
if (auth && pos->auth->service != auth->service)
continue;
refcount_inc(&pos->count);
dprintk("RPC: %s found msg %p\n", __func__, pos);
return pos;
}
dprintk("RPC: %s found nothing\n", __func__);
return NULL;
}
@ -456,7 +429,7 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
size_t buflen = sizeof(gss_msg->databuf);
int len;
len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name,
len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
from_kuid(&init_user_ns, gss_msg->uid));
buflen -= len;
p += len;
@ -467,7 +440,7 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
* identity that we are authenticating to.
*/
if (target_name) {
len = scnprintf(p, buflen, "target=%s ", target_name);
len = scnprintf(p, buflen, " target=%s", target_name);
buflen -= len;
p += len;
gss_msg->msg.len += len;
@ -487,11 +460,11 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
char *c = strchr(service_name, '@');
if (!c)
len = scnprintf(p, buflen, "service=%s ",
len = scnprintf(p, buflen, " service=%s",
service_name);
else
len = scnprintf(p, buflen,
"service=%.*s srchost=%s ",
" service=%.*s srchost=%s",
(int)(c - service_name),
service_name, c + 1);
buflen -= len;
@ -500,17 +473,17 @@ static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
}
if (mech->gm_upcall_enctypes) {
len = scnprintf(p, buflen, "enctypes=%s ",
len = scnprintf(p, buflen, " enctypes=%s",
mech->gm_upcall_enctypes);
buflen -= len;
p += len;
gss_msg->msg.len += len;
}
trace_rpcgss_upcall_msg(gss_msg->databuf);
len = scnprintf(p, buflen, "\n");
if (len == 0)
goto out_overflow;
gss_msg->msg.len += len;
gss_msg->msg.data = gss_msg->databuf;
return 0;
out_overflow:
@ -603,8 +576,6 @@ gss_refresh_upcall(struct rpc_task *task)
struct rpc_pipe *pipe;
int err = 0;
dprintk("RPC: %5u %s for uid %u\n",
task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid));
gss_msg = gss_setup_upcall(gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
@ -612,7 +583,8 @@ gss_refresh_upcall(struct rpc_task *task)
warn_gssd();
task->tk_timeout = 15*HZ;
rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
return -EAGAIN;
err = -EAGAIN;
goto out;
}
if (IS_ERR(gss_msg)) {
err = PTR_ERR(gss_msg);
@ -635,9 +607,8 @@ gss_refresh_upcall(struct rpc_task *task)
spin_unlock(&pipe->lock);
gss_release_msg(gss_msg);
out:
dprintk("RPC: %5u %s for uid %u result %d\n",
task->tk_pid, __func__,
from_kuid(&init_user_ns, cred->cr_cred->fsuid), err);
trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
cred->cr_cred->fsuid), err);
return err;
}
@ -652,14 +623,13 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
DEFINE_WAIT(wait);
int err;
dprintk("RPC: %s for uid %u\n",
__func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid));
retry:
err = 0;
/* if gssd is down, just skip upcalling altogether */
if (!gssd_running(net)) {
warn_gssd();
return -EACCES;
err = -EACCES;
goto out;
}
gss_msg = gss_setup_upcall(gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
@ -700,8 +670,8 @@ out_intr:
finish_wait(&gss_msg->waitqueue, &wait);
gss_release_msg(gss_msg);
out:
dprintk("RPC: %s for uid %u result %d\n",
__func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid), err);
trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
cred->cr_cred->fsuid), err);
return err;
}
@ -794,7 +764,6 @@ err_put_ctx:
err:
kfree(buf);
out:
dprintk("RPC: %s returning %zd\n", __func__, err);
return err;
}
@ -863,8 +832,6 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
if (msg->errno < 0) {
dprintk("RPC: %s releasing msg %p\n",
__func__, gss_msg);
refcount_inc(&gss_msg->count);
gss_unhash_msg(gss_msg);
if (msg->errno == -ETIMEDOUT)
@ -1024,8 +991,6 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
struct rpc_auth * auth;
int err = -ENOMEM; /* XXX? */
dprintk("RPC: creating GSS authenticator for client %p\n", clnt);
if (!try_module_get(THIS_MODULE))
return ERR_PTR(err);
if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
@ -1041,10 +1006,8 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
gss_auth->net = get_net(rpc_net_ns(clnt));
err = -EINVAL;
gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
if (!gss_auth->mech) {
dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
if (!gss_auth->mech)
goto err_put_net;
}
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
if (gss_auth->service == 0)
goto err_put_mech;
@ -1053,6 +1016,8 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
auth->au_rslack = GSS_VERF_SLACK >> 2;
auth->au_verfsize = GSS_VERF_SLACK >> 2;
auth->au_ralign = GSS_VERF_SLACK >> 2;
auth->au_flags = 0;
auth->au_ops = &authgss_ops;
auth->au_flavor = flavor;
@ -1099,6 +1064,7 @@ err_free:
kfree(gss_auth);
out_dec:
module_put(THIS_MODULE);
trace_rpcgss_createauth(flavor, err);
return ERR_PTR(err);
}
@ -1135,9 +1101,6 @@ gss_destroy(struct rpc_auth *auth)
struct gss_auth *gss_auth = container_of(auth,
struct gss_auth, rpc_auth);
dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
auth, auth->au_flavor);
if (hash_hashed(&gss_auth->hash)) {
spin_lock(&gss_auth_hash_lock);
hash_del(&gss_auth->hash);
@ -1300,8 +1263,6 @@ gss_send_destroy_context(struct rpc_cred *cred)
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
dprintk("RPC: %s\n", __func__);
gss_delete_sec_context(&ctx->gc_gss_ctx);
kfree(ctx->gc_wire_ctx.data);
kfree(ctx->gc_acceptor.data);
@ -1324,7 +1285,6 @@ gss_free_ctx(struct gss_cl_ctx *ctx)
static void
gss_free_cred(struct gss_cred *gss_cred)
{
dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
kfree(gss_cred);
}
@ -1381,10 +1341,6 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
struct gss_cred *cred = NULL;
int err = -ENOMEM;
dprintk("RPC: %s for uid %d, flavor %d\n",
__func__, from_kuid(&init_user_ns, acred->cred->fsuid),
auth->au_flavor);
if (!(cred = kzalloc(sizeof(*cred), gfp)))
goto out_err;
@ -1400,7 +1356,6 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
return &cred->gc_base;
out_err:
dprintk("RPC: %s failed with error %d\n", __func__, err);
return ERR_PTR(err);
}
@ -1526,69 +1481,84 @@ out:
}
/*
* Marshal credentials.
* Maybe we should keep a cached credential for performance reasons.
*/
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
* Marshal credentials.
*
* The expensive part is computing the verifier. We can't cache a
* pre-computed version of the verifier because the seqno, which
* is different every time, is included in the MIC.
*/
static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_cred *cred = req->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
__be32 *cred_len;
__be32 *p, *cred_len;
u32 maj_stat = 0;
struct xdr_netobj mic;
struct kvec iov;
struct xdr_buf verf_buf;
int status;
dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
/* Credential */
*p++ = htonl(RPC_AUTH_GSS);
p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
ctx->gc_wire_ctx.len);
if (!p)
goto marshal_failed;
*p++ = rpc_auth_gss;
cred_len = p++;
spin_lock(&ctx->gc_seq_lock);
req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
spin_unlock(&ctx->gc_seq_lock);
if (req->rq_seqno == MAXSEQ)
goto out_expired;
goto expired;
trace_rpcgss_seqno(task);
*p++ = htonl((u32) RPC_GSS_VERSION);
*p++ = htonl((u32) ctx->gc_proc);
*p++ = htonl((u32) req->rq_seqno);
*p++ = htonl((u32) gss_cred->gc_service);
*p++ = cpu_to_be32(RPC_GSS_VERSION);
*p++ = cpu_to_be32(ctx->gc_proc);
*p++ = cpu_to_be32(req->rq_seqno);
*p++ = cpu_to_be32(gss_cred->gc_service);
p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
*cred_len = htonl((p - (cred_len + 1)) << 2);
*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
/* Verifier */
/* We compute the checksum for the verifier over the xdr-encoded bytes
* starting with the xid and ending at the end of the credential: */
iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
req->rq_snd_buf.head[0].iov_base);
iov.iov_base = req->rq_snd_buf.head[0].iov_base;
iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
xdr_buf_from_iov(&iov, &verf_buf);
/* set verifier flavor*/
*p++ = htonl(RPC_AUTH_GSS);
p = xdr_reserve_space(xdr, sizeof(*p));
if (!p)
goto marshal_failed;
*p++ = rpc_auth_gss;
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
goto out_expired;
} else if (maj_stat != 0) {
pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
task->tk_status = -EIO;
goto out_put_ctx;
}
p = xdr_encode_opaque(p, NULL, mic.len);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
goto expired;
else if (maj_stat != 0)
goto bad_mic;
if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
goto marshal_failed;
status = 0;
out:
gss_put_ctx(ctx);
return p;
out_expired:
return status;
expired:
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
task->tk_status = -EKEYEXPIRED;
out_put_ctx:
gss_put_ctx(ctx);
return NULL;
status = -EKEYEXPIRED;
goto out;
marshal_failed:
status = -EMSGSIZE;
goto out;
bad_mic:
trace_rpcgss_get_mic(task, maj_stat);
status = -EIO;
goto out;
}
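The rewritten gss_marshal() reserves the whole credential in one call: six fixed 32-bit words (flavor, body length, GSS version, gss proc, sequence number, service) plus the opaque context handle (its length word followed by the quad-padded handle bytes), then back-fills the body length once the write pointer has advanced. A worked stand-alone version of that layout and back-fill; the 5-byte context handle and the field values are sample numbers, not real session state:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(), ntohl() */

#define XDR_QUAD(len)	(((len) + 3) & ~3u)	/* round up to 4 bytes */

int main(void)
{
	uint32_t buf[32] = { 0 };		/* zeroed so XDR padding is zero */
	uint32_t *p = buf, *cred_len;
	const uint8_t handle[5] = { 1, 2, 3, 4, 5 };	/* sample context handle */

	*p++ = htonl(6);			/* flavor: RPC_AUTH_GSS */
	cred_len = p++;				/* body length, back-filled below */
	*p++ = htonl(1);			/* RPC_GSS_VERSION */
	*p++ = htonl(0);			/* gss proc: DATA */
	*p++ = htonl(42);			/* sequence number */
	*p++ = htonl(2);			/* service: 2 = integrity */
	*p++ = htonl(sizeof(handle));		/* opaque handle: length word ... */
	memcpy(p, handle, sizeof(handle));	/* ... then quad-padded bytes */
	p += XDR_QUAD(sizeof(handle)) / 4;

	/* Same back-fill as gss_marshal(): words written after cred_len, in bytes. */
	*cred_len = htonl((uint32_t)((p - (cred_len + 1)) * 4));

	printf("credential body is %u bytes\n", (unsigned int)ntohl(*cred_len));
	return 0;
}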
static int gss_renew_cred(struct rpc_task *task)
@ -1662,116 +1632,105 @@ gss_refresh_null(struct rpc_task *task)
return 0;
}
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
static int
gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
__be32 *seq = NULL;
__be32 *p, *seq = NULL;
struct kvec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
u32 flav,len;
u32 maj_stat;
__be32 *ret = ERR_PTR(-EIO);
u32 len, maj_stat;
int status;
dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
if (!p)
goto validate_failed;
if (*p++ != rpc_auth_gss)
goto validate_failed;
len = be32_to_cpup(p);
if (len > RPC_MAX_AUTH_SIZE)
goto validate_failed;
p = xdr_inline_decode(xdr, len);
if (!p)
goto validate_failed;
flav = ntohl(*p++);
if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
goto out_bad;
if (flav != RPC_AUTH_GSS)
goto out_bad;
seq = kmalloc(4, GFP_NOFS);
if (!seq)
goto out_bad;
*seq = htonl(task->tk_rqstp->rq_seqno);
goto validate_failed;
*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
iov.iov_base = seq;
iov.iov_len = 4;
xdr_buf_from_iov(&iov, &verf_buf);
mic.data = (u8 *)p;
mic.len = len;
ret = ERR_PTR(-EACCES);
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat) {
dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
task->tk_pid, __func__, maj_stat);
goto out_bad;
}
if (maj_stat)
goto bad_mic;
/* We leave it to unwrap to calculate au_rslack. For now we just
* calculate the length of the verifier: */
cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
status = 0;
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
task->tk_pid, __func__);
kfree(seq);
return p + XDR_QUADLEN(len);
out_bad:
gss_put_ctx(ctx);
dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
PTR_ERR(ret));
kfree(seq);
return ret;
return status;
validate_failed:
status = -EIO;
goto out;
bad_mic:
trace_rpcgss_verify_mic(task, maj_stat);
status = -EACCES;
goto out;
}
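gss_validate() now records only how large the verifier was (au_verfsize, in 32-bit XDR words) and defers the reply-slack math to the unwrap path. The count covers the flavor word, the length word, and the MIC rounded up to a quad boundary. A worked example of that arithmetic; the 37-byte MIC is a made-up sample size:

#include <stdio.h>

/* Length in 32-bit XDR words, rounded up; same idea as XDR_QUADLEN(). */
static unsigned int quadlen(unsigned int nbytes)
{
	return (nbytes + 3) >> 2;
}

int main(void)
{
	unsigned int mic_len = 37;	/* sample Kerberos MIC, in bytes */

	/* flavor word + length word + quad-aligned MIC */
	unsigned int verfsize = quadlen(mic_len) + 2;

	printf("au_verfsize = %u words (%u bytes)\n", verfsize, verfsize * 4);
	return 0;
}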
static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
__be32 *p, void *obj)
static int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
struct rpc_task *task, struct xdr_stream *xdr)
{
struct xdr_stream xdr;
xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
encode(rqstp, &xdr, obj);
}
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
kxdreproc_t encode, struct rpc_rqst *rqstp,
__be32 *p, void *obj)
{
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
struct xdr_buf integ_buf;
__be32 *integ_len = NULL;
struct rpc_rqst *rqstp = task->tk_rqstp;
struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
struct xdr_netobj mic;
u32 offset;
__be32 *q;
struct kvec *iov;
u32 maj_stat = 0;
int status = -EIO;
__be32 *p, *integ_len;
u32 offset, maj_stat;
p = xdr_reserve_space(xdr, 2 * sizeof(*p));
if (!p)
goto wrap_failed;
integ_len = p++;
*p = cpu_to_be32(rqstp->rq_seqno);
if (rpcauth_wrap_req_encode(task, xdr))
goto wrap_failed;
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
gss_wrap_req_encode(encode, rqstp, p, obj);
if (xdr_buf_subsegment(snd_buf, &integ_buf,
offset, snd_buf->len - offset))
return status;
*integ_len = htonl(integ_buf.len);
goto wrap_failed;
*integ_len = cpu_to_be32(integ_buf.len);
/* guess whether we're in the head or the tail: */
if (snd_buf->page_len || snd_buf->tail[0].iov_len)
iov = snd_buf->tail;
else
iov = snd_buf->head;
p = iov->iov_base + iov->iov_len;
p = xdr_reserve_space(xdr, 0);
if (!p)
goto wrap_failed;
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
status = -EIO; /* XXX? */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
else if (maj_stat)
return status;
q = xdr_encode_opaque(p, NULL, mic.len);
offset = (u8 *)q - (u8 *)p;
iov->iov_len += offset;
snd_buf->len += offset;
goto bad_mic;
/* Check that the trailing MIC fit in the buffer, after the fact */
if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
goto wrap_failed;
return 0;
wrap_failed:
return -EMSGSIZE;
bad_mic:
trace_rpcgss_get_mic(task, maj_stat);
return -EIO;
}
static void
@ -1822,61 +1781,62 @@ out:
return -EAGAIN;
}
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
kxdreproc_t encode, struct rpc_rqst *rqstp,
__be32 *p, void *obj)
static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_rqst *rqstp = task->tk_rqstp;
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
u32 offset;
u32 maj_stat;
u32 pad, offset, maj_stat;
int status;
__be32 *opaque_len;
__be32 *p, *opaque_len;
struct page **inpages;
int first;
int pad;
struct kvec *iov;
char *tmp;
status = -EIO;
p = xdr_reserve_space(xdr, 2 * sizeof(*p));
if (!p)
goto wrap_failed;
opaque_len = p++;
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
*p++ = htonl(rqstp->rq_seqno);
*p = cpu_to_be32(rqstp->rq_seqno);
gss_wrap_req_encode(encode, rqstp, p, obj);
if (rpcauth_wrap_req_encode(task, xdr))
goto wrap_failed;
status = alloc_enc_pages(rqstp);
if (status)
return status;
if (unlikely(status))
goto wrap_failed;
first = snd_buf->page_base >> PAGE_SHIFT;
inpages = snd_buf->pages + first;
snd_buf->pages = rqstp->rq_enc_pages;
snd_buf->page_base -= first << PAGE_SHIFT;
/*
* Give the tail its own page, in case we need extra space in the
* head when wrapping:
* Move the tail into its own page, in case gss_wrap needs
* more space in the head when wrapping.
*
* call_allocate() allocates twice the slack space required
* by the authentication flavor to rq_callsize.
* For GSS, slack is GSS_CRED_SLACK.
* Still... Why can't gss_wrap just slide the tail down?
*/
if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
char *tmp;
tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
snd_buf->tail[0].iov_base = tmp;
}
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* slack space should prevent this ever happening: */
BUG_ON(snd_buf->len > snd_buf->buflen);
status = -EIO;
if (unlikely(snd_buf->len > snd_buf->buflen))
goto wrap_failed;
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
else if (maj_stat)
return status;
goto bad_wrap;
*opaque_len = htonl(snd_buf->len - offset);
/* guess whether we're in the head or the tail: */
*opaque_len = cpu_to_be32(snd_buf->len - offset);
/* guess whether the pad goes into the head or the tail: */
if (snd_buf->page_len || snd_buf->tail[0].iov_len)
iov = snd_buf->tail;
else
@ -1888,118 +1848,154 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
snd_buf->len += pad;
return 0;
wrap_failed:
return status;
bad_wrap:
trace_rpcgss_wrap(task, maj_stat);
return -EIO;
}
static int
gss_wrap_req(struct rpc_task *task,
kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
int status = -EIO;
int status;
dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
status = -EIO;
if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
/* The spec seems a little ambiguous here, but I think that not
* wrapping context destruction requests makes the most sense.
*/
gss_wrap_req_encode(encode, rqstp, p, obj);
status = 0;
status = rpcauth_wrap_req_encode(task, xdr);
goto out;
}
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
gss_wrap_req_encode(encode, rqstp, p, obj);
status = 0;
status = rpcauth_wrap_req_encode(task, xdr);
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
status = gss_wrap_req_integ(cred, ctx, task, xdr);
break;
case RPC_GSS_SVC_PRIVACY:
status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
status = gss_wrap_req_priv(cred, ctx, task, xdr);
break;
default:
status = -EIO;
}
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
return status;
}
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
struct rpc_rqst *rqstp, __be32 **p)
static int
gss_unwrap_resp_auth(struct rpc_cred *cred)
{
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
struct xdr_buf integ_buf;
struct xdr_netobj mic;
u32 data_offset, mic_offset;
u32 integ_len;
u32 maj_stat;
int status = -EIO;
struct rpc_auth *auth = cred->cr_auth;
integ_len = ntohl(*(*p)++);
auth->au_rslack = auth->au_verfsize;
auth->au_ralign = auth->au_verfsize;
return 0;
}
static int
gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
struct xdr_stream *xdr)
{
struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf;
u32 data_offset, mic_offset, integ_len, maj_stat;
struct rpc_auth *auth = cred->cr_auth;
struct xdr_netobj mic;
__be32 *p;
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
if (unlikely(!p))
goto unwrap_failed;
integ_len = be32_to_cpup(p++);
if (integ_len & 3)
return status;
data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
goto unwrap_failed;
data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base;
mic_offset = integ_len + data_offset;
if (mic_offset > rcv_buf->len)
return status;
if (ntohl(*(*p)++) != rqstp->rq_seqno)
return status;
if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
mic_offset - data_offset))
return status;
goto unwrap_failed;
if (be32_to_cpup(p) != rqstp->rq_seqno)
goto bad_seqno;
if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
goto unwrap_failed;
if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
return status;
goto unwrap_failed;
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat != GSS_S_COMPLETE)
return status;
goto bad_mic;
auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
auth->au_ralign = auth->au_verfsize + 2;
return 0;
unwrap_failed:
trace_rpcgss_unwrap_failed(task);
return -EIO;
bad_seqno:
trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p));
return -EIO;
bad_mic:
trace_rpcgss_verify_mic(task, maj_stat);
return -EIO;
}
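For krb5i the reply body after the verifier is laid out as length, sequence number, the RPC results, then a trailing MIC opaque. Hence the new bookkeeping: au_ralign (where the decoded results start) is the verifier plus those two leading words, and au_rslack additionally reserves the MIC opaque, one length word plus the quad-aligned MIC. A worked example with sample sizes; both the verifier and MIC lengths here are illustrative:

#include <stdio.h>

static unsigned int quadlen(unsigned int nbytes)
{
	return (nbytes + 3) >> 2;	/* bytes to 32-bit XDR words */
}

int main(void)
{
	unsigned int verfsize = 12;	/* from gss_validate(), in words */
	unsigned int mic_len = 37;	/* trailing MIC, in bytes */

	/* integ_len word + seqno word come before the RPC results ... */
	unsigned int ralign = verfsize + 2;
	/* ... and the MIC opaque (length word + padded MIC) follows them. */
	unsigned int rslack = verfsize + 2 + 1 + quadlen(mic_len);

	printf("au_ralign = %u words, au_rslack = %u words\n", ralign, rslack);
	return 0;
}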
static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
struct rpc_rqst *rqstp, __be32 **p)
static int
gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
struct xdr_stream *xdr)
{
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
u32 offset;
u32 opaque_len;
u32 maj_stat;
int status = -EIO;
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
struct kvec *head = rqstp->rq_rcv_buf.head;
struct rpc_auth *auth = cred->cr_auth;
unsigned int savedlen = rcv_buf->len;
u32 offset, opaque_len, maj_stat;
__be32 *p;
opaque_len = ntohl(*(*p)++);
offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
if (unlikely(!p))
goto unwrap_failed;
opaque_len = be32_to_cpup(p++);
offset = (u8 *)(p) - (u8 *)head->iov_base;
if (offset + opaque_len > rcv_buf->len)
return status;
/* remove padding: */
goto unwrap_failed;
rcv_buf->len = offset + opaque_len;
maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat != GSS_S_COMPLETE)
return status;
if (ntohl(*(*p)++) != rqstp->rq_seqno)
return status;
goto bad_unwrap;
/* gss_unwrap decrypted the sequence number */
if (be32_to_cpup(p++) != rqstp->rq_seqno)
goto bad_seqno;
/* gss_unwrap redacts the opaque blob from the head iovec.
* rcv_buf has changed, thus the stream needs to be reset.
*/
xdr_init_decode(xdr, rcv_buf, p, rqstp);
auth->au_rslack = auth->au_verfsize + 2 +
XDR_QUADLEN(savedlen - rcv_buf->len);
auth->au_ralign = auth->au_verfsize + 2 +
XDR_QUADLEN(savedlen - rcv_buf->len);
return 0;
}
static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
__be32 *p, void *obj)
{
struct xdr_stream xdr;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
return decode(rqstp, &xdr, obj);
unwrap_failed:
trace_rpcgss_unwrap_failed(task);
return -EIO;
bad_seqno:
trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
return -EIO;
bad_unwrap:
trace_rpcgss_unwrap(task, maj_stat);
return -EIO;
}
static bool
@ -2014,14 +2010,14 @@ gss_xmit_need_reencode(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_cred *cred = req->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
u32 win, seq_xmit;
u32 win, seq_xmit = 0;
bool ret = true;
if (!ctx)
return true;
goto out;
if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
goto out;
goto out_ctx;
seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
@ -2030,56 +2026,51 @@ gss_xmit_need_reencode(struct rpc_task *task)
seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
if (seq_xmit == tmp) {
ret = false;
goto out;
goto out_ctx;
}
}
win = ctx->gc_win;
if (win > 0)
ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
out:
out_ctx:
gss_put_ctx(ctx);
out:
trace_rpcgss_need_reencode(task, seq_xmit, ret);
return ret;
}
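gss_xmit_need_reencode() asks whether a queued request's GSS sequence number has already slid out of the server's replay window, in which case it must be re-encoded with a fresh seqno and MIC before going back on the wire. The comparison is serial-number arithmetic so it behaves across 32-bit wraparound. A minimal sketch of that window test; treat seq_is_newer() below as an illustration of the idea rather than a copy of the kernel helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Serial-number comparison: true when a is logically ahead of b,
 * even across a 32-bit wraparound. */
static bool seq_is_newer(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* Does seqno still fall inside the window of size win that ends at
 * the highest sequence number transmitted so far? */
static bool seq_in_window(uint32_t seqno, uint32_t seq_xmit, uint32_t win)
{
	return seq_is_newer(seqno, seq_xmit - win);
}

int main(void)
{
	uint32_t seq_xmit = 1000, win = 128;

	printf("%d\n", seq_in_window(950, seq_xmit, win));	/* 1: still valid */
	printf("%d\n", seq_in_window(800, seq_xmit, win));	/* 0: needs re-encode */
	return 0;
}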
static int
gss_unwrap_resp(struct rpc_task *task,
kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct rpc_rqst *rqstp = task->tk_rqstp;
struct rpc_cred *cred = rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
__be32 *savedp = p;
struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
int savedlen = head->iov_len;
int status = -EIO;
int status = -EIO;
if (ctx->gc_proc != RPC_GSS_PROC_DATA)
goto out_decode;
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
status = gss_unwrap_resp_auth(cred);
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
if (status)
goto out;
status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
break;
case RPC_GSS_SVC_PRIVACY:
status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
if (status)
goto out;
status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
break;
}
/* take into account extra slack for integrity and privacy cases: */
cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
+ (savedlen - head->iov_len);
if (status)
goto out;
out_decode:
status = gss_unwrap_req_decode(decode, rqstp, p, obj);
status = rpcauth_unwrap_resp_decode(task, xdr);
out:
gss_put_ctx(ctx);
dprintk("RPC: %5u %s returning %d\n",
task->tk_pid, __func__, status);
return status;
}


@ -1,3 +1,4 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
* linux/net/sunrpc/gss_krb5_mech.c
*
@ -6,32 +7,6 @@
*
* Andy Adamson <andros@umich.edu>
* J. Bruce Fields <bfields@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <crypto/hash.h>
@ -53,6 +28,7 @@
static struct gss_api_mech gss_kerberos_mech; /* forward declaration */
static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
#ifndef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
/*
* DES (All DES enctypes are mapped to the same gss functionality)
*/
@ -74,6 +50,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.cksumlength = 8,
.keyed_cksum = 0,
},
#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
/*
* RC4-HMAC
*/


@ -570,14 +570,16 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
*/
movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
buf->head[0].iov_len);
if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
buf->head[0].iov_len)
return GSS_S_FAILURE;
memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
/* Trim off the trailing "extra count" and checksum blob */
xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
return GSS_S_COMPLETE;
}
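The v2 unwrap path now rejects a malformed token with GSS_S_FAILURE instead of hitting a BUG_ON(), and it shortens the buffer by adjusting buf->len directly rather than calling xdr_buf_trim(). A tiny stand-alone example of the same validate-then-memmove-then-shrink pattern; the header length and message contents are made up:

#include <stdio.h>
#include <string.h>

/* Slide payload over a consumed token header, but only after checking
 * that the move stays inside the buffer; never trust caller lengths. */
static int strip_header(char *buf, size_t *buflen, size_t hdrlen)
{
	if (hdrlen > *buflen)
		return -1;		/* reject instead of asserting */
	memmove(buf, buf + hdrlen, *buflen - hdrlen);
	*buflen -= hdrlen;		/* trim, like adjusting buf->len */
	return 0;
}

int main(void)
{
	char msg[] = "HDRpayload";
	size_t len = sizeof(msg) - 1;

	if (strip_header(msg, &len, 3) == 0)
		printf("%.*s\n", (int)len, msg);	/* prints "payload" */
	return 0;
}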


@ -1,3 +1,4 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
* linux/net/sunrpc/gss_mech_switch.c
*
@ -5,32 +6,6 @@
* All rights reserved.
*
* J. Bruce Fields <bfields@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/types.h>


@ -1,21 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* linux/net/sunrpc/gss_rpc_upcall.c
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>


@ -1,21 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* linux/net/sunrpc/gss_rpc_upcall.h
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _GSS_RPC_UPCALL_H
@ -45,4 +32,5 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data);
void init_gssp_clnt(struct sunrpc_net *);
int set_gssp_clnt(struct net *);
void clear_gssp_clnt(struct sunrpc_net *);
#endif /* _GSS_RPC_UPCALL_H */


@ -1,21 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* GSS Proxy upcall module
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/sunrpc/svcauth.h>


@ -1,21 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* GSS Proxy upcall module
*
* Copyright (C) 2012 Simo Sorce <simo@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_GSS_RPC_XDR_H
@ -262,6 +249,4 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
#define GSSX_ARG_wrap_size_limit_sz 0
#define GSSX_RES_wrap_size_limit_sz 0
#endif /* _LINUX_GSS_RPC_XDR_H */


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Neil Brown <neilb@cse.unsw.edu.au>
* J. Bruce Fields <bfields@umich.edu>
@ -896,7 +897,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
if (svc_getnl(&buf->head[0]) != seq)
goto out;
/* trim off the mic and padding at the end before returning */
xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
buf->len -= 4 + round_up_to_quad(mic.len);
stat = 0;
out:
kfree(mic.data);


@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, 2019 Oracle. All rights reserved.
*/
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/gss_err.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rpcgss.h>


@ -59,15 +59,21 @@ nul_match(struct auth_cred *acred, struct rpc_cred *cred, int taskflags)
/*
* Marshal credential.
*/
static __be32 *
nul_marshal(struct rpc_task *task, __be32 *p)
static int
nul_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
*p++ = htonl(RPC_AUTH_NULL);
*p++ = 0;
*p++ = htonl(RPC_AUTH_NULL);
*p++ = 0;
__be32 *p;
return p;
p = xdr_reserve_space(xdr, 4 * sizeof(*p));
if (!p)
return -EMSGSIZE;
/* Credential */
*p++ = rpc_auth_null;
*p++ = xdr_zero;
/* Verifier */
*p++ = rpc_auth_null;
*p = xdr_zero;
return 0;
}
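After conversion, nul_marshal() reserves four words at once and emits the entire AUTH_NULL credential plus verifier as zeroes: (flavor 0, length 0) for each, sixteen bytes in total. A stand-alone dump of those wire bytes, written in plain C rather than against the xdr_stream API:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl() */

int main(void)
{
	uint32_t wire[4];
	size_t i;

	wire[0] = htonl(0);	/* credential flavor: AUTH_NULL */
	wire[1] = htonl(0);	/* credential body length: 0 */
	wire[2] = htonl(0);	/* verifier flavor: AUTH_NULL */
	wire[3] = htonl(0);	/* verifier body length: 0 */

	for (i = 0; i < sizeof(wire); i++)
		printf("%02x%s", ((unsigned char *)wire)[i],
		       (i % 4 == 3) ? "\n" : " ");
	return 0;	/* sixteen zero bytes on the wire */
}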
/*
@ -80,25 +86,19 @@ nul_refresh(struct rpc_task *task)
return 0;
}
static __be32 *
nul_validate(struct rpc_task *task, __be32 *p)
static int
nul_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
rpc_authflavor_t flavor;
u32 size;
__be32 *p;
flavor = ntohl(*p++);
if (flavor != RPC_AUTH_NULL) {
printk("RPC: bad verf flavor: %u\n", flavor);
return ERR_PTR(-EIO);
}
size = ntohl(*p++);
if (size != 0) {
printk("RPC: bad verf size: %u\n", size);
return ERR_PTR(-EIO);
}
return p;
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
if (!p)
return -EIO;
if (*p++ != rpc_auth_null)
return -EIO;
if (*p != xdr_zero)
return -EIO;
return 0;
}
const struct rpc_authops authnull_ops = {
@ -114,6 +114,8 @@ static
struct rpc_auth null_auth = {
.au_cslack = NUL_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
.au_verfsize = NUL_REPLYSLACK,
.au_ralign = NUL_REPLYSLACK,
.au_ops = &authnull_ops,
.au_flavor = RPC_AUTH_NULL,
.au_count = REFCOUNT_INIT(1),
@ -125,8 +127,10 @@ const struct rpc_credops null_credops = {
.crdestroy = nul_destroy_cred,
.crmatch = nul_match,
.crmarshal = nul_marshal,
.crwrap_req = rpcauth_wrap_req_encode,
.crrefresh = nul_refresh,
.crvalidate = nul_validate,
.crunwrap_resp = rpcauth_unwrap_resp_decode,
};
static


@ -28,8 +28,6 @@ static mempool_t *unix_pool;
static struct rpc_auth *
unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
dprintk("RPC: creating UNIX authenticator for client %p\n",
clnt);
refcount_inc(&unix_auth.au_count);
return &unix_auth;
}
@ -37,7 +35,6 @@ unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
static void
unx_destroy(struct rpc_auth *auth)
{
dprintk("RPC: destroying UNIX authenticator %p\n", auth);
}
/*
@ -48,10 +45,6 @@ unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_NOFS);
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
from_kuid(&init_user_ns, acred->cred->fsuid),
from_kgid(&init_user_ns, acred->cred->fsgid));
rpcauth_init_cred(ret, acred, auth, &unix_credops);
ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
return ret;
@ -61,7 +54,7 @@ static void
unx_free_cred_callback(struct rcu_head *head)
{
struct rpc_cred *rpc_cred = container_of(head, struct rpc_cred, cr_rcu);
dprintk("RPC: unx_free_cred %p\n", rpc_cred);
put_cred(rpc_cred->cr_cred);
mempool_free(rpc_cred, unix_pool);
}
@ -106,37 +99,55 @@ unx_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
* Marshal credentials.
* Maybe we should keep a cached credential for performance reasons.
*/
static __be32 *
unx_marshal(struct rpc_task *task, __be32 *p)
static int
unx_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
__be32 *base, *hold;
__be32 *p, *cred_len, *gidarr_len;
int i;
struct group_info *gi = cred->cr_cred->group_info;
*p++ = htonl(RPC_AUTH_UNIX);
base = p++;
*p++ = htonl(jiffies/HZ);
/* Credential */
/*
* Copy the UTS nodename captured when the client was created.
*/
p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
p = xdr_reserve_space(xdr, 3 * sizeof(*p));
if (!p)
goto marshal_failed;
*p++ = rpc_auth_unix;
cred_len = p++;
*p++ = xdr_zero; /* stamp */
if (xdr_stream_encode_opaque(xdr, clnt->cl_nodename,
clnt->cl_nodelen) < 0)
goto marshal_failed;
p = xdr_reserve_space(xdr, 3 * sizeof(*p));
if (!p)
goto marshal_failed;
*p++ = cpu_to_be32(from_kuid(&init_user_ns, cred->cr_cred->fsuid));
*p++ = cpu_to_be32(from_kgid(&init_user_ns, cred->cr_cred->fsgid));
*p++ = htonl((u32) from_kuid(&init_user_ns, cred->cr_cred->fsuid));
*p++ = htonl((u32) from_kgid(&init_user_ns, cred->cr_cred->fsgid));
hold = p++;
gidarr_len = p++;
if (gi)
for (i = 0; i < UNX_NGROUPS && i < gi->ngroups; i++)
*p++ = htonl((u32) from_kgid(&init_user_ns, gi->gid[i]));
*hold = htonl(p - hold - 1); /* gid array length */
*base = htonl((p - base - 1) << 2); /* cred length */
*p++ = cpu_to_be32(from_kgid(&init_user_ns,
gi->gid[i]));
*gidarr_len = cpu_to_be32(p - gidarr_len - 1);
*cred_len = cpu_to_be32((p - cred_len - 1) << 2);
p = xdr_reserve_space(xdr, (p - gidarr_len - 1) << 2);
if (!p)
goto marshal_failed;
*p++ = htonl(RPC_AUTH_NULL);
*p++ = htonl(0);
/* Verifier */
return p;
p = xdr_reserve_space(xdr, 2 * sizeof(*p));
if (!p)
goto marshal_failed;
*p++ = rpc_auth_null;
*p = xdr_zero;
return 0;
marshal_failed:
return -EMSGSIZE;
}
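For reference, the credential built by unx_marshal() follows the standard AUTH_UNIX (AUTH_SYS) layout from RFC 5531: a stamp, the machine name as an XDR opaque, uid, gid, and a counted array of supplementary gids, all followed by an AUTH_NULL verifier. A hedged sketch of the body-length arithmetic that ends up in cred_len above (unix_cred_body_bytes is illustrative, not a kernel helper):

/* XDR size, in bytes, of an AUTH_UNIX credential body:
 * stamp (4) + opaque machinename (4 + name length rounded up to 4) +
 * uid (4) + gid (4) + gid count (4) + 4 per supplementary gid.
 */
static unsigned int unix_cred_body_bytes(unsigned int namelen,
					 unsigned int ngids)
{
	return 4 + (4 + ((namelen + 3) & ~3u)) + 4 + 4 + 4 + 4 * ngids;
}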
/*
@ -149,29 +160,35 @@ unx_refresh(struct rpc_task *task)
return 0;
}
static __be32 *
unx_validate(struct rpc_task *task, __be32 *p)
static int
unx_validate(struct rpc_task *task, struct xdr_stream *xdr)
{
rpc_authflavor_t flavor;
u32 size;
struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
__be32 *p;
u32 size;
flavor = ntohl(*p++);
if (flavor != RPC_AUTH_NULL &&
flavor != RPC_AUTH_UNIX &&
flavor != RPC_AUTH_SHORT) {
printk("RPC: bad verf flavor: %u\n", flavor);
return ERR_PTR(-EIO);
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
if (!p)
return -EIO;
switch (*p++) {
case rpc_auth_null:
case rpc_auth_unix:
case rpc_auth_short:
break;
default:
return -EIO;
}
size = be32_to_cpup(p);
if (size > RPC_MAX_AUTH_SIZE)
return -EIO;
p = xdr_inline_decode(xdr, size);
if (!p)
return -EIO;
size = ntohl(*p++);
if (size > RPC_MAX_AUTH_SIZE) {
printk("RPC: giant verf size: %u\n", size);
return ERR_PTR(-EIO);
}
task->tk_rqstp->rq_cred->cr_auth->au_rslack = (size >> 2) + 2;
p += (size >> 2);
return p;
auth->au_verfsize = XDR_QUADLEN(size) + 2;
auth->au_rslack = XDR_QUADLEN(size) + 2;
auth->au_ralign = XDR_QUADLEN(size) + 2;
return 0;
}
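The three au_* fields set above are measured in XDR words. XDR_QUADLEN() rounds a byte count up to whole words, so an empty verifier body costs two words (flavor plus length) and every started group of four body bytes adds one more. A small worked sketch (verf_words is illustrative):

/* XDR_QUADLEN(): bytes rounded up to whole 4-byte XDR words. */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

/* Reply verifier size in words: flavor word + length word + body words.
 * body_bytes == 0 -> 2 words; body_bytes == 20 -> 7 words.
 */
static unsigned int verf_words(unsigned int body_bytes)
{
	return 2 + XDR_QUADLEN(body_bytes);
}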
int __init rpc_init_authunix(void)
@ -198,6 +215,7 @@ static
struct rpc_auth unix_auth = {
.au_cslack = UNX_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
.au_verfsize = NUL_REPLYSLACK,
.au_ops = &authunix_ops,
.au_flavor = RPC_AUTH_UNIX,
.au_count = REFCOUNT_INIT(1),
@ -209,6 +227,8 @@ const struct rpc_credops unix_credops = {
.crdestroy = unx_destroy_cred,
.crmatch = unx_match,
.crmarshal = unx_marshal,
.crwrap_req = rpcauth_wrap_req_encode,
.crrefresh = unx_refresh,
.crvalidate = unx_validate,
.crunwrap_resp = rpcauth_unwrap_resp_decode,
};

View File

@ -77,8 +77,10 @@ static void call_timeout(struct rpc_task *task);
static void call_connect(struct rpc_task *task);
static void call_connect_status(struct rpc_task *task);
static __be32 *rpc_encode_header(struct rpc_task *task);
static __be32 *rpc_verify_header(struct rpc_task *task);
static int rpc_encode_header(struct rpc_task *task,
struct xdr_stream *xdr);
static int rpc_decode_header(struct rpc_task *task,
struct xdr_stream *xdr);
static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
@ -1162,6 +1164,29 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
* rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
* @req: RPC request to prepare
* @pages: vector of struct page pointers
* @base: offset in first page where receive should start, in bytes
* @len: expected size of the upper layer data payload, in bytes
* @hdrsize: expected size of upper layer reply header, in XDR words
*
*/
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
unsigned int base, unsigned int len,
unsigned int hdrsize)
{
/* Subtract one to force an extra word of buffer space for the
* payload's XDR pad to fall into the rcv_buf's tail iovec.
*/
hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;
xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
trace_rpc_reply_pages(req);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
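The hdrsize handed to xdr_inline_pages() is thus the fixed RPC reply header, plus the auth flavor's au_ralign (the verifier's footprint in words), plus the upper-layer reply header, less one word so the payload's XDR pad falls into the tail. A sketch of that computation with symbolic inputs (reply_data_offset is illustrative, not a kernel function):

/* Byte offset at which page data is expected to start in the receive
 * buffer; all inputs are in XDR words until the final shift to bytes.
 */
static unsigned int reply_data_offset(unsigned int rephdr_words,
				      unsigned int au_ralign,
				      unsigned int upper_hdr_words)
{
	return (rephdr_words + au_ralign + upper_hdr_words - 1) << 2;
}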
void
rpc_call_start(struct rpc_task *task)
{
@ -1665,7 +1690,7 @@ call_refreshresult(struct rpc_task *task)
static void
call_allocate(struct rpc_task *task)
{
unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@ -1690,9 +1715,10 @@ call_allocate(struct rpc_task *task)
* and reply headers, and convert both values
* to byte sizes.
*/
req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
proc->p_arglen;
req->rq_callsize <<= 2;
req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + proc->p_replen;
req->rq_rcvsize <<= 2;
status = xprt->ops->buf_alloc(task);
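The buffer sizing above works entirely in XDR words before shifting to bytes: the send buffer holds the fixed call header plus twice the flavor's call slack plus the encoded arguments, and the receive buffer holds the fixed reply header plus the flavor's reply slack plus the decoded results. A compact restatement under those assumptions (rpc_buf_bytes is illustrative; the *_words parameters stand in for RPC_CALLHDRSIZE and RPC_REPHDRSIZE):

static void rpc_buf_bytes(unsigned int callhdr_words, unsigned int rephdr_words,
			  unsigned int cslack, unsigned int rslack,
			  unsigned int arglen, unsigned int replen,
			  unsigned int *callsize, unsigned int *rcvsize)
{
	*callsize = (callhdr_words + (cslack << 1) + arglen) << 2;
	*rcvsize = (rephdr_words + rslack + replen) << 2;
}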
@ -1728,10 +1754,7 @@ static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
kxdreproc_t encode;
__be32 *p;
dprint_status(task);
struct xdr_stream xdr;
xdr_buf_init(&req->rq_snd_buf,
req->rq_buffer,
@ -1740,18 +1763,13 @@ rpc_xdr_encode(struct rpc_task *task)
req->rq_rbuffer,
req->rq_rcvsize);
p = rpc_encode_header(task);
if (p == NULL)
req->rq_snd_buf.head[0].iov_len = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf,
req->rq_snd_buf.head[0].iov_base, req);
if (rpc_encode_header(task, &xdr))
return;
encode = task->tk_msg.rpc_proc->p_encode;
if (encode == NULL)
return;
task->tk_status = rpcauth_wrap_req(task, encode, req, p,
task->tk_msg.rpc_argp);
if (task->tk_status == 0)
xprt_request_prepare(req);
task->tk_status = rpcauth_wrap_req(task, &xdr);
}
/*
@ -1762,6 +1780,7 @@ call_encode(struct rpc_task *task)
{
if (!rpc_task_need_encode(task))
goto out;
dprint_status(task);
/* Encode here so that rpcsec_gss can use correct sequence number. */
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
@ -1779,6 +1798,8 @@ call_encode(struct rpc_task *task)
rpc_exit(task, task->tk_status);
}
return;
} else {
xprt_request_prepare(task->tk_rqstp);
}
/* Add task to reply queue before transmission to avoid races */
@ -2255,12 +2276,11 @@ call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
struct xdr_stream xdr;
dprint_status(task);
if (!decode) {
if (!task->tk_msg.rpc_proc->p_decode) {
task->tk_action = rpc_exit_task;
return;
}
@ -2296,212 +2316,190 @@ call_decode(struct rpc_task *task)
goto out_retry;
}
p = rpc_verify_header(task);
if (IS_ERR(p)) {
if (p == ERR_PTR(-EAGAIN))
goto out_retry;
xdr_init_decode(&xdr, &req->rq_rcv_buf,
req->rq_rcv_buf.head[0].iov_base, req);
switch (rpc_decode_header(task, &xdr)) {
case 0:
task->tk_action = rpc_exit_task;
task->tk_status = rpcauth_unwrap_resp(task, &xdr);
dprintk("RPC: %5u %s result %d\n",
task->tk_pid, __func__, task->tk_status);
return;
}
task->tk_action = rpc_exit_task;
task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
task->tk_msg.rpc_resp);
dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
task->tk_status);
return;
case -EAGAIN:
out_retry:
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
xdr_free_bvec(&req->rq_rcv_buf);
req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
task->tk_status = 0;
/* Note: rpc_decode_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
xdr_free_bvec(&req->rq_rcv_buf);
req->rq_reply_bytes_recvd = 0;
req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
}
}
}
static __be32 *
rpc_encode_header(struct rpc_task *task)
static int
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
__be32 *p = req->rq_svec[0].iov_base;
__be32 *p;
int error;
/* FIXME: check buffer size? */
error = -EMSGSIZE;
p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
if (!p)
goto out_fail;
*p++ = req->rq_xid;
*p++ = rpc_call;
*p++ = cpu_to_be32(RPC_VERSION);
*p++ = cpu_to_be32(clnt->cl_prog);
*p++ = cpu_to_be32(clnt->cl_vers);
*p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);
p = xprt_skip_transport_header(req->rq_xprt, p);
*p++ = req->rq_xid; /* XID */
*p++ = htonl(RPC_CALL); /* CALL */
*p++ = htonl(RPC_VERSION); /* RPC version */
*p++ = htonl(clnt->cl_prog); /* program number */
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
p = rpcauth_marshcred(task, p);
if (p)
req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
return p;
error = rpcauth_marshcred(task, xdr);
if (error < 0)
goto out_fail;
return 0;
out_fail:
trace_rpc_bad_callhdr(task);
rpc_exit(task, error);
return error;
}
static __be32 *
rpc_verify_header(struct rpc_task *task)
static noinline int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_clnt *clnt = task->tk_client;
struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
__be32 *p = iov->iov_base;
u32 n;
int error = -EACCES;
__be32 *p;
if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
/* RFC-1014 says that the representation of XDR data must be a
* multiple of four bytes
* - if it isn't pointer subtraction in the NFS client may give
* undefined results
*/
dprintk("RPC: %5u %s: XDR representation not a multiple of"
" 4 bytes: 0x%x\n", task->tk_pid, __func__,
task->tk_rqstp->rq_rcv_buf.len);
error = -EIO;
goto out_err;
}
if ((len -= 3) < 0)
goto out_overflow;
/* RFC-1014 says that the representation of XDR data must be a
* multiple of four bytes
* - if it isn't pointer subtraction in the NFS client may give
* undefined results
*/
if (task->tk_rqstp->rq_rcv_buf.len & 3)
goto out_badlen;
p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
task->tk_pid, __func__, n);
error = -EIO;
goto out_garbage;
}
p = xdr_inline_decode(xdr, 3 * sizeof(*p));
if (!p)
goto out_unparsable;
p++; /* skip XID */
if (*p++ != rpc_reply)
goto out_unparsable;
if (*p++ != rpc_msg_accepted)
goto out_msg_denied;
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_AUTH_ERROR:
break;
case RPC_MISMATCH:
dprintk("RPC: %5u %s: RPC call version mismatch!\n",
task->tk_pid, __func__);
error = -EPROTONOSUPPORT;
goto out_err;
default:
dprintk("RPC: %5u %s: RPC call rejected, "
"unknown error: %x\n",
task->tk_pid, __func__, n);
error = -EIO;
goto out_err;
}
if (--len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_AUTH_REJECTEDCRED:
case RPC_AUTH_REJECTEDVERF:
case RPCSEC_GSS_CREDPROBLEM:
case RPCSEC_GSS_CTXPROBLEM:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry stale creds\n",
task->tk_pid, __func__);
rpcauth_invalcred(task);
/* Ensure we obtain a new XID! */
xprt_release(task);
task->tk_action = call_reserve;
goto out_retry;
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
/* possibly garbled cred/verf? */
if (!task->tk_garb_retry)
break;
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retry garbled creds\n",
task->tk_pid, __func__);
task->tk_action = call_encode;
goto out_retry;
case RPC_AUTH_TOOWEAK:
printk(KERN_NOTICE "RPC: server %s requires stronger "
"authentication.\n",
task->tk_xprt->servername);
break;
default:
dprintk("RPC: %5u %s: unknown auth error: %x\n",
task->tk_pid, __func__, n);
error = -EIO;
}
dprintk("RPC: %5u %s: call rejected %d\n",
task->tk_pid, __func__, n);
goto out_err;
}
p = rpcauth_checkverf(task, p);
if (IS_ERR(p)) {
error = PTR_ERR(p);
dprintk("RPC: %5u %s: auth check failed with %d\n",
task->tk_pid, __func__, error);
goto out_garbage; /* bad verifier, retry */
}
len = p - (__be32 *)iov->iov_base - 1;
if (len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_SUCCESS:
return p;
case RPC_PROG_UNAVAIL:
dprintk("RPC: %5u %s: program %u is unsupported "
"by server %s\n", task->tk_pid, __func__,
(unsigned int)clnt->cl_prog,
task->tk_xprt->servername);
error = rpcauth_checkverf(task, xdr);
if (error)
goto out_verifier;
p = xdr_inline_decode(xdr, sizeof(*p));
if (!p)
goto out_unparsable;
switch (*p) {
case rpc_success:
return 0;
case rpc_prog_unavail:
trace_rpc__prog_unavail(task);
error = -EPFNOSUPPORT;
goto out_err;
case RPC_PROG_MISMATCH:
dprintk("RPC: %5u %s: program %u, version %u unsupported "
"by server %s\n", task->tk_pid, __func__,
(unsigned int)clnt->cl_prog,
(unsigned int)clnt->cl_vers,
task->tk_xprt->servername);
case rpc_prog_mismatch:
trace_rpc__prog_mismatch(task);
error = -EPROTONOSUPPORT;
goto out_err;
case RPC_PROC_UNAVAIL:
dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
"version %u on server %s\n",
task->tk_pid, __func__,
rpc_proc_name(task),
clnt->cl_prog, clnt->cl_vers,
task->tk_xprt->servername);
case rpc_proc_unavail:
trace_rpc__proc_unavail(task);
error = -EOPNOTSUPP;
goto out_err;
case RPC_GARBAGE_ARGS:
dprintk("RPC: %5u %s: server saw garbage\n",
task->tk_pid, __func__);
break; /* retry */
case rpc_garbage_args:
trace_rpc__garbage_args(task);
break;
default:
dprintk("RPC: %5u %s: server accept status: %x\n",
task->tk_pid, __func__, n);
/* Also retry */
trace_rpc__unparsable(task);
}
out_garbage:
clnt->cl_stats->rpcgarbage++;
if (task->tk_garb_retry) {
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retrying\n",
task->tk_pid, __func__);
task->tk_action = call_encode;
out_retry:
return ERR_PTR(-EAGAIN);
return -EAGAIN;
}
out_err:
rpc_exit(task, error);
dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
__func__, error);
return ERR_PTR(error);
out_overflow:
dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
__func__);
return error;
out_badlen:
trace_rpc__unparsable(task);
error = -EIO;
goto out_err;
out_unparsable:
trace_rpc__unparsable(task);
error = -EIO;
goto out_garbage;
out_verifier:
trace_rpc_bad_verifier(task);
goto out_garbage;
out_msg_denied:
p = xdr_inline_decode(xdr, sizeof(*p));
if (!p)
goto out_unparsable;
switch (*p++) {
case rpc_auth_error:
break;
case rpc_mismatch:
trace_rpc__mismatch(task);
error = -EPROTONOSUPPORT;
goto out_err;
default:
trace_rpc__unparsable(task);
error = -EIO;
goto out_err;
}
p = xdr_inline_decode(xdr, sizeof(*p));
if (!p)
goto out_unparsable;
switch (*p++) {
case rpc_autherr_rejectedcred:
case rpc_autherr_rejectedverf:
case rpcsec_gsserr_credproblem:
case rpcsec_gsserr_ctxproblem:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
trace_rpc__stale_creds(task);
rpcauth_invalcred(task);
/* Ensure we obtain a new XID! */
xprt_release(task);
task->tk_action = call_reserve;
return -EAGAIN;
case rpc_autherr_badcred:
case rpc_autherr_badverf:
/* possibly garbled cred/verf? */
if (!task->tk_garb_retry)
break;
task->tk_garb_retry--;
trace_rpc__bad_creds(task);
task->tk_action = call_encode;
return -EAGAIN;
case rpc_autherr_tooweak:
trace_rpc__auth_tooweak(task);
pr_warn("RPC: server %s requires stronger authentication.\n",
task->tk_xprt->servername);
break;
default:
trace_rpc__unparsable(task);
error = -EIO;
}
goto out_err;
}
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,

View File

@ -1144,17 +1144,6 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
/*
* Setup response header for TCP, it has a 4B record length field.
*/
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
struct kvec *resv = &rqstp->rq_res.head[0];
/* tcp needs a space for the record length... */
svc_putnl(resv, 0);
}
/*
* Common routine for processing the RPC request.
*/
@ -1182,10 +1171,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Setup reply header */
if (rqstp->rq_prot == IPPROTO_TCP)
svc_tcp_prep_reply_hdr(rqstp);
svc_putu32(resv, rqstp->rq_xid);
vers = svc_getnl(argv);
@ -1443,6 +1428,10 @@ svc_process(struct svc_rqst *rqstp)
goto out_drop;
}
/* Reserve space for the record marker */
if (rqstp->rq_prot == IPPROTO_TCP)
svc_putnl(resv, 0);
/* Returns 1 for send, 0 for drop */
if (likely(svc_process_common(rqstp, argv, resv)))
return svc_send(rqstp);

View File

@ -16,6 +16,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>
/*
* XDR functions for basic NFS types
@ -162,6 +163,15 @@ xdr_free_bvec(struct xdr_buf *buf)
buf->bvec = NULL;
}
/**
* xdr_inline_pages - Prepare receive buffer for a large reply
* @xdr: xdr_buf into which reply will be placed
* @offset: expected offset where data payload will start, in bytes
* @pages: vector of struct page pointers
* @base: offset in first page where receive should start, in bytes
* @len: expected size of the upper layer data payload, in bytes
*
*/
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
@ -179,6 +189,8 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
tail->iov_base = buf + offset;
tail->iov_len = buflen - offset;
if ((xdr->page_len & 3) == 0)
tail->iov_len -= sizeof(__be32);
xdr->buflen += len;
}
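The extra tail word reserved by rpc_prepare_reply_pages() is only needed when the page data requires an XDR pad; when page_len is already a multiple of four there is no pad, which is why the new check hands the word back. For reference, the XDR opaque sizing rule (xdr_opaque_wire_bytes is an illustrative helper):

/* XDR opaque on the wire: a 4-byte length, the bytes themselves, then
 * 0-3 bytes of zero padding so the next item starts on a word boundary.
 */
static unsigned int xdr_opaque_wire_bytes(unsigned int len)
{
	unsigned int pad = (4 - (len & 3)) & 3;	/* 0 when len % 4 == 0 */

	return 4 + len + pad;
}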
@ -346,13 +358,15 @@ EXPORT_SYMBOL_GPL(_copy_from_pages);
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
static void
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
struct kvec *head, *tail;
size_t copy, offs;
unsigned int pglen = buf->page_len;
unsigned int result;
result = 0;
tail = buf->tail;
head = buf->head;
@ -366,6 +380,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
copy = tail->iov_len - len;
memmove((char *)tail->iov_base + len,
tail->iov_base, copy);
result += copy;
}
/* Copy from the inlined pages into the tail */
copy = len;
@ -376,11 +391,13 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
copy = 0;
else if (copy > tail->iov_len - offs)
copy = tail->iov_len - offs;
if (copy != 0)
if (copy != 0) {
_copy_from_pages((char *)tail->iov_base + offs,
buf->pages,
buf->page_base + pglen + offs - len,
copy);
result += copy;
}
/* Do we also need to copy data from the head into the tail ? */
if (len > pglen) {
offs = copy = len - pglen;
@ -390,6 +407,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
(char *)head->iov_base +
head->iov_len - offs,
copy);
result += copy;
}
}
/* Now handle pages */
@ -405,12 +423,15 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
_copy_to_pages(buf->pages, buf->page_base,
(char *)head->iov_base + head->iov_len - len,
copy);
result += copy;
}
head->iov_len -= len;
buf->buflen -= len;
/* Have we truncated the message? */
if (buf->len > buf->buflen)
buf->len = buf->buflen;
return result;
}
/**
@ -422,14 +443,16 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
* 'len' bytes. The extra data is not lost, but is instead
* moved into the tail.
*/
static void
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
struct kvec *tail;
size_t copy;
unsigned int pglen = buf->page_len;
unsigned int tailbuf_len;
unsigned int result;
result = 0;
tail = buf->tail;
BUG_ON (len > pglen);
@ -447,18 +470,22 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
if (tail->iov_len > len) {
char *p = (char *)tail->iov_base + len;
memmove(p, tail->iov_base, tail->iov_len - len);
result += tail->iov_len - len;
} else
copy = tail->iov_len;
/* Copy from the inlined pages into the tail */
_copy_from_pages((char *)tail->iov_base,
buf->pages, buf->page_base + pglen - len,
copy);
result += copy;
}
buf->page_len -= len;
buf->buflen -= len;
/* Have we truncated the message? */
if (buf->len > buf->buflen)
buf->len = buf->buflen;
return result;
}
void
@ -483,6 +510,7 @@ EXPORT_SYMBOL_GPL(xdr_stream_pos);
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer in which to encode data
* @p: current pointer inside XDR buffer
* @rqst: pointer to controlling rpc_rqst, for debugging
*
* Note: at the moment the RPC client only passes the length of our
* scratch buffer in the xdr_buf's header kvec. Previously this
@ -491,7 +519,8 @@ EXPORT_SYMBOL_GPL(xdr_stream_pos);
* of the buffer length, and takes care of adjusting the kvec
* length for us.
*/
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
struct rpc_rqst *rqst)
{
struct kvec *iov = buf->head;
int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
@ -513,6 +542,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
buf->len += len;
iov->iov_len += len;
}
xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
@ -551,9 +581,9 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
int frag1bytes, frag2bytes;
if (nbytes > PAGE_SIZE)
return NULL; /* Bigger buffers require special handling */
goto out_overflow; /* Bigger buffers require special handling */
if (xdr->buf->len + nbytes > xdr->buf->buflen)
return NULL; /* Sorry, we're totally out of space */
goto out_overflow; /* Sorry, we're totally out of space */
frag1bytes = (xdr->end - xdr->p) << 2;
frag2bytes = nbytes - frag1bytes;
if (xdr->iov)
@ -582,6 +612,9 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
xdr->buf->page_len += frag2bytes;
xdr->buf->len += nbytes;
return p;
out_overflow:
trace_rpc_xdr_overflow(xdr, nbytes);
return NULL;
}
/**
@ -819,8 +852,10 @@ static bool xdr_set_next_buffer(struct xdr_stream *xdr)
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer from which to decode data
* @p: current pointer inside XDR buffer
* @rqst: pointer to controlling rpc_rqst, for debugging
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
struct rpc_rqst *rqst)
{
xdr->buf = buf;
xdr->scratch.iov_base = NULL;
@ -836,6 +871,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
xdr->nwords -= p - xdr->p;
xdr->p = p;
}
xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
@ -854,7 +890,7 @@ void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
buf->page_len = len;
buf->buflen = len;
buf->len = len;
xdr_init_decode(xdr, buf, NULL);
xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
@ -896,20 +932,23 @@ static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
size_t cplen = (char *)xdr->end - (char *)xdr->p;
if (nbytes > xdr->scratch.iov_len)
return NULL;
goto out_overflow;
p = __xdr_inline_decode(xdr, cplen);
if (p == NULL)
return NULL;
memcpy(cpdest, p, cplen);
if (!xdr_set_next_buffer(xdr))
goto out_overflow;
cpdest += cplen;
nbytes -= cplen;
if (!xdr_set_next_buffer(xdr))
return NULL;
p = __xdr_inline_decode(xdr, nbytes);
if (p == NULL)
return NULL;
memcpy(cpdest, p, nbytes);
return xdr->scratch.iov_base;
out_overflow:
trace_rpc_xdr_overflow(xdr, nbytes);
return NULL;
}
/**
@ -926,14 +965,17 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p;
if (nbytes == 0)
if (unlikely(nbytes == 0))
return xdr->p;
if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return NULL;
goto out_overflow;
p = __xdr_inline_decode(xdr, nbytes);
if (p != NULL)
return p;
return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
trace_rpc_xdr_overflow(xdr, nbytes);
return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
@ -943,13 +985,17 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
struct kvec *iov;
unsigned int nwords = XDR_QUADLEN(len);
unsigned int cur = xdr_stream_pos(xdr);
unsigned int copied, offset;
if (xdr->nwords == 0)
return 0;
/* Realign pages to current pointer position */
iov = buf->head;
if (iov->iov_len > cur) {
xdr_shrink_bufhead(buf, iov->iov_len - cur);
offset = iov->iov_len - cur;
copied = xdr_shrink_bufhead(buf, offset);
trace_rpc_xdr_alignment(xdr, offset, copied);
xdr->nwords = XDR_QUADLEN(buf->len - cur);
}
@ -961,7 +1007,9 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
len = buf->page_len;
else if (nwords < xdr->nwords) {
/* Truncate page data and move it into the tail */
xdr_shrink_pagelen(buf, buf->page_len - len);
offset = buf->page_len - len;
copied = xdr_shrink_pagelen(buf, offset);
trace_rpc_xdr_alignment(xdr, offset, copied);
xdr->nwords = XDR_QUADLEN(buf->len - cur);
}
return len;
@ -1102,47 +1150,6 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
/**
* xdr_buf_trim - lop at most "len" bytes off the end of "buf"
* @buf: buf to be trimmed
* @len: number of bytes to reduce "buf" by
*
* Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
* that it's possible that we'll trim less than that amount if the xdr_buf is
* too small, or if (for instance) it's all in the head and the parser has
* already read too far into it.
*/
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
size_t cur;
unsigned int trim = len;
if (buf->tail[0].iov_len) {
cur = min_t(size_t, buf->tail[0].iov_len, trim);
buf->tail[0].iov_len -= cur;
trim -= cur;
if (!trim)
goto fix_len;
}
if (buf->page_len) {
cur = min_t(unsigned int, buf->page_len, trim);
buf->page_len -= cur;
trim -= cur;
if (!trim)
goto fix_len;
}
if (buf->head[0].iov_len) {
cur = min_t(size_t, buf->head[0].iov_len, trim);
buf->head[0].iov_len -= cur;
trim -= cur;
}
fix_len:
buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
unsigned int this_len;

View File

@ -1168,6 +1168,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
/* Note: req is added _before_ pos */
list_add_tail(&req->rq_xmit, &pos->rq_xmit);
INIT_LIST_HEAD(&req->rq_xmit2);
trace_xprt_enq_xmit(task, 1);
goto out;
}
} else if (RPC_IS_SWAPPER(task)) {
@ -1179,6 +1180,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
/* Note: req is added _before_ pos */
list_add_tail(&req->rq_xmit, &pos->rq_xmit);
INIT_LIST_HEAD(&req->rq_xmit2);
trace_xprt_enq_xmit(task, 2);
goto out;
}
} else if (!req->rq_seqno) {
@ -1187,11 +1189,13 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
continue;
list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
INIT_LIST_HEAD(&req->rq_xmit);
trace_xprt_enq_xmit(task, 3);
goto out;
}
}
list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
INIT_LIST_HEAD(&req->rq_xmit2);
trace_xprt_enq_xmit(task, 4);
out:
set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
spin_unlock(&xprt->queue_lock);
@ -1316,8 +1320,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
int is_retrans = RPC_WAS_SENT(task);
int status;
dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
if (!req->rq_bytes_sent) {
if (xprt_request_data_received(task)) {
status = 0;
@ -1339,9 +1341,9 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
connect_cookie = xprt->connect_cookie;
status = xprt->ops->send_request(req);
trace_xprt_transmit(xprt, req->rq_xid, status);
if (status != 0) {
req->rq_ntrans--;
trace_xprt_transmit(req, status);
return status;
}
@ -1350,7 +1352,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
xprt_inject_disconnect(xprt);
dprintk("RPC: %5u xmit complete\n", task->tk_pid);
task->tk_flags |= RPC_TASK_SENT;
spin_lock_bh(&xprt->transport_lock);
@ -1363,6 +1364,7 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
req->rq_connect_cookie = connect_cookie;
out_dequeue:
trace_xprt_transmit(req, status);
xprt_request_dequeue_transmit(task);
rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
return status;

View File

@ -123,7 +123,7 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
req->rl_rdmabuf->rg_base);
req->rl_rdmabuf->rg_base, rqst);
p = xdr_reserve_space(&req->rl_stream, 28);
if (unlikely(!p))

View File

@ -391,7 +391,7 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
*/
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, u32 xid,
int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr **out)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
@ -446,7 +446,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
goto out_mapmr_err;
ibmr->iova &= 0x00000000ffffffff;
ibmr->iova |= ((u64)cpu_to_be32(xid)) << 32;
ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
key = (u8)(ibmr->rkey & 0x000000FF);
ib_update_fast_reg_key(ibmr, ++key);
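The type change matters because the XID arrives in wire (big-endian) order: it has to be converted with be32_to_cpu() before being folded into the upper half of the MR's iova, where it serves as a debugging tag. A user-space sketch of that tagging (tag_iova_with_xid is illustrative; it takes the XID already in host order):

#include <stdint.h>

/* Keep the low 32 bits of the iova, place the RPC XID in the high 32. */
static uint64_t tag_iova_with_xid(uint64_t iova, uint32_t xid_host_order)
{
	iova &= 0x00000000ffffffffULL;
	iova |= (uint64_t)xid_host_order << 32;
	return iova;
}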

View File

@ -164,6 +164,21 @@ static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}
/* The client is required to provide a Reply chunk if the maximum
* size of the non-payload part of the RPC Reply is larger than
* the inline threshold.
*/
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
const struct rpc_rqst *rqst)
{
const struct xdr_buf *buf = &rqst->rq_rcv_buf;
const struct rpcrdma_ia *ia = &r_xprt->rx_ia;
return buf->head[0].iov_len + buf->tail[0].iov_len <
ia->ri_max_inline_read;
}
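This test feeds the chunk-type decision in rpcrdma_marshal_req() below: a Write chunk is offered for the data payload only if the non-payload head and tail still fit inline, otherwise the whole reply goes into a Reply chunk. A simplified sketch of that decision (choose_chunk is illustrative; the ddp_allowed and XDRBUF_READ checks are omitted):

enum chunktype { CH_NONE, CH_WRITE, CH_REPLY };

static enum chunktype choose_chunk(unsigned int whole_reply, unsigned int head,
				   unsigned int tail, unsigned int max_inline)
{
	if (whole_reply <= max_inline)
		return CH_NONE;		/* entire reply fits inline */
	if (head + tail < max_inline)
		return CH_WRITE;	/* payload via a Write chunk */
	return CH_REPLY;		/* everything via a Reply chunk */
}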
/* Split @vec on page boundaries into SGEs. FMR registers pages, not
* a byte range. Other modes coalesce these SGEs into a single MR
* when they can.
@ -733,7 +748,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(xdr, &req->rl_hdrbuf,
req->rl_rdmabuf->rg_base);
req->rl_rdmabuf->rg_base, rqst);
/* Fixed header fields */
ret = -EMSGSIZE;
@ -762,7 +777,8 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
*/
if (rpcrdma_results_inline(r_xprt, rqst))
wtype = rpcrdma_noch;
else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
rpcrdma_nonpayload_inline(r_xprt, rqst))
wtype = rpcrdma_writech;
else
wtype = rpcrdma_replych;
@ -1313,7 +1329,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
/* Fixed transport header fields */
xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
rep->rr_hdrbuf.head[0].iov_base);
rep->rr_hdrbuf.head[0].iov_base, NULL);
p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
if (unlikely(!p))
goto out_shortreply;

View File

@ -304,7 +304,6 @@ xprt_setup_rdma_bc(struct xprt_create *args)
xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
xprt->prot = XPRT_TRANSPORT_BC_RDMA;
xprt->tsh_size = 0;
xprt->ops = &xprt_rdma_bc_procs;
memcpy(&xprt->addr, args->dstaddr, args->addrlen);

View File

@ -332,7 +332,6 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
xprt->resvport = 0; /* privileged port not needed */
xprt->tsh_size = 0; /* RPC-RDMA handles framing */
xprt->ops = &xprt_rdma_procs;
/*

View File

@ -1481,6 +1481,8 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
if (ep->rep_receive_count > needed)
goto out;
needed -= ep->rep_receive_count;
if (!temp)
needed += RPCRDMA_MAX_RECV_BATCH;
count = 0;
wr = NULL;

View File

@ -205,6 +205,16 @@ struct rpcrdma_rep {
struct ib_recv_wr rr_recv_wr;
};
/* To reduce the rate at which a transport invokes ib_post_recv
* (and thus the hardware doorbell rate), xprtrdma posts Receive
* WRs in batches.
*
* Setting this to zero disables Receive post batching.
*/
enum {
RPCRDMA_MAX_RECV_BATCH = 7,
};
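rpcrdma_post_recvs() uses this constant to overshoot the current shortfall, so Receives are posted in bursts and the doorbell rings less often. A simplified sketch of that arithmetic (recvs_to_post is illustrative; the temp case, which skips the batching, is omitted):

static unsigned int recvs_to_post(unsigned int needed, unsigned int posted,
				  unsigned int recv_batch)
{
	if (posted > needed)
		return 0;			/* enough Receives already posted */
	return needed - posted + recv_batch;	/* shortfall plus one batch */
}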
/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
*/
struct rpcrdma_req;
@ -577,7 +587,7 @@ void frwr_release_mr(struct rpcrdma_mr *mr);
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, u32 xid,
int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr **mr);
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);

View File

@ -769,6 +769,29 @@ static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_
return xs_sendmsg(sock, msg, base + xdr->page_base);
}
#define xs_record_marker_len() sizeof(rpc_fraghdr)
/* Common case:
* - stream transport
* - sending from byte 0 of the message
* - the message is wholly contained in @xdr's head iovec
*/
static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
rpc_fraghdr marker, struct kvec *vec, size_t base)
{
struct kvec iov[2] = {
[0] = {
.iov_base = &marker,
.iov_len = sizeof(marker)
},
[1] = *vec,
};
size_t len = iov[0].iov_len + iov[1].iov_len;
iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
return xs_sendmsg(sock, msg, base);
}
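Folding the record marker and the head kvec into one iovec lets the common case go out in a single sendmsg instead of a separate 4-byte send. A user-space equivalent using writev (send_marker_and_head is illustrative; len is assumed to be below 2^31):

#include <arpa/inet.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_marker_and_head(int fd, const void *head, size_t len)
{
	uint32_t marker = htonl(0x80000000U | (uint32_t)len);	/* last fragment + length */
	struct iovec iov[2] = {
		{ .iov_base = &marker, .iov_len = sizeof(marker) },
		{ .iov_base = (void *)head, .iov_len = len },
	};

	return writev(fd, iov, 2);
}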
/**
* xs_sendpages - write pages directly to a socket
* @sock: socket to send on
@ -776,34 +799,42 @@ static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_
* @addrlen: UDP only -- length of destination address
* @xdr: buffer containing this request
* @base: starting position in the buffer
* @rm: stream record marker field
* @sent_p: return the total number of bytes successfully queued for sending
*
*/
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, int *sent_p)
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p)
{
struct msghdr msg = {
.msg_name = addr,
.msg_namelen = addrlen,
.msg_flags = XS_SENDMSG_FLAGS | MSG_MORE,
};
unsigned int remainder = xdr->len - base;
unsigned int rmsize = rm ? sizeof(rm) : 0;
unsigned int remainder = rmsize + xdr->len - base;
unsigned int want;
int err = 0;
if (unlikely(!sock))
return -ENOTSOCK;
if (base < xdr->head[0].iov_len) {
unsigned int len = xdr->head[0].iov_len - base;
want = xdr->head[0].iov_len + rmsize;
if (base < want) {
unsigned int len = want - base;
remainder -= len;
if (remainder == 0)
msg.msg_flags &= ~MSG_MORE;
err = xs_send_kvec(sock, &msg, &xdr->head[0], base);
if (rmsize)
err = xs_send_rm_and_kvec(sock, &msg, rm,
&xdr->head[0], base);
else
err = xs_send_kvec(sock, &msg, &xdr->head[0], base);
if (remainder == 0 || err != len)
goto out;
*sent_p += err;
base = 0;
} else
base -= xdr->head[0].iov_len;
base -= want;
if (base < xdr->page_len) {
unsigned int len = xdr->page_len - base;
@ -891,13 +922,14 @@ xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
}
/*
* Construct a stream transport record marker in @buf.
* Return the stream record marker field for a record of length < 2^31-1
*/
static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
static rpc_fraghdr
xs_stream_record_marker(struct xdr_buf *xdr)
{
u32 reclen = buf->len - sizeof(rpc_fraghdr);
rpc_fraghdr *base = buf->head[0].iov_base;
*base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
if (!xdr->len)
return 0;
return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
}
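The marker itself is a single big-endian word: the top bit flags the last fragment of the record and the low 31 bits carry the fragment length, so a 100-byte message yields 0x80000064. A minimal standalone sketch (stream_record_marker_be is illustrative):

#include <arpa/inet.h>
#include <stdint.h>

#define LAST_FRAGMENT	0x80000000U

/* Record marker, in network byte order, for a single-fragment record. */
static uint32_t stream_record_marker_be(uint32_t reclen)
{
	return htonl(LAST_FRAGMENT | reclen);	/* reclen must be < 2^31 */
}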
/**
@ -926,14 +958,13 @@ static int xs_local_send_request(struct rpc_rqst *req)
return -ENOTCONN;
}
xs_encode_stream_record_marker(&req->rq_snd_buf);
xs_pktdump("packet data:",
req->rq_svec->iov_base, req->rq_svec->iov_len);
req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, NULL, 0, xdr,
transport->xmit.offset,
xs_stream_record_marker(xdr),
&sent);
dprintk("RPC: %s(%u) = %d\n",
__func__, xdr->len - transport->xmit.offset, status);
@ -1001,7 +1032,7 @@ static int xs_udp_send_request(struct rpc_rqst *req)
req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
xdr, 0, &sent);
xdr, 0, 0, &sent);
dprintk("RPC: xs_udp_send_request(%u) = %d\n",
xdr->len, status);
@ -1076,8 +1107,6 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
return -ENOTCONN;
}
xs_encode_stream_record_marker(&req->rq_snd_buf);
xs_pktdump("packet data:",
req->rq_svec->iov_base,
req->rq_svec->iov_len);
@ -1093,6 +1122,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
sent = 0;
status = xs_sendpages(transport->sock, NULL, 0, xdr,
transport->xmit.offset,
xs_stream_record_marker(xdr),
&sent);
dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
@ -2547,26 +2577,35 @@ static int bc_sendto(struct rpc_rqst *req)
{
int len;
struct xdr_buf *xbufp = &req->rq_snd_buf;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport =
container_of(xprt, struct sock_xprt, xprt);
struct socket *sock = transport->sock;
container_of(req->rq_xprt, struct sock_xprt, xprt);
unsigned long headoff;
unsigned long tailoff;
struct page *tailpage;
struct msghdr msg = {
.msg_flags = MSG_MORE
};
rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
(u32)xbufp->len);
struct kvec iov = {
.iov_base = &marker,
.iov_len = sizeof(marker),
};
xs_encode_stream_record_marker(xbufp);
len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
if (len != iov.iov_len)
return -EAGAIN;
tailpage = NULL;
if (xbufp->tail[0].iov_len)
tailpage = virt_to_page(xbufp->tail[0].iov_base);
tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
len = svc_send_common(sock, xbufp,
len = svc_send_common(transport->sock, xbufp,
virt_to_page(xbufp->head[0].iov_base), headoff,
xbufp->tail[0].iov_base, tailoff);
if (len != xbufp->len) {
printk(KERN_NOTICE "Error sending entire callback!\n");
len = -EAGAIN;
}
tailpage, tailoff);
if (len != xbufp->len)
return -EAGAIN;
return len;
}
@ -2806,7 +2845,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = 0;
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
@ -2875,7 +2913,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_UDP;
xprt->tsh_size = 0;
/* XXX: header size can vary due to auth type, IPv6, etc. */
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
@ -2955,7 +2992,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_TCP;
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->bind_timeout = XS_BIND_TO;
@ -3028,7 +3064,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
transport = container_of(xprt, struct sock_xprt, xprt);
xprt->prot = IPPROTO_TCP;
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
xprt->timeout = &xs_tcp_default_timeout;