KEYS: Use individual pages in big_key for crypto buffers

kmalloc() can't always allocate buffers as large as big_key needs for crypto
(1MB of payload plus some metadata), so we cannot use it to allocate the
buffer.  Further, vmalloc'd pages can't be passed to sg_init_one(), and the
aead crypto accessors cannot be called progressively: they must be passed
all the data in one go, so we can't feed the data through one block at a
time either.
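
For background, sg_init_one() resolves its buffer with virt_to_page(), which
is only meaningful for the kernel's direct mapping, so kmalloc'd memory works
but vmalloc'd memory does not.  A vmalloc'd buffer would have to be described
page by page, roughly as in this hypothetical sketch (not part of the patch;
the change below avoids the issue entirely by allocating the pages itself):

	/* Hypothetical sketch: building a scatterlist over a vmalloc'd
	 * buffer needs a per-page lookup, because sg_init_one() would
	 * apply virt_to_page() to an address outside the direct mapping.
	 */
	static void sketch_sg_from_vmalloc(u8 *vbuf, struct scatterlist *sg,
					   unsigned int npg)
	{
		unsigned int i;

		sg_init_table(sg, npg);
		for (i = 0; i < npg; i++)
			sg_set_page(&sg[i],
				    vmalloc_to_page(vbuf + i * PAGE_SIZE),
				    PAGE_SIZE, 0);
	}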

Fix this by allocating the buffer pages individually and passing them
through a multientry scatterlist to the crypto layer.  This has the bonus
advantage that we don't have to allocate a contiguous series of pages.

We then vmap() the page list and pass that through to the VFS read/write
routines.
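
As a rough illustration of how the two views of the buffer are consumed, here
is a hypothetical helper combining pieces of the patch (not code from the
patch itself); it assumes the big_key_buf layout and big_key_crypt() signature
introduced below:

	/* Illustration only: the AEAD layer walks the multi-entry
	 * scatterlist, while the VFS helpers are handed the vmap()'d
	 * linear view of the same pages.
	 */
	static ssize_t sketch_encrypt_and_write(struct file *file,
						struct big_key_buf *buf,
						size_t datalen, size_t enclen,
						u8 *enckey)
	{
		loff_t pos = 0;
		int ret;

		/* Crypto: no contiguous buffer needed; buf->sg describes the pages. */
		ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
		if (ret)
			return ret;

		/* VFS: kernel_write() wants a kernel pointer, so use buf->virt. */
		return kernel_write(file, buf->virt, enclen, &pos);
	}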

Without this fix, the oversized kmalloc() allocation can trigger a warning:

	WARNING: CPU: 0 PID: 60912 at mm/page_alloc.c:3883 __alloc_pages_nodemask+0xb7c/0x15f8
	([<00000000002acbb6>] __alloc_pages_nodemask+0x1ee/0x15f8)
	 [<00000000002dd356>] kmalloc_order+0x46/0x90
	 [<00000000002dd3e0>] kmalloc_order_trace+0x40/0x1f8
	 [<0000000000326a10>] __kmalloc+0x430/0x4c0
	 [<00000000004343e4>] big_key_preparse+0x7c/0x210
	 [<000000000042c040>] key_create_or_update+0x128/0x420
	 [<000000000042e52c>] SyS_add_key+0x124/0x220
	 [<00000000007bba2c>] system_call+0xc4/0x2b0

from the keyctl/padd/useradd test of the keyutils testsuite on s390x.

Note that it might be better to shovel data through in page-sized lumps
instead, as there's no particular need for a monolithic buffer unless the
kernel itself wants to access the data.
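
If that were done, the VFS side might look something like this hypothetical
sketch (not part of this patch), reading the encrypted payload a page at a
time straight into the scatterlist pages and dropping the vmap()'d view; it
assumes the big_key_buf layout introduced by this patch:

	static ssize_t sketch_read_in_lumps(struct file *file,
					    struct big_key_buf *buf,
					    size_t enclen)
	{
		loff_t pos = 0;
		size_t done = 0;
		unsigned int i;

		for (i = 0; i < buf->nr_pages && done < enclen; i++) {
			size_t n = min_t(size_t, enclen - done, PAGE_SIZE);
			void *p = kmap(buf->pages[i]);
			ssize_t ret = kernel_read(file, p, n, &pos);

			kunmap(buf->pages[i]);
			if (ret < 0)
				return ret;
			if (ret != n)
				return -EIO;
			done += ret;
		}
		return done;
	}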

Fixes: 13100a72f4 ("Security: Keys: Big keys stored encrypted")
Reported-by: Paul Bunyan <pbunyan@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Kirill Marinushkin <k.marinushkin@gmail.com>
David Howells 2018-02-22 14:38:34 +00:00
parent 4b34968e77
commit d9f4bb1a0f
1 changed file with 87 additions and 23 deletions

@@ -22,6 +22,13 @@
 #include <keys/big_key-type.h>
 #include <crypto/aead.h>
 
+struct big_key_buf {
+	unsigned int nr_pages;
+	void *virt;
+	struct scatterlist *sg;
+	struct page *pages[];
+};
+
 /*
  * Layout of key payload words.
  */
@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock);
 /*
  * Encrypt/decrypt big_key data
  */
-static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
 {
 	int ret;
-	struct scatterlist sgio;
 	struct aead_request *aead_req;
 	/* We always use a zero nonce. The reason we can get away with this is
 	 * because we're using a different randomly generated key for every
@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
 		return -ENOMEM;
 
 	memset(zero_nonce, 0, sizeof(zero_nonce));
-	sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
-	aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
+	aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 	aead_request_set_ad(aead_req, 0);
 
@@ -129,22 +134,82 @@ error:
 	return ret;
 }
 
+/*
+ * Free up the buffer.
+ */
+static void big_key_free_buffer(struct big_key_buf *buf)
+{
+	unsigned int i;
+
+	if (buf->virt) {
+		memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
+		vunmap(buf->virt);
+	}
+
+	for (i = 0; i < buf->nr_pages; i++)
+		if (buf->pages[i])
+			__free_page(buf->pages[i]);
+
+	kfree(buf);
+}
+
+/*
+ * Allocate a buffer consisting of a set of pages with a virtual mapping
+ * applied over them.
+ */
+static void *big_key_alloc_buffer(size_t len)
+{
+	struct big_key_buf *buf;
+	unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned int i, l;
+
+	buf = kzalloc(sizeof(struct big_key_buf) +
+		      sizeof(struct page) * npg +
+		      sizeof(struct scatterlist) * npg,
+		      GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->nr_pages = npg;
+	buf->sg = (void *)(buf->pages + npg);
+	sg_init_table(buf->sg, npg);
+
+	for (i = 0; i < buf->nr_pages; i++) {
+		buf->pages[i] = alloc_page(GFP_KERNEL);
+		if (!buf->pages[i])
+			goto nomem;
+
+		l = min_t(size_t, len, PAGE_SIZE);
+		sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
+		len -= l;
+	}
+
+	buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
+	if (!buf->virt)
+		goto nomem;
+
+	return buf;
+
+nomem:
+	big_key_free_buffer(buf);
+	return NULL;
+}
+
 /*
  * Preparse a big key
  */
 int big_key_preparse(struct key_preparsed_payload *prep)
 {
+	struct big_key_buf *buf;
 	struct path *path = (struct path *)&prep->payload.data[big_key_path];
 	struct file *file;
 	u8 *enckey;
-	u8 *data = NULL;
 	ssize_t written;
-	size_t datalen = prep->datalen;
+	size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
 	int ret;
 
-	ret = -EINVAL;
 	if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
-		goto error;
+		return -EINVAL;
 
 	/* Set an arbitrary quota */
 	prep->quotalen = 16;
@@ -157,13 +222,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 		 *
 		 * File content is stored encrypted with randomly generated key.
 		 */
-		size_t enclen = datalen + ENC_AUTHTAG_SIZE;
 		loff_t pos = 0;
 
-		data = kmalloc(enclen, GFP_KERNEL);
-		if (!data)
+		buf = big_key_alloc_buffer(enclen);
+		if (!buf)
 			return -ENOMEM;
-		memcpy(data, prep->data, datalen);
+		memcpy(buf->virt, prep->data, datalen);
 
 		/* generate random key */
 		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -176,7 +240,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 			goto err_enckey;
 
 		/* encrypt aligned data */
-		ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
+		ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
 		if (ret)
 			goto err_enckey;
 
@@ -187,7 +251,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 			goto err_enckey;
 		}
 
-		written = kernel_write(file, data, enclen, &pos);
+		written = kernel_write(file, buf->virt, enclen, &pos);
 		if (written != enclen) {
 			ret = written;
 			if (written >= 0)
@@ -202,7 +266,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
 		*path = file->f_path;
 		path_get(path);
 		fput(file);
-		kzfree(data);
+		big_key_free_buffer(buf);
 	} else {
 		/* Just store the data in a buffer */
 		void *data = kmalloc(datalen, GFP_KERNEL);
@@ -220,7 +284,7 @@ err_fput:
 	fput(file);
 err_enckey:
 	kzfree(enckey);
 error:
-	kzfree(data);
+	big_key_free_buffer(buf);
 	return ret;
 }
@@ -298,15 +362,15 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
 		return datalen;
 
 	if (datalen > BIG_KEY_FILE_THRESHOLD) {
+		struct big_key_buf *buf;
 		struct path *path = (struct path *)&key->payload.data[big_key_path];
 		struct file *file;
-		u8 *data;
 		u8 *enckey = (u8 *)key->payload.data[big_key_data];
 		size_t enclen = datalen + ENC_AUTHTAG_SIZE;
 		loff_t pos = 0;
 
-		data = kmalloc(enclen, GFP_KERNEL);
-		if (!data)
+		buf = big_key_alloc_buffer(enclen);
+		if (!buf)
 			return -ENOMEM;
 
 		file = dentry_open(path, O_RDONLY, current_cred());
@@ -316,26 +380,26 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
 		}
 
 		/* read file to kernel and decrypt */
-		ret = kernel_read(file, data, enclen, &pos);
+		ret = kernel_read(file, buf->virt, enclen, &pos);
 		if (ret >= 0 && ret != enclen) {
 			ret = -EIO;
 			goto err_fput;
 		}
 
-		ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey);
+		ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
 		if (ret)
 			goto err_fput;
 
 		ret = datalen;
 
 		/* copy decrypted data to user */
-		if (copy_to_user(buffer, data, datalen) != 0)
+		if (copy_to_user(buffer, buf->virt, datalen) != 0)
 			ret = -EFAULT;
 
 err_fput:
 		fput(file);
 error:
-		kzfree(data);
+		big_key_free_buffer(buf);
 	} else {
 		ret = datalen;
 		if (copy_to_user(buffer, key->payload.data[big_key_data],