commit d407574e79
Pull f2fs updates from Jaegeuk Kim:
 "New Features:
   - uplift filesystem encryption into fs/crypto/
   - give sysfs entries to control memory consumption

  Enhancements:
   - aio performance by preallocating blocks in ->write_iter
   - use writepages lock for only WB_SYNC_ALL
   - avoid redundant inline_data conversion
   - enhance foreground GC
   - use wait_for_stable_page where possible
   - speed up SEEK_DATA and fiemap

  Bug Fixes:
   - corner case in terms of -ENOSPC for inline_data
   - hung task caused by long latency in shrinker
   - corruption between atomic write and f2fs_trace_pid
   - avoid garbage lengths in dentries
   - revoke atomically written pages if an error occurs

  In addition, there are various minor bug fixes and clean-ups"

* tag 'for-f2fs-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (81 commits)
  f2fs: submit node page write bios when really required
  f2fs: add missing argument to f2fs_setxattr stub
  f2fs: fix to avoid unneeded unlock_new_inode
  f2fs: clean up opened code with f2fs_update_dentry
  f2fs: declare static functions
  f2fs: use cryptoapi crc32 functions
  f2fs: modify the readahead method in ra_node_page()
  f2fs crypto: sync ext4_lookup and ext4_file_open
  fs crypto: move per-file encryption from f2fs tree to fs/crypto
  f2fs: mutex can't be used by down_write_nest_lock()
  f2fs: recovery missing dot dentries in root directory
  f2fs: fix to avoid deadlock when merging inline data
  f2fs: introduce f2fs_flush_merged_bios for cleanup
  f2fs: introduce f2fs_update_data_blkaddr for cleanup
  f2fs crypto: fix incorrect positioning for GCing encrypted data page
  f2fs: fix incorrect upper bound when iterating inode mapping tree
  f2fs: avoid hungtask problem caused by losing wake_up
  f2fs: trace old block address for CoWed page
  f2fs: try to flush inode after merging inline data
  f2fs: show more info about superblock recovery
  ...

/*
 * key management facility for FS encryption support.
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption key functions.
 *
 * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
 */

#include <keys/encrypted-type.h>
#include <keys/user-type.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <uapi/linux/keyctl.h>
#include <linux/fscrypto.h>

static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (rc == -EINPROGRESS)
		return;

	ecr->res = rc;
	complete(&ecr->completion);
}

/**
 * derive_key_aes() - Derive a key using AES-128-ECB
 * @deriving_key: Encryption key used for derivation.
 * @source_key: Source key to which to apply derivation.
 * @derived_key: Derived key.
 *
 * Return: Zero on success; non-zero otherwise.
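 *
 * As used by get_crypt_info() below, @deriving_key is the per-file nonce
 * from the on-disk fscrypt_context and @source_key is the raw master key,
 * i.e. the derived per-file key is AES-128-ECB(key = nonce, data = master key).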
 */
static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
				u8 source_key[FS_AES_256_XTS_KEY_SIZE],
				u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
{
	int res = 0;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);

	if (IS_ERR(tfm)) {
		res = PTR_ERR(tfm);
		tfm = NULL;
		goto out;
	}
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		res = -ENOMEM;
		goto out;
	}
	skcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			derive_crypt_complete, &ecr);
	res = crypto_skcipher_setkey(tfm, deriving_key,
					FS_AES_128_ECB_KEY_SIZE);
	if (res < 0)
		goto out;

	sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
	sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg,
					FS_AES_256_XTS_KEY_SIZE, NULL);
	res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return res;
}

static void put_crypt_info(struct fscrypt_info *ci)
{
	if (!ci)
		return;

	key_put(ci->ci_keyring_key);
	crypto_free_skcipher(ci->ci_ctfm);
	kmem_cache_free(fscrypt_info_cachep, ci);
}
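
/*
 * Set up and publish ->i_crypt_info for an inode.  The result is installed
 * with cmpxchg(), so concurrent callers race harmlessly: the loser frees its
 * copy via put_crypt_info() and retries, observing the winner's info.  An
 * already-published info whose keyring key no longer passes key_validate()
 * is dropped and rebuilt.
 */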
int get_crypt_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	u8 full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
				(FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
	struct key *keyring_key = NULL;
	struct fscrypt_key *master_key;
	struct fscrypt_context ctx;
	const struct user_key_payload *ukp;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	u8 raw_key[FS_MAX_KEY_SIZE];
	u8 mode;
	int res;

	res = fscrypt_initialize();
	if (res)
		return res;

	if (!inode->i_sb->s_cop->get_context)
		return -EOPNOTSUPP;
retry:
	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
	if (crypt_info) {
		if (!crypt_info->ci_keyring_key ||
				key_validate(crypt_info->ci_keyring_key) == 0)
			return 0;
		fscrypt_put_encryption_info(inode, crypt_info);
		goto retry;
	}

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode))
			return res;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}
	res = 0;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_keyring_key = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));
	if (S_ISREG(inode->i_mode))
		mode = crypt_info->ci_data_mode;
	else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		mode = crypt_info->ci_filename_mode;
	else
		BUG();

	switch (mode) {
	case FS_ENCRYPTION_MODE_AES_256_XTS:
		cipher_str = "xts(aes)";
		break;
	case FS_ENCRYPTION_MODE_AES_256_CTS:
		cipher_str = "cts(cbc(aes))";
		break;
	default:
		printk_once(KERN_WARNING
			    "%s: unsupported key mode %d (ino %u)\n",
			    __func__, mode, (unsigned) inode->i_ino);
		res = -ENOKEY;
		goto out;
	}
	if (fscrypt_dummy_context_enabled(inode)) {
		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
		goto got_key;
	}
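	/*
	 * Look the master key up in the kernel keyring.  Its description is
	 * FS_KEY_DESC_PREFIX followed by the hex form of the context's
	 * master_key_descriptor, and it must be a "logon" key carrying a
	 * struct fscrypt_key payload.
	 */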
	memcpy(full_key_descriptor, FS_KEY_DESC_PREFIX,
					FS_KEY_DESC_PREFIX_SIZE);
	sprintf(full_key_descriptor + FS_KEY_DESC_PREFIX_SIZE,
					"%*phN", FS_KEY_DESCRIPTOR_SIZE,
					ctx.master_key_descriptor);
	full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
					(2 * FS_KEY_DESCRIPTOR_SIZE)] = '\0';
	keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
	if (IS_ERR(keyring_key)) {
		res = PTR_ERR(keyring_key);
		keyring_key = NULL;
		goto out;
	}
	crypt_info->ci_keyring_key = keyring_key;
	if (keyring_key->type != &key_type_logon) {
		printk_once(KERN_WARNING
				"%s: key type must be logon\n", __func__);
		res = -ENOKEY;
		goto out;
	}
	down_read(&keyring_key->sem);
	ukp = user_key_payload(keyring_key);
	if (ukp->datalen != sizeof(struct fscrypt_key)) {
		res = -EINVAL;
		up_read(&keyring_key->sem);
		goto out;
	}
	master_key = (struct fscrypt_key *)ukp->data;
	BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);

	if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
		printk_once(KERN_WARNING
				"%s: key size incorrect: %d\n",
				__func__, master_key->size);
		res = -ENOKEY;
		up_read(&keyring_key->sem);
		goto out;
	}
	res = derive_key_aes(ctx.nonce, master_key->raw, raw_key);
	up_read(&keyring_key->sem);
	if (res)
		goto out;

got_key:
	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		printk(KERN_DEBUG
		       "%s: error %d (inode %u) allocating crypto tfm\n",
		       __func__, res, (unsigned) inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	res = crypto_skcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode));
	if (res)
		goto out;

	memzero_explicit(raw_key, sizeof(raw_key));
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
		put_crypt_info(crypt_info);
		goto retry;
	}
	return 0;

out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	memzero_explicit(raw_key, sizeof(raw_key));
	return res;
}

void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
{
	struct fscrypt_info *prev;

	if (ci == NULL)
		ci = ACCESS_ONCE(inode->i_crypt_info);
	if (ci == NULL)
		return;

	prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
	if (prev != ci)
		return;

	put_crypt_info(ci);
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);
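
/*
 * Set up the inode's encryption key if it is not already cached, or if the
 * cached keyring key has been revoked, invalidated, or marked dead since it
 * was cached.
 */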
int fscrypt_get_encryption_info(struct inode *inode)
{
	struct fscrypt_info *ci = inode->i_crypt_info;

	if (!ci ||
		(ci->ci_keyring_key &&
		 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					       (1 << KEY_FLAG_REVOKED) |
					       (1 << KEY_FLAG_DEAD)))))
		return get_crypt_info(inode);
	return 0;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);
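
/*
 * Usage sketch (illustrative, not part of this file): a filesystem that
 * provides ->s_cop->get_context typically calls fscrypt_get_encryption_info()
 * before touching an encrypted inode's contents or names, and drops the key
 * when the inode is evicted.  Note that a missing key is reported as success
 * with ->i_crypt_info left NULL, so callers must check the pointer.  The
 * foo_* names below are made up for the example.
 *
 *	static int foo_file_open(struct inode *inode, struct file *filp)
 *	{
 *		int ret;
 *
 *		if (foo_inode_is_encrypted(inode)) {
 *			ret = fscrypt_get_encryption_info(inode);
 *			if (ret)
 *				return ret;
 *			if (!inode->i_crypt_info)
 *				return -ENOKEY;
 *		}
 *		return 0;
 *	}
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		... filesystem-specific teardown ...
 *		fscrypt_put_encryption_info(inode, NULL);
 *	}
 */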