Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
 "Next to the usual bug fixes (including the TASK_SIZE fix), there is
  one larger crypto item. It allows to use protected keys with the
  in-kernel crypto API

  The protected key support has two parts, the pkey user space API to
  convert key formats and the paes crypto module that uses a protected
  key instead of a standard AES key"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390: TASK_SIZE for kernel threads
  s390/crypt: Add protected key AES module
  s390/dasd: fix spelling mistake: "supportet" -> "supported"
  s390/pkey: Introduce pkey kernel module
  s390/zcrypt: export additional symbols
  s390/zcrypt: Rework CONFIG_ZCRYPT Kconfig text.
  s390/zcrypt: Cleanup leftover module code.
  s390/nmi: purge tlbs after control register validation
  s390/nmi: fix order of register validation
  s390/crypto: Add PCKMO inline function
  s390/zcrypt: Enable request count reset for cards and queues.
  s390/mm: use _SEGMENT_ENTRY_EMPTY in the code
  s390/chsc: Add exception handler for CHSC instruction
  s390: opt into HAVE_COPY_THREAD_TLS
  s390: restore address space when returning to user space
  s390: rename CIF_ASCE to CIF_ASCE_PRIMARY
Linus Torvalds 2017-02-27 23:03:04 -08:00
commit 3f5595e3d0
30 changed files with 2182 additions and 93 deletions


@@ -134,6 +134,7 @@ config S390
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS


@@ -678,6 +678,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m


@@ -628,6 +628,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m


@@ -6,7 +6,7 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o


@@ -0,0 +1,619 @@
/*
* Cryptographic API.
*
* s390 implementation of the AES Cipher Algorithm with protected keys.
*
* s390 Version:
* Copyright IBM Corp. 2017
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
*/
#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct s390_paes_ctx {
struct pkey_seckey sk;
struct pkey_protkey pk;
unsigned long fc;
};
struct s390_pxts_ctx {
struct pkey_seckey sk[2];
struct pkey_protkey pk[2];
unsigned long fc;
};
static inline int __paes_convert_key(struct pkey_seckey *sk,
struct pkey_protkey *pk)
{
int i, ret;
/* try three times in case of failure */
for (i = 0; i < 3; i++) {
ret = pkey_skey2pkey(sk, pk);
if (ret == 0)
break;
}
return ret;
}
static int __paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
if (key_len != SECKEYBLOBSIZE)
return -EINVAL;
memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
if (__paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static int ecb_paes_crypt(struct blkcipher_desc *desc,
unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes, n, k;
int ret;
ret = blkcipher_walk_virt(desc, walk);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
if (__paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
}
}
return ret;
}
static int ecb_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
}
static int ecb_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ecb_paes_alg = {
.cra_name = "ecb(paes)",
.cra_driver_name = "ecb-paes-s390",
.cra_priority = 400, /* combo: aes + ecb */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.setkey = ecb_paes_set_key,
.encrypt = ecb_paes_encrypt,
.decrypt = ecb_paes_decrypt,
}
}
};
static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
if (__cbc_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes, n, k;
int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[MAXPROTKEYSIZE];
} param;
ret = blkcipher_walk_virt(desc, walk);
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | modifier, &param,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
if (__cbc_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
}
}
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
static int cbc_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_paes_crypt(desc, 0, &walk);
}
static int cbc_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_paes_alg = {
.cra_name = "cbc(paes)",
.cra_driver_name = "cbc-paes-s390",
.cra_priority = 400, /* combo: aes + cbc */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_paes_set_key,
.encrypt = cbc_paes_encrypt,
.decrypt = cbc_paes_decrypt,
}
}
};
static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
__paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
CPACF_KM_PXTS_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len;
memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
if (__xts_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/*
* xts_check_key verifies the key length is not odd and makes
* sure that the two keys are not the same. This can be done
* on the two protected keys as well
*/
ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
return xts_check_key(tfm, ckey, 2*ckey_len);
}
static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int keylen, offset, nbytes, n, k;
int ret;
struct {
u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
u8 tweak[16];
u8 block[16];
u8 bit[16];
u8 xts[16];
} pcc_param;
struct {
u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
u8 init[16];
} xts_param;
ret = blkcipher_walk_virt(desc, walk);
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
retry:
memset(&pcc_param, 0, sizeof(pcc_param));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
memcpy(xts_param.init, pcc_param.xts, 16);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
if (k < n) {
if (__xts_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
goto retry;
}
}
return ret;
}
static int xts_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return xts_paes_crypt(desc, 0, &walk);
}
static int xts_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg xts_paes_alg = {
.cra_name = "xts(paes)",
.cra_driver_name = "xts-paes-s390",
.cra_priority = 400, /* combo: aes + xts */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_pxts_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SECKEYBLOBSIZE,
.max_keysize = 2 * SECKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
.decrypt = xts_paes_decrypt,
}
}
};
static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
CPACF_KMCTR_PAES_256 : 0;
/* Check if the function code is available */
ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
return ctx->fc ? 0 : -EINVAL;
}
static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
memcpy(ctx->sk.seckey, in_key, key_len);
if (__ctr_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* only use complete blocks, max. PAGE_SIZE */
memcpy(ctrptr, iv, AES_BLOCK_SIZE);
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
ctrptr += AES_BLOCK_SIZE;
}
return n;
}
static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
unsigned int nbytes, n, k;
int ret, locked;
locked = spin_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
walk->dst.virt.addr, walk->src.virt.addr,
n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (k < n) {
if (__ctr_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
}
}
if (locked)
spin_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
while (1) {
if (cpacf_kmctr(ctx->fc | modifier,
ctx->pk.protkey, buf,
walk->src.virt.addr, AES_BLOCK_SIZE,
walk->iv) == AES_BLOCK_SIZE)
break;
if (__ctr_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
}
memcpy(walk->dst.virt.addr, buf, nbytes);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
}
return ret;
}
static int ctr_paes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_paes_crypt(desc, 0, &walk);
}
static int ctr_paes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_paes_alg = {
.cra_name = "ctr(paes)",
.cra_driver_name = "ctr-paes-s390",
.cra_priority = 400, /* combo: aes + ctr */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_paes_set_key,
.encrypt = ctr_paes_encrypt,
.decrypt = ctr_paes_decrypt,
}
}
};
static inline void __crypto_unregister_alg(struct crypto_alg *alg)
{
if (!list_empty(&alg->cra_list))
crypto_unregister_alg(alg);
}
static void paes_s390_fini(void)
{
if (ctrblk)
free_page((unsigned long) ctrblk);
__crypto_unregister_alg(&ctr_paes_alg);
__crypto_unregister_alg(&xts_paes_alg);
__crypto_unregister_alg(&cbc_paes_alg);
__crypto_unregister_alg(&ecb_paes_alg);
}
static int __init paes_s390_init(void)
{
int ret;
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
ret = crypto_register_alg(&ecb_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
ret = crypto_register_alg(&cbc_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
ret = crypto_register_alg(&xts_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
ret = crypto_register_alg(&ctr_paes_alg);
if (ret)
goto out_err;
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
}
return 0;
out_err:
paes_s390_fini();
return ret;
}
module_init(paes_s390_init);
module_exit(paes_s390_fini);
MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");
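
A note on the retry pattern above: a protected key is only usable under
the current wrapping key, so it can become stale at runtime (for
instance when the hypervisor establishes a new wrapping key). CPACF
then processes fewer than the requested n bytes, and the loops respond
by re-deriving the protected key from the stored secure key and
retrying. A hedged sketch of an in-kernel user of the registered
algorithms, assuming the skcipher front end and a caller-provided
secure key blob:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>

static int paes_cbc_example(const u8 *seckey, unsigned int bloblen)
{
	struct crypto_skcipher *tfm;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* setkey takes the secure key blob (SECKEYBLOBSIZE bytes);
	 * the protected key is derived internally via the pkey API */
	ret = crypto_skcipher_setkey(tfm, seckey, bloblen);
	crypto_free_skcipher(tfm);
	return ret;
}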


@@ -229,6 +229,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m


@@ -28,8 +28,9 @@
#define CPACF_PPNO 0xb93c /* MSA5 */
/*
* Decryption modifier bit
* En/decryption modifier bits
*/
#define CPACF_ENCRYPT 0x00
#define CPACF_DECRYPT 0x80
/*
@@ -42,8 +43,13 @@
#define CPACF_KM_AES_128 0x12
#define CPACF_KM_AES_192 0x13
#define CPACF_KM_AES_256 0x14
#define CPACF_KM_PAES_128 0x1a
#define CPACF_KM_PAES_192 0x1b
#define CPACF_KM_PAES_256 0x1c
#define CPACF_KM_XTS_128 0x32
#define CPACF_KM_XTS_256 0x34
#define CPACF_KM_PXTS_128 0x3a
#define CPACF_KM_PXTS_256 0x3c
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -56,6 +62,9 @@
#define CPACF_KMC_AES_128 0x12
#define CPACF_KMC_AES_192 0x13
#define CPACF_KMC_AES_256 0x14
#define CPACF_KMC_PAES_128 0x1a
#define CPACF_KMC_PAES_192 0x1b
#define CPACF_KMC_PAES_256 0x1c
#define CPACF_KMC_PRNG 0x43
/*
@@ -69,6 +78,9 @@
#define CPACF_KMCTR_AES_128 0x12
#define CPACF_KMCTR_AES_192 0x13
#define CPACF_KMCTR_AES_256 0x14
#define CPACF_KMCTR_PAES_128 0x1a
#define CPACF_KMCTR_PAES_192 0x1b
#define CPACF_KMCTR_PAES_256 0x1c
/*
* Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
@@ -98,6 +110,18 @@
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
* instruction
*/
#define CPACF_PCKMO_QUERY 0x00
#define CPACF_PCKMO_ENC_DES_KEY 0x01
#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
/*
* Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
* instruction
@@ -397,4 +421,24 @@ static inline void cpacf_pcc(unsigned long func, void *param)
: "cc", "memory");
}
/**
* cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY
* MANAGEMENT) instruction
* @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines
* @param: address of parameter block; see POP for details on each func
*
* Returns nothing; the result is written back into the parameter block.
*/
static inline void cpacf_pckmo(long func, void *param)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
asm volatile(
" .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
:
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
: "cc", "memory");
}
#endif /* _ASM_S390_CPACF_H */
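
To illustrate the new PCKMO helper, a sketch (assumptions: a
pckmo_functions mask filled via cpacf_query(CPACF_PCKMO, ...), and the
parameter block layout used by the pkey module) of wrapping a clear
AES-128 key: the clear key is placed at the start of the parameter
block, and PCKMO writes the protected key, including the 32-byte
wrapping-key verification pattern, back in place.

#include <linux/string.h>
#include <linux/errno.h>
#include <asm/cpacf.h>

static cpacf_mask_t pckmo_functions;	/* cpacf_query(CPACF_PCKMO, ...) */

static int pckmo_clr2protkey_aes128(const u8 *clrkey, u8 *protkey)
{
	u8 paramblock[64];

	if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY))
		return -EOPNOTSUPP;
	memset(paramblock, 0, sizeof(paramblock));
	memcpy(paramblock, clrkey, 16);		/* clear key value */
	cpacf_pckmo(CPACF_PCKMO_ENC_AES_128_KEY, paramblock);
	memcpy(protkey, paramblock, 16 + 32);	/* key + verification pattern */
	return 0;
}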


@@ -63,7 +63,7 @@ static inline void set_user_asce(struct mm_struct *mm)
S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
set_cpu_flag(CIF_ASCE_PRIMARY);
}
static inline void clear_user_asce(void)
@@ -81,7 +81,7 @@ static inline void load_kernel_asce(void)
__ctl_store(asce, 1, 1);
if (asce != S390_lowcore.kernel_asce)
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
set_cpu_flag(CIF_ASCE);
set_cpu_flag(CIF_ASCE_PRIMARY);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,


@@ -640,12 +640,12 @@ static inline int pud_bad(pud_t pud)
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}
static inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
@@ -803,7 +803,7 @@ static inline void pud_clear(pud_t *pud)
static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -1357,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1367,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
{
if (full) {
pmd_t pmd = *pmdp;
*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
return pmd;
}
return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1384,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT


@@ -0,0 +1,90 @@
/*
* Kernelspace interface to the pkey device driver
*
* Copyright IBM Corp. 2016
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
*/
#ifndef _KAPI_PKEY_H
#define _KAPI_PKEY_H
#include <linux/ioctl.h>
#include <linux/types.h>
#include <uapi/asm/pkey.h>
/*
* Generate (AES) random secure key.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param keytype one of the PKEY_KEYTYPE values
* @param seckey pointer to buffer receiving the secure key
* @return 0 on success, negative errno value on failure
*/
int pkey_genseckey(__u16 cardnr, __u16 domain,
__u32 keytype, struct pkey_seckey *seckey);
/*
* Generate (AES) secure key with given key value.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param keytype one of the PKEY_KEYTYPE values
* @param clrkey pointer to buffer with clear key data
* @param seckey pointer to buffer receiving the secure key
* @return 0 on success, negative errno value on failure
*/
int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_seckey *seckey);
/*
* Derive (AES) protected key from the (AES) secure key blob.
* @param cardnr may be -1 (use default card)
* @param domain may be -1 (use default domain)
* @param seckey pointer to buffer with the input secure key
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_sec2protkey(__u16 cardnr, __u16 domain,
const struct pkey_seckey *seckey,
struct pkey_protkey *protkey);
/*
* Derive (AES) protected key from a given clear key value.
* @param keytype one of the PKEY_KEYTYPE values
* @param clrkey pointer to buffer with clear key data
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_clr2protkey(__u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey);
/*
* Search for a matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
* @param seckey pointer to buffer with the input secure key
* @param cardnr pointer to cardnr, receives the card number on success
* @param domain pointer to domain, receives the domain number on success
* @param verify if set, always verify by fetching verification pattern
* from card
* @return 0 on success, negative errno value on failure. If no card could be
* found, -ENODEV is returned.
*/
int pkey_findcard(const struct pkey_seckey *seckey,
__u16 *cardnr, __u16 *domain, int verify);
/*
* Find card and transform secure key to protected key.
* @param seckey pointer to buffer with the input secure key
* @param protkey pointer to buffer receiving the protected key and
* additional info (type, length)
* @return 0 on success, negative errno value on failure
*/
int pkey_skey2pkey(const struct pkey_seckey *seckey,
struct pkey_protkey *protkey);
#endif /* _KAPI_PKEY_H */
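
A hypothetical in-kernel consumer of this API (the helper name is made
up for illustration): generate a random AES-256 secure key on the
default card/domain, then convert it to a protected key.

#include <asm/pkey.h>

static int make_protected_key(struct pkey_protkey *pk)
{
	struct pkey_seckey sk;
	int rc;

	rc = pkey_genseckey(-1, -1, PKEY_KEYTYPE_AES_256, &sk);
	if (rc)
		return rc;
	/* find a matching card and derive the protected key */
	return pkey_skey2pkey(&sk, pk);
}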


@@ -14,14 +14,16 @@
#include <linux/const.h>
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define CIF_FPU 3 /* restore FPU registers */
#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */
#define CIF_FPU 4 /* restore FPU registers */
#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE _BITUL(CIF_ASCE)
#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
#define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
#define _CIF_FPU _BITUL(CIF_FPU)
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
@@ -89,7 +91,8 @@ extern void execve_tail(void);
* User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
*/
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
(tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
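
(This is the TASK_SIZE fix from the merge message: kernel threads have
no mm, so the old TASK_SIZE_OF() dereferenced a NULL pointer; falling
back to TASK_MAX_SIZE keeps TASK_SIZE well defined for them.)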
@@ -200,10 +203,12 @@ struct stack_frame {
struct task_struct;
struct mm_struct;
struct seq_file;
struct pt_regs;
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);


@@ -14,6 +14,7 @@
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#define VERIFY_READ 0
@@ -36,18 +37,20 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
#define set_fs(x) \
do { \
unsigned long __pto; \
current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
__ctl_load(__pto, 7, 7); \
} while (0)
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
static inline void set_fs(mm_segment_t fs)
{
current->thread.mm_segment = fs;
if (segment_eq(fs, KERNEL_DS)) {
set_cpu_flag(CIF_ASCE_SECONDARY);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
} else {
clear_cpu_flag(CIF_ASCE_SECONDARY);
__ctl_load(S390_lowcore.user_asce, 7, 7);
}
}
static inline int __range_ok(unsigned long addr, unsigned long size)
{
return 1;
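
(set_fs() now records KERNEL_DS in CIF_ASCE_SECONDARY; the entry.S
hunks further down test that flag on the way back to user space and
branch to set_fs_fixup(), which restores USER_DS and warns about the
unbalanced set_fs.)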


@@ -24,6 +24,7 @@ header-y += mman.h
header-y += monwriter.h
header-y += msgbuf.h
header-y += param.h
header-y += pkey.h
header-y += poll.h
header-y += posix_types.h
header-y += ptrace.h


@@ -0,0 +1,112 @@
/*
* Userspace interface to the pkey device driver
*
* Copyright IBM Corp. 2017
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
*/
#ifndef _UAPI_PKEY_H
#define _UAPI_PKEY_H
#include <linux/ioctl.h>
#include <linux/types.h>
/*
* Ioctl calls supported by the pkey device driver
*/
#define PKEY_IOCTL_MAGIC 'p'
#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
/* defines for the type field within the pkey_protkey struct */
#define PKEY_KEYTYPE_AES_128 1
#define PKEY_KEYTYPE_AES_192 2
#define PKEY_KEYTYPE_AES_256 3
/* Struct to hold a secure key blob */
struct pkey_seckey {
__u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */
};
/* Struct to hold protected key and length info */
struct pkey_protkey {
__u32 type; /* key type, one of the PKEY_KEYTYPE values */
__u32 len; /* bytes actually stored in protkey[] */
__u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
};
/* Struct to hold a clear key value */
struct pkey_clrkey {
__u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
};
/*
* Generate secure key
*/
struct pkey_genseck {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
__u32 keytype; /* in: key type to generate */
struct pkey_seckey seckey; /* out: the secure key blob */
};
#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
/*
* Construct secure key from clear key value
*/
struct pkey_clr2seck {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
__u32 keytype; /* in: key type to generate */
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_seckey seckey; /* out: the secure key blob */
};
#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
/*
* Fabricate protected key from a secure key
*/
struct pkey_sec2protk {
__u16 cardnr; /* in: card to use or FFFF for any */
__u16 domain; /* in: domain or FFFF for any */
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
/*
* Fabricate protected key from a clear key value
*/
struct pkey_clr2protk {
__u32 keytype; /* in: key type to generate */
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
/*
* Search for matching crypto card based on the Master Key
* Verification Pattern provided inside a secure key.
*/
struct pkey_findcard {
struct pkey_seckey seckey; /* in: the secure key blob */
__u16 cardnr; /* out: card number */
__u16 domain; /* out: domain number */
};
#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
/*
* Combined together: findcard + sec2prot
*/
struct pkey_skey2pkey {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
#endif /* _UAPI_PKEY_H */
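
A hedged user space sketch of these ioctls, assuming the pkey module
exposes its misc device as /dev/pkey: generate a random secure key on
any card/domain, then convert it to a protected key.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

static int gen_and_convert(struct pkey_protkey *protkey)
{
	struct pkey_genseck gs = {
		.cardnr = 0xFFFF,	/* any card */
		.domain = 0xFFFF,	/* any domain */
		.keytype = PKEY_KEYTYPE_AES_128,
	};
	struct pkey_skey2pkey sp;
	int fd, rc;

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, PKEY_GENSECK, &gs);
	if (!rc) {
		sp.seckey = gs.seckey;
		rc = ioctl(fd, PKEY_SKEY2PKEY, &sp);
	}
	if (!rc)
		*protkey = sp.protkey;
	close(fd);
	return rc;
}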


@@ -50,7 +50,8 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
_CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP)
#define BASED(name) name-cleanup_critical(%r13)
@@ -339,8 +340,8 @@ ENTRY(system_call)
jo .Lsysc_notify_resume
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsysc_vxrs
TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
jo .Lsysc_uaccess
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
jnz .Lsysc_asce
j .Lsysc_return # beware of critical section cleanup
#
@@ -358,12 +359,15 @@ ENTRY(system_call)
jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _CIF_ASCE is set, load user space asce
# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_uaccess:
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
.Lsysc_asce:
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lsysc_return
TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
jz .Lsysc_return
larl %r14,.Lsysc_return
jg set_fs_fixup
#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -661,8 +665,8 @@ ENTRY(io_int_handler)
jo .Lio_notify_resume
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lio_vxrs
TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
jo .Lio_uaccess
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
jnz .Lio_asce
j .Lio_return # beware of critical section cleanup
#
@@ -675,12 +679,15 @@ ENTRY(io_int_handler)
j .Lio_return
#
# _CIF_ASCE is set, load user space asce
# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_uaccess:
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
.Lio_asce:
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lio_return
TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
jz .Lio_return
larl %r14,.Lio_return
jg set_fs_fixup
#
# CIF_FPU is set, restore floating-point controls and floating-point registers.


@@ -80,5 +80,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
DECLARE_PER_CPU(u64, mt_cycles[8]);
void verify_facilities(void);
void set_fs_fixup(void);
#endif /* _ENTRY_H */


@@ -116,6 +116,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
s390_handle_damage();
kill_task = 1;
}
/* Validate control registers */
if (!mci.cr) {
/*
* Control registers have unknown contents.
* Can't recover and therefore stopping machine.
*/
s390_handle_damage();
} else {
asm volatile(
" lctlg 0,15,0(%0)\n"
" ptlb\n"
: : "a" (&S390_lowcore.cregs_save_area) : "memory");
}
if (!mci.fp) {
/*
* Floating point registers can't be restored. If the
@@ -208,18 +221,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
kill_task = 1;
}
/* Validate control registers */
if (!mci.cr) {
/*
* Control registers have unknown contents.
* Can't recover and therefore stopping machine.
*/
s390_handle_damage();
} else {
asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area) : "memory");
}
/*
* We don't even try to validate the TOD register, since we simply
* can't write something sensible into that register.
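
(The reordering matters because the ASCEs live in the control
registers: they must be validated before anything else relies on
address translation, and the added ptlb purges any TLB entries that
were created while the registers still had unknown contents.)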


@@ -100,8 +100,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct fake_frame
{
@@ -156,7 +156,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
unsigned long tls = frame->childregs.gprs[6];
if (is_compat_task()) {
p->thread.acrs[0] = (unsigned int)tls;
} else {
@@ -234,3 +233,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
void set_fs_fixup(void)
{
struct pt_regs *regs = current_pt_regs();
static bool warned;
set_fs(USER_DS);
if (warned)
return;
WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
show_registers(regs);
warned = true;
}


@@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
spin_lock(&gmap->guest_table_lock);
entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
flush = (*entry != _SEGMENT_ENTRY_INVALID);
*entry = _SEGMENT_ENTRY_INVALID;
flush = (*entry != _SEGMENT_ENTRY_EMPTY);
*entry = _SEGMENT_ENTRY_EMPTY;
}
spin_unlock(&gmap->guest_table_lock);
return flush;
@@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
return rc;
ptl = pmd_lock(mm, pmd);
spin_lock(&gmap->guest_table_lock);
if (*table == _SEGMENT_ENTRY_INVALID) {
if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
if (!rc)


@@ -62,7 +62,7 @@ static inline unsigned long __pte_to_rste(pte_t pte)
rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
_SEGMENT_ENTRY_NOEXEC);
} else
rste = _SEGMENT_ENTRY_INVALID;
rste = _SEGMENT_ENTRY_EMPTY;
return rste;
}


@@ -62,19 +62,32 @@ config CRYPTO_DEV_GEODE
will be called geode-aes.
config ZCRYPT
tristate "Support for PCI-attached cryptographic adapters"
tristate "Support for s390 cryptographic adapters"
depends on S390
select HW_RANDOM
help
Select this option if you want to use a PCI-attached cryptographic
adapter like:
+ PCI Cryptographic Accelerator (PCICA)
+ PCI Cryptographic Coprocessor (PCICC)
Select this option if you want to enable support for
s390 cryptographic adapters like:
+ PCI-X Cryptographic Coprocessor (PCIXCC)
+ Crypto Express2 Coprocessor (CEX2C)
+ Crypto Express2 Accelerator (CEX2A)
+ Crypto Express3 Coprocessor (CEX3C)
+ Crypto Express3 Accelerator (CEX3A)
+ Crypto Express 2,3,4 or 5 Coprocessor (CEXxC)
+ Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
+ Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
depends on ZCRYPT
help
With this option enabled the pkey kernel module provides an API
for creation and handling of protected keys. Other parts of the
kernel or userspace applications may use these functions.
Select this option if you want to enable the kernel and userspace
API for protected key handling.
Please note that creating protected keys from secure keys requires
at least one CEX card in coprocessor mode to be available at
runtime.
config CRYPTO_SHA1_S390
tristate "SHA1 digest algorithm"
@@ -124,6 +137,7 @@ config CRYPTO_AES_S390
depends on S390
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
select PKEY
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197).


@@ -4864,7 +4864,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
break;
case 3: /* tsa_intrg */
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.intrg.: not supportet yet\n");
" tsb->tsa.intrg.: not supported yet\n");
break;
}


@@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
int chsc(void *chsc_area)
{
typedef struct { char _[4096]; } addr_type;
int cc;
int cc = -EIO;
asm volatile(
" .insn rre,0xb25f0000,%2,0\n"
" ipm %0\n"
"0: ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "=m" (*(addr_type *) chsc_area)
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (cc), "=m" (*(addr_type *) chsc_area)
: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
: "cc");
trace_s390_cio_chsc(chsc_area, cc);
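
(cc is preinitialized to -EIO and an exception table entry brackets the
CHSC instruction, so a program check during CHSC resumes at label 1 and
the caller sees -EIO instead of an unhandled exception.)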


@@ -10,3 +10,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
obj-$(CONFIG_PKEY) += pkey.o


@@ -1107,16 +1107,6 @@ static void ap_config_timeout(unsigned long ptr)
queue_work(system_long_wq, &ap_scan_work);
}
static void ap_reset_domain(void)
{
int i;
if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
return;
for (i = 0; i < AP_DEVICES; i++)
ap_rapq(AP_MKQID(i, ap_domain_index));
}
static void ap_reset_all(void)
{
int i, j;


@@ -58,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev,
static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t ap_req_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
unsigned int req_cnt;
@@ -72,7 +72,23 @@ static ssize_t ap_request_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_req_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_card *ac = to_ap_card(dev);
struct ap_queue *aq;
spin_lock_bh(&ap_list_lock);
for_each_ap_queue(aq, ac)
aq->total_request_count = 0;
spin_unlock_bh(&ap_list_lock);
atomic_set(&ac->total_request_count, 0);
return count;
}
static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)


@@ -459,9 +459,9 @@ EXPORT_SYMBOL(ap_queue_resume);
/*
* AP queue related attributes.
*/
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t ap_req_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int req_cnt;
@@ -472,7 +472,20 @@ static ssize_t ap_request_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
static ssize_t ap_req_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
spin_lock_bh(&aq->lock);
aq->total_request_count = 0;
spin_unlock_bh(&aq->lock);
return count;
}
static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
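
(With the card and queue hunks applied, the counters become writable;
e.g., assuming the usual ap bus sysfs layout, writing anything to
/sys/bus/ap/devices/card03/request_count resets the card counter and
those of its queues, while writing a queue's request_count resets only
that queue.)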

File diff suppressed because it is too large


@@ -374,7 +374,7 @@ out:
return rc;
}
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -444,6 +444,7 @@ out:
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
static bool is_desired_ep11_card(unsigned int dev_id,
unsigned short target_num,
@@ -619,7 +620,7 @@ out:
return rc;
}
static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;


@@ -190,5 +190,7 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
#endif /* _ZCRYPT_API_H_ */