Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 - Optimised AES/SHA1 for ARM.
 - IPsec ESN support in talitos and caam.
 - x86_64/avx implementation of cast5/cast6.
 - Add/use multi-algorithm registration helpers where possible (see the sketch after this list).
 - Added IBM Power7+ in-Nest support.
 - Misc fixes.
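
For context on the "multi-algorithm registration helpers" item above: drivers are converted from one crypto_register_alg() call per algorithm (plus manual .cra_list initialisation) to a single array registered with crypto_register_algs(), as the aesni-intel and cast5/cast6 glue diffs further down show. The following is an illustrative sketch only, not code from this merge; the "xor-demo"/demo_* names are made up for the example.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/string.h>

#define DEMO_BLOCK_SIZE 16
#define DEMO_KEY_SIZE   16

struct demo_ctx {
	u8 key[DEMO_KEY_SIZE];
};

static int demo_setkey(struct crypto_tfm *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct demo_ctx *ctx = crypto_tfm_ctx(tfm);

	/* keylen was already checked against cia_min/max_keysize by the core */
	memcpy(ctx->key, key, DEMO_KEY_SIZE);
	return 0;
}

/* toy transform: XOR the block with the key (same operation both ways) */
static void demo_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct demo_ctx *ctx = crypto_tfm_ctx(tfm);
	int i;

	for (i = 0; i < DEMO_BLOCK_SIZE; i++)
		dst[i] = src[i] ^ ctx->key[i];
}

/* all algorithms live in one array; no .cra_list = LIST_HEAD_INIT() needed */
static struct crypto_alg demo_algs[] = { {
	.cra_name		= "xor-demo",
	.cra_driver_name	= "xor-demo-generic",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= DEMO_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct demo_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = DEMO_KEY_SIZE,
			.cia_max_keysize = DEMO_KEY_SIZE,
			.cia_setkey	 = demo_setkey,
			.cia_encrypt	 = demo_crypt,
			.cia_decrypt	 = demo_crypt,
		}
	}
}, {
	.cra_name		= "xor-demo2",
	.cra_driver_name	= "xor-demo2-generic",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= DEMO_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct demo_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = DEMO_KEY_SIZE,
			.cia_max_keysize = DEMO_KEY_SIZE,
			.cia_setkey	 = demo_setkey,
			.cia_encrypt	 = demo_crypt,
			.cia_decrypt	 = demo_crypt,
		}
	}
} };

static int __init demo_init(void)
{
	/* one call registers the whole array and unwinds it on failure */
	return crypto_register_algs(demo_algs, ARRAY_SIZE(demo_algs));
}

static void __exit demo_exit(void)
{
	crypto_unregister_algs(demo_algs, ARRAY_SIZE(demo_algs));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("crypto_register_algs() usage sketch (not part of this merge)");

The crypto core initialises cra_list during registration, so the explicit LIST_HEAD_INIT()/INIT_LIST_HEAD() lines are dropped throughout the diffs below (see the s390, x86 and aesni_init() hunks).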

Fix up trivial conflicts in crypto/Kconfig due to the sparc64 crypto
config options being added next to the new ARM ones.

[ Side note: cut-and-paste duplicate help texts make those conflicts
  harder to read than necessary, thanks to git being smart about
  minimizing conflicts and maximizing the common parts... ]

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
  crypto: x86/glue_helper - fix storing of new IV in CBC encryption
  crypto: cast5/avx - fix storing of new IV in CBC encryption
  crypto: tcrypt - add missing tests for camellia and ghash
  crypto: testmgr - make test_aead also test 'dst != src' code paths
  crypto: testmgr - make test_skcipher also test 'dst != src' code paths
  crypto: testmgr - add test vectors for CTR mode IV increasement
  crypto: testmgr - add test vectors for partial ctr(cast5) and ctr(cast6)
  crypto: testmgr - allow non-multi page and multi page skcipher tests from same test template
  crypto: caam - increase TRNG clocks per sample
  crypto, tcrypt: remove local_bh_disable/enable() around local_irq_disable/enable()
  crypto: tegra-aes - fix error return code
  crypto: crypto4xx - fix error return code
  crypto: hifn_795x - fix error return code
  crypto: ux500 - fix error return code
  crypto: caam - fix error IDs for SEC v5.x RNG4
  hwrng: mxc-rnga - Access data via structure
  hwrng: mxc-rnga - Adapt clocks to new i.mx clock framework
  crypto: caam - add IPsec ESN support
  crypto: 842 - remove .cra_list initialization
  Revert "[CRYPTO] cast6: inline bloat--"
  ...
commit d66e6737d4
Linus Torvalds, 2012-10-04 09:06:34 -07:00
100 changed files with 11466 additions and 1746 deletions

@ -3438,6 +3438,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
S: Maintained
F: arch/ia64/
IBM Power in-Nest Crypto Acceleration
M: Kent Yoder <key@linux.vnet.ibm.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/nx/
IBM Power 842 compression accelerator
M: Robert Jennings <rcj@linux.vnet.ibm.com>
S: Supported
F: drivers/crypto/nx/nx-842.c
F: include/linux/nx842.h
IBM Power Linux RAID adapter
M: Brian King <brking@us.ibm.com>
S: Supported


@ -254,6 +254,7 @@ core-$(CONFIG_VFP) += arch/arm/vfp/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
core-y += arch/arm/net/
core-y += arch/arm/crypto/
core-y += $(machdirs) $(platdirs)
drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/

arch/arm/crypto/Makefile (new file)

@ -0,0 +1,9 @@
#
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
aes-arm-y := aes-armv4.o aes_glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o

arch/arm/crypto/aes-armv4.S (new file, 1112 lines; diff suppressed because it is too large)

arch/arm/crypto/aes_glue.c (new file)

@ -0,0 +1,108 @@
/*
* Glue Code for the asm optimized version of the AES Cipher Algorithm
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>
#define AES_MAXNR 14
typedef struct {
unsigned int rd_key[4 *(AES_MAXNR + 1)];
int rounds;
} AES_KEY;
struct AES_CTX {
AES_KEY enc_key;
AES_KEY dec_key;
};
asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
AES_encrypt(src, dst, &ctx->enc_key);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
AES_decrypt(src, dst, &ctx->dec_key);
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
switch (key_len) {
case AES_KEYSIZE_128:
key_len = 128;
break;
case AES_KEYSIZE_192:
key_len = 192;
break;
case AES_KEYSIZE_256:
key_len = 256;
break;
default:
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
if (private_AES_set_encrypt_key(in_key, key_len, &ctx->enc_key) == -1) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* private_AES_set_decrypt_key expects an encryption key as input */
ctx->dec_key = ctx->enc_key;
if (private_AES_set_decrypt_key(in_key, key_len, &ctx->dec_key) == -1) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return 0;
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct AES_CTX),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");
MODULE_ALIAS("aes-asm");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");


@ -0,0 +1,503 @@
#define __ARM_ARCH__ __LINUX_ARM_ARCH__
@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ sha1_block procedure for ARMv4.
@
@ January 2007.
@ Size/performance trade-off
@ ====================================================================
@ impl size in bytes comp cycles[*] measured performance
@ ====================================================================
@ thumb 304 3212 4420
@ armv4-small 392/+29% 1958/+64% 2250/+96%
@ armv4-compact 740/+89% 1552/+26% 1840/+22%
@ armv4-large 1420/+92% 1307/+19% 1370/+34%[***]
@ full unroll ~5100/+260% ~1260/+4% ~1300/+5%
@ ====================================================================
@ thumb = same as 'small' but in Thumb instructions[**] and
@ with recurring code in two private functions;
@ small = detached Xload/update, loops are folded;
@ compact = detached Xload/update, 5x unroll;
@ large = interleaved Xload/update, 5x unroll;
@ full unroll = interleaved Xload/update, full unroll, estimated[!];
@
@ [*] Manually counted instructions in "grand" loop body. Measured
@ performance is affected by prologue and epilogue overhead,
@ i-cache availability, branch penalties, etc.
@ [**] While each Thumb instruction is twice smaller, they are not as
@ diverse as ARM ones: e.g., there are only two arithmetic
@ instructions with 3 arguments, no [fixed] rotate, addressing
@ modes are limited. As result it takes more instructions to do
@ the same job in Thumb, therefore the code is never twice as
@ small and always slower.
@ [***] which is also ~35% better than compiler generated code. Dual-
@ issue Cortex A8 core was measured to process input block in
@ ~990 cycles.
@ August 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 13% improvement on
@ Cortex A8 core and in absolute terms ~870 cycles per input block
@ [or 13.6 cycles per byte].
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.
.text
.global sha1_block_data_order
.type sha1_block_data_order,%function
.align 2
sha1_block_data_order:
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
ldr r8,.LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
.L_00_15:
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
teq r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
sub sp,sp,#25*4
cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
teq r14,sp @ preserve carry
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
ldr r8,.LK_40_59
sub sp,sp,#20*4 @ [+2]
.L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
teq r14,sp
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2
bne .Lloop @ [+18], total 1307
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
.size sha1_block_data_order,.-sha1_block_data_order
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align 2

arch/arm/crypto/sha1_glue.c (new file)

@ -0,0 +1,179 @@
/*
* Cryptographic API.
* Glue code for the SHA1 Secure Hash Algorithm assembler implementation
*
* This file is based on sha1_generic.c and sha1_ssse3_glue.c
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
struct SHA1_CTX {
uint32_t h0,h1,h2,h3,h4;
u64 count;
u8 data[SHA1_BLOCK_SIZE];
};
asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest,
const unsigned char *data, unsigned int rounds);
static int sha1_init(struct shash_desc *desc)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
memset(sctx, 0, sizeof(*sctx));
sctx->h0 = SHA1_H0;
sctx->h1 = SHA1_H1;
sctx->h2 = SHA1_H2;
sctx->h3 = SHA1_H3;
sctx->h4 = SHA1_H4;
return 0;
}
static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
unsigned int len, unsigned int partial)
{
unsigned int done = 0;
sctx->count += len;
if (partial) {
done = SHA1_BLOCK_SIZE - partial;
memcpy(sctx->data + partial, data, done);
sha1_block_data_order(sctx, sctx->data, 1);
}
if (len - done >= SHA1_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
sha1_block_data_order(sctx, data + done, rounds);
done += rounds * SHA1_BLOCK_SIZE;
}
memcpy(sctx->data, data + done, len - done);
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
int res;
/* Handle the fast case right here */
if (partial + len < SHA1_BLOCK_SIZE) {
sctx->count += len;
memcpy(sctx->data + partial, data, len);
return 0;
}
res = __sha1_update(sctx, data, len, partial);
return res;
}
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
__be32 *dst = (__be32 *)out;
__be64 bits;
static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 and append length */
index = sctx->count % SHA1_BLOCK_SIZE;
padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
/* We need to fill a whole block for __sha1_update() */
if (padlen <= 56) {
sctx->count += padlen;
memcpy(sctx->data + index, padding, padlen);
} else {
__sha1_update(sctx, padding, padlen, index);
}
__sha1_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(((u32 *)sctx)[i]);
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
static int sha1_export(struct shash_desc *desc, void *out)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
.export = sha1_export,
.import = sha1_import,
.descsize = sizeof(struct SHA1_CTX),
.statesize = sizeof(struct SHA1_CTX),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha1_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit sha1_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(sha1_mod_init);
module_exit(sha1_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
MODULE_ALIAS("sha1");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");


@ -486,7 +486,8 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_NX=m
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y


@ -369,7 +369,8 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_NX=m
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y


@ -705,6 +705,7 @@ static void __init early_cmdline_parse(void)
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
#define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */
#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
/* Option Vector 6: IBM PAPR hints */
@ -774,8 +775,7 @@ static unsigned char ibm_architecture_vec[] = {
0,
0,
0,
OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR,
OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
0,


@ -216,7 +216,6 @@ static struct crypto_alg aes_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_u = {
@ -398,7 +397,6 @@ static struct crypto_alg ecb_aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_u = {
@ -508,7 +506,6 @@ static struct crypto_alg cbc_aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_u = {
@ -710,7 +707,6 @@ static struct crypto_alg xts_aes_alg = {
.cra_ctxsize = sizeof(struct s390_xts_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xts_aes_alg.cra_list),
.cra_init = xts_fallback_init,
.cra_exit = xts_fallback_exit,
.cra_u = {
@ -832,7 +828,6 @@ static struct crypto_alg ctr_aes_alg = {
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ctr_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,


@ -70,7 +70,6 @@ static struct crypto_alg des_alg = {
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = DES_KEY_SIZE,
@ -163,7 +162,6 @@ static struct crypto_alg ecb_des_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
@ -206,7 +204,6 @@ static struct crypto_alg cbc_des_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
@ -271,7 +268,6 @@ static struct crypto_alg des3_alg = {
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_KEY_SIZE,
@ -314,8 +310,6 @@ static struct crypto_alg ecb_des3_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
ecb_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_KEY_SIZE,
@ -358,8 +352,6 @@ static struct crypto_alg cbc_des3_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
cbc_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_KEY_SIZE,
@ -452,7 +444,6 @@ static struct crypto_alg ctr_des_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ctr_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
@ -496,7 +487,6 @@ static struct crypto_alg ctr_des3_alg = {
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ctr_des3_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_KEY_SIZE,


@ -135,7 +135,6 @@ static struct shash_alg ghash_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
},
};


@ -12,6 +12,8 @@ obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o
obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
@ -32,6 +34,8 @@ serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o
cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o


@ -40,7 +40,6 @@ static struct crypto_alg aes_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,


@ -28,6 +28,9 @@
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
@ -41,18 +44,10 @@
#define HAS_CTR
#endif
#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif
#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif
#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
@ -79,6 +74,16 @@ struct aesni_hash_subkey_req_data {
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16
struct aesni_lrw_ctx {
struct lrw_table_ctx lrw_table;
u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
struct aesni_xts_ctx {
u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
@ -398,13 +403,6 @@ static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
#endif
#endif
#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
}
#endif
#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
@ -412,12 +410,160 @@ static int ablk_pcbc_init(struct crypto_tfm *tfm)
}
#endif
#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
aesni_ecb_enc(ctx, blks, blks, nbytes);
}
static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
aesni_ecb_dec(ctx, blks, blks, nbytes);
}
static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
keylen - AES_BLOCK_SIZE);
if (err)
return err;
return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
lrw_free_table(&ctx->lrw_table);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[8];
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
.crypt_fn = lrw_xts_encrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
ret = lrw_crypt(desc, dst, src, nbytes, &req);
kernel_fpu_end();
return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[8];
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
.crypt_fn = lrw_xts_decrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
ret = lrw_crypt(desc, dst, src, nbytes, &req);
kernel_fpu_end();
return ret;
}
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int err;
/* key consists of keys of equal size concatenated, therefore
* the length must be even
*/
if (keylen % 2) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* first half of xts-key is for crypt */
err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
if (err)
return err;
/* second half of xts-key is for tweak */
return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
keylen / 2);
}
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[8];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
.tweak_fn = XTS_TWEAK_CAST(aesni_enc),
.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
.crypt_fn = lrw_xts_encrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
ret = xts_crypt(desc, dst, src, nbytes, &req);
kernel_fpu_end();
return ret;
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[8];
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
.tweak_fn = XTS_TWEAK_CAST(aesni_enc),
.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
.crypt_fn = lrw_xts_decrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
ret = xts_crypt(desc, dst, src, nbytes, &req);
kernel_fpu_end();
return ret;
}
#endif
#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
@ -1035,30 +1181,6 @@ static struct crypto_alg aesni_algs[] = { {
},
#endif
#endif
#ifdef HAS_LRW
}, {
.cra_name = "lrw(aes)",
.cra_driver_name = "lrw-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_lrw_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
#endif
#ifdef HAS_PCBC
}, {
.cra_name = "pcbc(aes)",
@ -1083,7 +1205,69 @@ static struct crypto_alg aesni_algs[] = { {
},
},
#endif
#ifdef HAS_XTS
}, {
.cra_name = "__lrw-aes-aesni",
.cra_driver_name = "__driver-lrw-aes-aesni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesni_lrw_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_exit = lrw_aesni_exit_tfm,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = lrw_aesni_setkey,
.encrypt = lrw_encrypt,
.decrypt = lrw_decrypt,
},
},
}, {
.cra_name = "__xts-aes-aesni",
.cra_driver_name = "__driver-xts-aes-aesni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesni_xts_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_aesni_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
},
},
}, {
.cra_name = "lrw(aes)",
.cra_driver_name = "lrw-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-aesni",
@ -1094,7 +1278,7 @@ static struct crypto_alg aesni_algs[] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_xts_init,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
@ -1106,7 +1290,6 @@ static struct crypto_alg aesni_algs[] = { {
.decrypt = ablk_decrypt,
},
},
#endif
} };
@ -1118,7 +1301,7 @@ MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init aesni_init(void)
{
int err, i;
int err;
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
@ -1127,9 +1310,6 @@ static int __init aesni_init(void)
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(aesni_algs); i++)
INIT_LIST_HEAD(&aesni_algs[i].cra_list);
return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}


@ -367,7 +367,6 @@ static struct crypto_alg bf_algs[4] = { {
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(bf_algs[0].cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
@ -387,7 +386,6 @@ static struct crypto_alg bf_algs[4] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(bf_algs[1].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = BF_MIN_KEY_SIZE,
@ -407,7 +405,6 @@ static struct crypto_alg bf_algs[4] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(bf_algs[2].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = BF_MIN_KEY_SIZE,
@ -428,7 +425,6 @@ static struct crypto_alg bf_algs[4] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(bf_algs[3].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = BF_MIN_KEY_SIZE,

File diff suppressed because it is too large

@ -0,0 +1,376 @@
/*
* Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "cast5-avx-x86_64-asm_64.S"
.extern cast5_s1
.extern cast5_s2
.extern cast5_s3
.extern cast5_s4
/* structure of crypto context */
#define km 0
#define kr (16*4)
#define rr ((16*4)+16)
/* s-boxes */
#define s1 cast5_s1
#define s2 cast5_s2
#define s3 cast5_s3
#define s4 cast5_s4
/**********************************************************************
16-way AVX cast5
**********************************************************************/
#define CTX %rdi
#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7
#define RX %xmm8
#define RKM %xmm9
#define RKR %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32 %xmm13
#define R1ST %xmm14
#define RTMP %xmm15
#define RID1 %rbp
#define RID1d %ebp
#define RID2 %rsi
#define RID2d %esi
#define RGI1 %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2 %rcx
#define RGI2bl %cl
#define RGI2bh %ch
#define RGI3 %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4 %rbx
#define RGI4bl %bl
#define RGI4bh %bh
#define RFS1 %r8
#define RFS1d %r8d
#define RFS2 %r9
#define RFS2d %r9d
#define RFS3 %r10
#define RFS3d %r10d
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
movzbl src ## bh, RID1d; \
movzbl src ## bl, RID2d; \
shrq $16, src; \
movl s1(, RID1, 4), dst ## d; \
op1 s2(, RID2, 4), dst ## d; \
movzbl src ## bh, RID1d; \
movzbl src ## bl, RID2d; \
interleave_op(il_reg); \
op2 s3(, RID1, 4), dst ## d; \
op3 s4(, RID2, 4), dst ## d;
#define dummy(d) /* do nothing */
#define shr_next(reg) \
shrq $16, reg;
#define F_head(a, x, gi1, gi2, op0) \
op0 a, RKM, x; \
vpslld RKRF, x, RTMP; \
vpsrld RKRR, x, x; \
vpor RTMP, x, x; \
\
vmovq x, gi1; \
vpextrq $1, x, gi2;
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
\
lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
shlq $32, RFS2; \
orq RFS1, RFS2; \
lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
shlq $32, RFS1; \
orq RFS1, RFS3; \
\
vmovq RFS2, x; \
vpinsrq $1, RFS3, x, x;
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
F_head(b1, RX, RGI1, RGI2, op0); \
F_head(b2, RX, RGI3, RGI4, op0); \
\
F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
\
vpxor a1, RX, a1; \
vpxor a2, RTMP, a2;
#define F1_2(a1, b1, a2, b2) \
F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
#define subround(a1, b1, a2, b2, f) \
F ## f ## _2(a1, b1, a2, b2);
#define round(l, r, n, f) \
vbroadcastss (km+(4*n))(CTX), RKM; \
vpand R1ST, RKR, RKRF; \
vpsubq RKRF, R32, RKRR; \
vpsrldq $1, RKR, RKR; \
subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
#define enc_preload_rkr() \
vbroadcastss .L16_mask, RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor kr(CTX), RKR, RKR;
#define dec_preload_rkr() \
vbroadcastss .L16_mask, RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor kr(CTX), RKR, RKR; \
vpshufb .Lbswap128_mask, RKR, RKR;
#define transpose_2x4(x0, x1, t0, t1) \
vpunpckldq x1, x0, t0; \
vpunpckhdq x1, x0, t1; \
\
vpunpcklqdq t1, t0, x0; \
vpunpckhqdq t1, t0, x1;
#define inpack_blocks(in, x0, x1, t0, t1, rmask) \
vmovdqu (0*4*4)(in), x0; \
vmovdqu (1*4*4)(in), x1; \
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
\
transpose_2x4(x0, x1, t0, t1)
#define outunpack_blocks(out, x0, x1, t0, t1, rmask) \
transpose_2x4(x0, x1, t0, t1) \
\
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vmovdqu x0, (0*4*4)(out); \
vmovdqu x1, (1*4*4)(out);
#define outunpack_xor_blocks(out, x0, x1, t0, t1, rmask) \
transpose_2x4(x0, x1, t0, t1) \
\
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vpxor (0*4*4)(out), x0, x0; \
vmovdqu x0, (0*4*4)(out); \
vpxor (1*4*4)(out), x1, x1; \
vmovdqu x1, (1*4*4)(out);
.data
.align 16
.Lbswap_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
.byte 16, 16, 16, 16
.L32_mask:
.byte 32, 0, 0, 0
.Lfirst_mask:
.byte 0x1f, 0, 0, 0
.text
.align 16
.global __cast5_enc_blk_16way
.type __cast5_enc_blk_16way,@function;
__cast5_enc_blk_16way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
pushq %rbp;
pushq %rbx;
pushq %rcx;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
enc_preload_rkr();
leaq 1*(2*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
leaq 2*(2*4*4)(%rdx), %rax;
inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
leaq 3*(2*4*4)(%rdx), %rax;
inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);
movq %rsi, %r11;
round(RL, RR, 0, 1);
round(RR, RL, 1, 2);
round(RL, RR, 2, 3);
round(RR, RL, 3, 1);
round(RL, RR, 4, 2);
round(RR, RL, 5, 3);
round(RL, RR, 6, 1);
round(RR, RL, 7, 2);
round(RL, RR, 8, 3);
round(RR, RL, 9, 1);
round(RL, RR, 10, 2);
round(RR, RL, 11, 3);
movzbl rr(CTX), %eax;
testl %eax, %eax;
jnz __skip_enc;
round(RL, RR, 12, 1);
round(RR, RL, 13, 2);
round(RL, RR, 14, 3);
round(RR, RL, 15, 1);
__skip_enc:
popq %rcx;
popq %rbx;
popq %rbp;
vmovdqa .Lbswap_mask, RKM;
leaq 1*(2*4*4)(%r11), %rax;
testb %cl, %cl;
jnz __enc_xor16;
outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
leaq 2*(2*4*4)(%r11), %rax;
outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
leaq 3*(2*4*4)(%r11), %rax;
outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
ret;
__enc_xor16:
outunpack_xor_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
leaq 2*(2*4*4)(%r11), %rax;
outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
leaq 3*(2*4*4)(%r11), %rax;
outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
ret;
.align 16
.global cast5_dec_blk_16way
.type cast5_dec_blk_16way,@function;
cast5_dec_blk_16way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
pushq %rbp;
pushq %rbx;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
dec_preload_rkr();
leaq 1*(2*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
leaq 2*(2*4*4)(%rdx), %rax;
inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
leaq 3*(2*4*4)(%rdx), %rax;
inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);
movq %rsi, %r11;
movzbl rr(CTX), %eax;
testl %eax, %eax;
jnz __skip_dec;
round(RL, RR, 15, 1);
round(RR, RL, 14, 3);
round(RL, RR, 13, 2);
round(RR, RL, 12, 1);
__dec_tail:
round(RL, RR, 11, 3);
round(RR, RL, 10, 2);
round(RL, RR, 9, 1);
round(RR, RL, 8, 3);
round(RL, RR, 7, 2);
round(RR, RL, 6, 1);
round(RL, RR, 5, 3);
round(RR, RL, 4, 2);
round(RL, RR, 3, 1);
round(RR, RL, 2, 3);
round(RL, RR, 1, 2);
round(RR, RL, 0, 1);
vmovdqa .Lbswap_mask, RKM;
popq %rbx;
popq %rbp;
leaq 1*(2*4*4)(%r11), %rax;
outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
leaq 2*(2*4*4)(%r11), %rax;
outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
leaq 3*(2*4*4)(%r11), %rax;
outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
ret;
__skip_dec:
vpsrldq $4, RKR, RKR;
jmp __dec_tail;


@ -0,0 +1,530 @@
/*
* Glue Code for the AVX assembler implementation of the Cast5 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAST5_PARALLEL_BLOCKS 16
asmlinkage void __cast5_enc_blk_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void cast5_dec_blk_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
static inline void cast5_enc_blk_xway(struct cast5_ctx *ctx, u8 *dst,
const u8 *src)
{
__cast5_enc_blk_16way(ctx, dst, src, false);
}
static inline void cast5_enc_blk_xway_xor(struct cast5_ctx *ctx, u8 *dst,
const u8 *src)
{
__cast5_enc_blk_16way(ctx, dst, src, true);
}
static inline void cast5_dec_blk_xway(struct cast5_ctx *ctx, u8 *dst,
const u8 *src)
{
cast5_dec_blk_16way(ctx, dst, src);
}
static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
NULL, fpu_enabled, nbytes);
}
static inline void cast5_fpu_end(bool fpu_enabled)
{
return glue_fpu_end(fpu_enabled);
}
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
{
bool fpu_enabled = false;
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
int err;
err = blkcipher_walk_virt(desc, walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
do {
if (enc)
cast5_enc_blk_xway(ctx, wdst, wsrc);
else
cast5_dec_blk_xway(ctx, wdst, wsrc);
wsrc += bsize * CAST5_PARALLEL_BLOCKS;
wdst += bsize * CAST5_PARALLEL_BLOCKS;
nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
if (enc)
__cast5_encrypt(ctx, wdst, wsrc);
else
__cast5_decrypt(ctx, wdst, wsrc);
wsrc += bsize;
wdst += bsize;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
err = blkcipher_walk_done(desc, walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
return err;
}
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, true);
}
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, false);
}
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 *iv = (u64 *)walk->iv;
do {
*dst = *src ^ *iv;
__cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
*(u64 *)walk->iv = *iv;
return nbytes;
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __cbc_encrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 ivs[CAST5_PARALLEL_BLOCKS - 1];
u64 last_iv;
int i;
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
last_iv = *src;
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
do {
nbytes -= bsize * (CAST5_PARALLEL_BLOCKS - 1);
src -= CAST5_PARALLEL_BLOCKS - 1;
dst -= CAST5_PARALLEL_BLOCKS - 1;
for (i = 0; i < CAST5_PARALLEL_BLOCKS - 1; i++)
ivs[i] = src[i];
cast5_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
for (i = 0; i < CAST5_PARALLEL_BLOCKS - 1; i++)
*(dst + (i + 1)) ^= *(ivs + i);
nbytes -= bsize;
if (nbytes < bsize)
goto done;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
for (;;) {
__cast5_decrypt(ctx, (u8 *)dst, (u8 *)src);
nbytes -= bsize;
if (nbytes < bsize)
break;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
}
done:
*dst ^= *(u64 *)walk->iv;
*(u64 *)walk->iv = last_iv;
return nbytes;
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
nbytes = __cbc_decrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
return err;
}
static void ctr_crypt_final(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *ctrblk = walk->iv;
u8 keystream[CAST5_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
__cast5_encrypt(ctx, keystream, ctrblk);
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
__be64 ctrblocks[CAST5_PARALLEL_BLOCKS];
int i;
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
do {
/* create ctrblks for parallel encrypt */
for (i = 0; i < CAST5_PARALLEL_BLOCKS; i++) {
if (dst != src)
dst[i] = src[i];
ctrblocks[i] = cpu_to_be64(ctrblk++);
}
cast5_enc_blk_xway_xor(ctx, (u8 *)dst,
(u8 *)ctrblocks);
src += CAST5_PARALLEL_BLOCKS;
dst += CAST5_PARALLEL_BLOCKS;
nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
if (dst != src)
*dst = *src;
ctrblocks[0] = cpu_to_be64(ctrblk++);
__cast5_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
*dst ^= ctrblocks[0];
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
*(__be64 *)walk->iv = cpu_to_be64(ctrblk);
return nbytes;
}
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
nbytes = __ctr_crypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
static struct crypto_alg cast5_algs[6] = { {
.cra_name = "__ecb-cast5-avx",
.cra_driver_name = "__driver-ecb-cast5-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.setkey = cast5_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
},
}, {
.cra_name = "__cbc-cast5-avx",
.cra_driver_name = "__driver-cbc-cast5-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.setkey = cast5_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
},
}, {
.cra_name = "__ctr-cast5-avx",
.cra_driver_name = "__driver-ctr-cast5-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.ivsize = CAST5_BLOCK_SIZE,
.setkey = cast5_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
},
},
}, {
.cra_name = "ecb(cast5)",
.cra_driver_name = "ecb-cast5-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "cbc(cast5)",
.cra_driver_name = "cbc-cast5-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.ivsize = CAST5_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = __ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "ctr(cast5)",
.cra_driver_name = "ctr-cast5-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST5_MIN_KEY_SIZE,
.max_keysize = CAST5_MAX_KEY_SIZE,
.ivsize = CAST5_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_encrypt,
.geniv = "chainiv",
},
},
} };
static int __init cast5_init(void)
{
u64 xcr0;
if (!cpu_has_avx || !cpu_has_osxsave) {
pr_info("AVX instructions are not detected.\n");
return -ENODEV;
}
xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return -ENODEV;
}
return crypto_register_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
}
static void __exit cast5_exit(void)
{
crypto_unregister_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
}
module_init(cast5_init);
module_exit(cast5_exit);
MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("cast5");

View File

@ -0,0 +1,383 @@
/*
* Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "cast6-avx-x86_64-asm_64.S"
.extern cast6_s1
.extern cast6_s2
.extern cast6_s3
.extern cast6_s4
/* structure of crypto context */
#define km 0
#define kr (12*4*4)
/* s-boxes */
#define s1 cast6_s1
#define s2 cast6_s2
#define s3 cast6_s3
#define s4 cast6_s4
/**********************************************************************
8-way AVX cast6
**********************************************************************/
#define CTX %rdi
#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3
#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7
#define RX %xmm8
#define RKM %xmm9
#define RKR %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32 %xmm13
#define R1ST %xmm14
#define RTMP %xmm15
#define RID1 %rbp
#define RID1d %ebp
#define RID2 %rsi
#define RID2d %esi
#define RGI1 %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2 %rcx
#define RGI2bl %cl
#define RGI2bh %ch
#define RGI3 %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4 %rbx
#define RGI4bl %bl
#define RGI4bh %bh
#define RFS1 %r8
#define RFS1d %r8d
#define RFS2 %r9
#define RFS2d %r9d
#define RFS3 %r10
#define RFS3d %r10d
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
movzbl src ## bh, RID1d; \
movzbl src ## bl, RID2d; \
shrq $16, src; \
movl s1(, RID1, 4), dst ## d; \
op1 s2(, RID2, 4), dst ## d; \
movzbl src ## bh, RID1d; \
movzbl src ## bl, RID2d; \
interleave_op(il_reg); \
op2 s3(, RID1, 4), dst ## d; \
op3 s4(, RID2, 4), dst ## d;
#define dummy(d) /* do nothing */
#define shr_next(reg) \
shrq $16, reg;
#define F_head(a, x, gi1, gi2, op0) \
op0 a, RKM, x; \
vpslld RKRF, x, RTMP; \
vpsrld RKRR, x, x; \
vpor RTMP, x, x; \
\
vmovq x, gi1; \
vpextrq $1, x, gi2;
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
\
lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
shlq $32, RFS2; \
orq RFS1, RFS2; \
lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
shlq $32, RFS1; \
orq RFS1, RFS3; \
\
vmovq RFS2, x; \
vpinsrq $1, RFS3, x, x;
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
F_head(b1, RX, RGI1, RGI2, op0); \
F_head(b2, RX, RGI3, RGI4, op0); \
\
F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
\
vpxor a1, RX, a1; \
vpxor a2, RTMP, a2;
#define F1_2(a1, b1, a2, b2) \
F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
#define qop(in, out, f) \
F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);
#define get_round_keys(nn) \
vbroadcastss (km+(4*(nn)))(CTX), RKM; \
vpand R1ST, RKR, RKRF; \
vpsubq RKRF, R32, RKRR; \
vpsrldq $1, RKR, RKR;
#define Q(n) \
get_round_keys(4*n+0); \
qop(RD, RC, 1); \
\
get_round_keys(4*n+1); \
qop(RC, RB, 2); \
\
get_round_keys(4*n+2); \
qop(RB, RA, 3); \
\
get_round_keys(4*n+3); \
qop(RA, RD, 1);
#define QBAR(n) \
get_round_keys(4*n+3); \
qop(RA, RD, 1); \
\
get_round_keys(4*n+2); \
qop(RB, RA, 3); \
\
get_round_keys(4*n+1); \
qop(RC, RB, 2); \
\
get_round_keys(4*n+0); \
qop(RD, RC, 1);
#define shuffle(mask) \
vpshufb mask, RKR, RKR;
#define preload_rkr(n, do_mask, mask) \
vbroadcastss .L16_mask, RKR; \
/* add 16-bit rotation to key rotations (mod 32) */ \
vpxor (kr+n*16)(CTX), RKR, RKR; \
do_mask(mask);
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
vpunpckldq x1, x0, t0; \
vpunpckhdq x1, x0, t2; \
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x3; \
\
vpunpcklqdq t1, t0, x0; \
vpunpckhqdq t1, t0, x1; \
vpunpcklqdq x3, t2, x2; \
vpunpckhqdq x3, t2, x3;
#define inpack_blocks(in, x0, x1, x2, x3, t0, t1, t2, rmask) \
vmovdqu (0*4*4)(in), x0; \
vmovdqu (1*4*4)(in), x1; \
vmovdqu (2*4*4)(in), x2; \
vmovdqu (3*4*4)(in), x3; \
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vpshufb rmask, x2, x2; \
vpshufb rmask, x3, x3; \
\
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
#define outunpack_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vpshufb rmask, x2, x2; \
vpshufb rmask, x3, x3; \
vmovdqu x0, (0*4*4)(out); \
vmovdqu x1, (1*4*4)(out); \
vmovdqu x2, (2*4*4)(out); \
vmovdqu x3, (3*4*4)(out);
#define outunpack_xor_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vpshufb rmask, x2, x2; \
vpshufb rmask, x3, x3; \
vpxor (0*4*4)(out), x0, x0; \
vmovdqu x0, (0*4*4)(out); \
vpxor (1*4*4)(out), x1, x1; \
vmovdqu x1, (1*4*4)(out); \
vpxor (2*4*4)(out), x2, x2; \
vmovdqu x2, (2*4*4)(out); \
vpxor (3*4*4)(out), x3, x3; \
vmovdqu x3, (3*4*4)(out);
.data
.align 16
.Lbswap_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_Q_Q_QBAR_QBAR:
.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
.byte 16, 16, 16, 16
.L32_mask:
.byte 32, 0, 0, 0
.Lfirst_mask:
.byte 0x1f, 0, 0, 0
.text
.align 16
.global __cast6_enc_blk_8way
.type __cast6_enc_blk_8way,@function;
__cast6_enc_blk_8way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
pushq %rbp;
pushq %rbx;
pushq %rcx;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
movq %rsi, %r11;
preload_rkr(0, dummy, none);
Q(0);
Q(1);
Q(2);
Q(3);
preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
Q(4);
Q(5);
QBAR(6);
QBAR(7);
preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
QBAR(8);
QBAR(9);
QBAR(10);
QBAR(11);
popq %rcx;
popq %rbx;
popq %rbp;
vmovdqa .Lbswap_mask, RKM;
leaq (4*4*4)(%r11), %rax;
testb %cl, %cl;
jnz __enc_xor8;
outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
__enc_xor8:
outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
.align 16
.global cast6_dec_blk_8way
.type cast6_dec_blk_8way,@function;
cast6_dec_blk_8way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
pushq %rbp;
pushq %rbx;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
movq %rsi, %r11;
preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
Q(11);
Q(10);
Q(9);
Q(8);
preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
Q(7);
Q(6);
QBAR(5);
QBAR(4);
preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
QBAR(3);
QBAR(2);
QBAR(1);
QBAR(0);
popq %rbx;
popq %rbp;
vmovdqa .Lbswap_mask, RKM;
leaq (4*4*4)(%r11), %rax;
outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
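The inpack_blocks/outunpack_blocks macros above byte-swap four 16-byte blocks and run transpose_4x4 so that each XMM register ends up holding the same 32-bit word position of all four blocks; each of the two register sets (RA1..RD1 and RA2..RD2) then carries four blocks, for eight in total, and the inverse transpose on output restores normal block layout. A rough scalar equivalent of that permutation, written as standalone C purely for illustration, is:

#include <stdint.h>

/* Swap rows and columns of a 4x4 matrix of 32-bit words: row r of the
 * input (one cipher block) becomes column r of the output, which is the
 * effect transpose_4x4 achieves with vpunpck{l,h}dq / vpunpck{l,h}qdq. */
static void transpose_4x4_sketch(uint32_t m[4][4])
{
	int r, c;

	for (r = 0; r < 4; r++) {
		for (c = r + 1; c < 4; c++) {
			uint32_t t = m[r][c];

			m[r][c] = m[c][r];
			m[c][r] = t;
		}
	}
}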

View File

@ -0,0 +1,648 @@
/*
* Glue Code for the AVX assembler implementation of the Cast6 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#define CAST6_PARALLEL_BLOCKS 8
asmlinkage void __cast6_enc_blk_8way(struct cast6_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void cast6_dec_blk_8way(struct cast6_ctx *ctx, u8 *dst,
const u8 *src);
static inline void cast6_enc_blk_xway(struct cast6_ctx *ctx, u8 *dst,
const u8 *src)
{
__cast6_enc_blk_8way(ctx, dst, src, false);
}
static inline void cast6_enc_blk_xway_xor(struct cast6_ctx *ctx, u8 *dst,
const u8 *src)
{
__cast6_enc_blk_8way(ctx, dst, src, true);
}
static inline void cast6_dec_blk_xway(struct cast6_ctx *ctx, u8 *dst,
const u8 *src)
{
cast6_dec_blk_8way(ctx, dst, src);
}
static void cast6_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
u128 ivs[CAST6_PARALLEL_BLOCKS - 1];
unsigned int j;
for (j = 0; j < CAST6_PARALLEL_BLOCKS - 1; j++)
ivs[j] = src[j];
cast6_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
for (j = 0; j < CAST6_PARALLEL_BLOCKS - 1; j++)
u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}
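/*
 * Worked relation for the helper above: CBC decryption computes
 * P[i] = D(C[i]) ^ C[i-1], so every output block needs the previous
 * *ciphertext* block.  Because the 8-way decrypt may run in place
 * (dst == src), the ciphertext blocks C[1]..C[7] are stashed in ivs[]
 * before they can be overwritten and XORed back in afterwards; chaining
 * of the first block with the preceding block or IV is left to the
 * generic glue_cbc_decrypt_128bit() caller.
 */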
static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
{
be128 ctrblk;
u128_to_be128(&ctrblk, iv);
u128_inc(iv);
__cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
u128_xor(dst, src, (u128 *)&ctrblk);
}
static void cast6_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
u128 *iv)
{
be128 ctrblks[CAST6_PARALLEL_BLOCKS];
unsigned int i;
for (i = 0; i < CAST6_PARALLEL_BLOCKS; i++) {
if (dst != src)
dst[i] = src[i];
u128_to_be128(&ctrblks[i], iv);
u128_inc(iv);
}
cast6_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}
static const struct common_glue_ctx cast6_enc = {
.num_funcs = 2,
.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_enc_blk_xway) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
} }
};
static const struct common_glue_ctx cast6_ctr = {
.num_funcs = 2,
.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr_xway) }
}, {
.num_blocks = 1,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
} }
};
static const struct common_glue_ctx cast6_dec = {
.num_funcs = 2,
.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
.fn_u = { .ecb = GLUE_FUNC_CAST(cast6_dec_blk_xway) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
} }
};
static const struct common_glue_ctx cast6_dec_cbc = {
.num_funcs = 2,
.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_decrypt_cbc_xway) }
}, {
.num_blocks = 1,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
} }
};
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
}
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ecb_crypt_128bit(&cast6_dec, desc, dst, src, nbytes);
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__cast6_encrypt), desc,
dst, src, nbytes);
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_cbc_decrypt_128bit(&cast6_dec_cbc, desc, dst, src,
nbytes);
}
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ctr_crypt_128bit(&cast6_ctr, desc, dst, src, nbytes);
}
static inline bool cast6_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
return glue_fpu_begin(CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS,
NULL, fpu_enabled, nbytes);
}
static inline void cast6_fpu_end(bool fpu_enabled)
{
glue_fpu_end(fpu_enabled);
}
struct crypt_priv {
struct cast6_ctx *ctx;
bool fpu_enabled;
};
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
const unsigned int bsize = CAST6_BLOCK_SIZE;
struct crypt_priv *ctx = priv;
int i;
ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
cast6_enc_blk_xway(ctx->ctx, srcdst, srcdst);
return;
}
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
__cast6_encrypt(ctx->ctx, srcdst, srcdst);
}
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
const unsigned int bsize = CAST6_BLOCK_SIZE;
struct crypt_priv *ctx = priv;
int i;
ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
cast6_dec_blk_xway(ctx->ctx, srcdst, srcdst);
return;
}
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
__cast6_decrypt(ctx->ctx, srcdst, srcdst);
}
struct cast6_lrw_ctx {
struct lrw_table_ctx lrw_table;
struct cast6_ctx cast6_ctx;
};
static int lrw_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE,
&tfm->crt_flags);
if (err)
return err;
return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[CAST6_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->cast6_ctx,
.fpu_enabled = false,
};
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = &crypt_ctx,
.crypt_fn = encrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ret = lrw_crypt(desc, dst, src, nbytes, &req);
cast6_fpu_end(crypt_ctx.fpu_enabled);
return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[CAST6_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->cast6_ctx,
.fpu_enabled = false,
};
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = &crypt_ctx,
.crypt_fn = decrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ret = lrw_crypt(desc, dst, src, nbytes, &req);
cast6_fpu_end(crypt_ctx.fpu_enabled);
return ret;
}
static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
lrw_free_table(&ctx->lrw_table);
}
struct cast6_xts_ctx {
struct cast6_ctx tweak_ctx;
struct cast6_ctx crypt_ctx;
};
static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int err;
/* key consists of keys of equal size concatenated, therefore
* the length must be even
*/
if (keylen % 2) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* first half of xts-key is for crypt */
err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
if (err)
return err;
/* second half of xts-key is for tweak */
return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
flags);
}
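/*
 * Illustrative sketch only: an xts(cast6) key is two independent CAST6 keys
 * of equal length stored back to back, e.g. a 64-byte key splits into a
 * 32-byte data key followed by a 32-byte tweak key.  The helper below is a
 * hypothetical illustration of that split, not part of the driver.
 */
static int xts_split_key_sketch(const u8 *key, unsigned int keylen,
				const u8 **data_key, const u8 **tweak_key)
{
	if (keylen % 2)
		return -EINVAL;		/* halves must be the same size */

	*data_key  = key;		/* first half encrypts the data */
	*tweak_key = key + keylen / 2;	/* second half encrypts the tweak */
	return 0;
}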
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[CAST6_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->crypt_ctx,
.fpu_enabled = false,
};
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.tweak_ctx = &ctx->tweak_ctx,
.tweak_fn = XTS_TWEAK_CAST(__cast6_encrypt),
.crypt_ctx = &crypt_ctx,
.crypt_fn = encrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ret = xts_crypt(desc, dst, src, nbytes, &req);
cast6_fpu_end(crypt_ctx.fpu_enabled);
return ret;
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[CAST6_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->crypt_ctx,
.fpu_enabled = false,
};
struct xts_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.tweak_ctx = &ctx->tweak_ctx,
.tweak_fn = XTS_TWEAK_CAST(__cast6_encrypt),
.crypt_ctx = &crypt_ctx,
.crypt_fn = decrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ret = xts_crypt(desc, dst, src, nbytes, &req);
cast6_fpu_end(crypt_ctx.fpu_enabled);
return ret;
}
static struct crypto_alg cast6_algs[10] = { {
.cra_name = "__ecb-cast6-avx",
.cra_driver_name = "__driver-ecb-cast6-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.setkey = cast6_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
},
}, {
.cra_name = "__cbc-cast6-avx",
.cra_driver_name = "__driver-cbc-cast6-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.setkey = cast6_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
},
}, {
.cra_name = "__ctr-cast6-avx",
.cra_driver_name = "__driver-ctr-cast6-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = cast6_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
},
},
}, {
.cra_name = "__lrw-cast6-avx",
.cra_driver_name = "__driver-lrw-cast6-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_lrw_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_exit = lrw_exit_tfm,
.cra_u = {
.blkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE +
CAST6_BLOCK_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE +
CAST6_BLOCK_SIZE,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = lrw_cast6_setkey,
.encrypt = lrw_encrypt,
.decrypt = lrw_decrypt,
},
},
}, {
.cra_name = "__xts-cast6-avx",
.cra_driver_name = "__driver-xts-cast6-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_xts_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE * 2,
.max_keysize = CAST6_MAX_KEY_SIZE * 2,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = xts_cast6_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
},
},
}, {
.cra_name = "ecb(cast6)",
.cra_driver_name = "ecb-cast6-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "cbc(cast6)",
.cra_driver_name = "cbc-cast6-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = __ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "ctr(cast6)",
.cra_driver_name = "ctr-cast6-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_encrypt,
.geniv = "chainiv",
},
},
}, {
.cra_name = "lrw(cast6)",
.cra_driver_name = "lrw-cast6-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE +
CAST6_BLOCK_SIZE,
.max_keysize = CAST6_MAX_KEY_SIZE +
CAST6_BLOCK_SIZE,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "xts(cast6)",
.cra_driver_name = "xts-cast6-avx",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = CAST6_MIN_KEY_SIZE * 2,
.max_keysize = CAST6_MAX_KEY_SIZE * 2,
.ivsize = CAST6_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
} };
static int __init cast6_init(void)
{
u64 xcr0;
if (!cpu_has_avx || !cpu_has_osxsave) {
pr_info("AVX instructions are not detected.\n");
return -ENODEV;
}
xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return -ENODEV;
}
return crypto_register_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
}
static void __exit cast6_exit(void)
{
crypto_unregister_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
}
module_init(cast6_init);
module_exit(cast6_exit);
MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("cast6");
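For context, a minimal sketch of how a kernel-side caller could exercise one of the algorithms registered above through the ablkcipher API of this kernel generation. The tfm and request handling follows the generic crypto API; the demo_* names, the assumption that the buffer length is a multiple of the 16-byte block size, and the error handling are illustrative only.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <crypto/cast6.h>

static void demo_cb(struct crypto_async_request *areq, int err)
{
	if (err == -EINPROGRESS)
		return;			/* backlog promotion; wait for final call */
	complete(areq->data);
}

static int demo_cbc_cast6(u8 *buf, unsigned int len,	/* len % 16 == 0 */
			  const u8 *key, unsigned int keylen,
			  u8 iv[CAST6_BLOCK_SIZE])
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(cast6)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_cb, &done);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);	/* may complete asynchronously */
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}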

View File

@ -150,7 +150,6 @@ static struct shash_alg ghash_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
},
};
@ -288,7 +287,6 @@ static struct ahash_alg ghash_async_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ghash_async_alg.halg.base.cra_list),
.cra_init = ghash_async_init_tfm,
.cra_exit = ghash_async_exit_tfm,
},

View File

@ -110,7 +110,7 @@ static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
nbytes -= bsize;
} while (nbytes >= bsize);
u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
*(u128 *)walk->iv = *iv;
return nbytes;
}

View File

@ -97,7 +97,6 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct salsa20_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.blkcipher = {
.setkey = setkey,

View File

@ -390,7 +390,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[0].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE,
@ -410,7 +409,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[1].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE,
@ -430,7 +428,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[2].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE,
@ -451,7 +448,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[3].cra_list),
.cra_exit = lrw_exit_tfm,
.cra_u = {
.blkcipher = {
@ -475,7 +471,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[4].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
@ -496,7 +491,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[5].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -518,7 +512,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[6].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -541,7 +534,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[7].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -565,7 +557,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[8].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -590,7 +581,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[9].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {

View File

@ -393,7 +393,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[0].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE,
@ -413,7 +412,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[1].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE,
@ -433,7 +431,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[2].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE,
@ -454,7 +451,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[3].cra_list),
.cra_exit = lrw_exit_tfm,
.cra_u = {
.blkcipher = {
@ -478,7 +474,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[4].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
@ -499,7 +494,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[5].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -521,7 +515,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[6].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -544,7 +537,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[7].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -568,7 +560,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[8].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -593,7 +584,6 @@ static struct crypto_alg serpent_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_algs[9].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {

View File

@ -4,6 +4,8 @@
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@ -47,16 +49,22 @@
#define RC2 %xmm6
#define RD2 %xmm7
#define RX %xmm8
#define RY %xmm9
#define RX0 %xmm8
#define RY0 %xmm9
#define RK1 %xmm10
#define RK2 %xmm11
#define RX1 %xmm10
#define RY1 %xmm11
#define RID1 %rax
#define RID1b %al
#define RID2 %rbx
#define RID2b %bl
#define RK1 %xmm12
#define RK2 %xmm13
#define RT %xmm14
#define RR %xmm15
#define RID1 %rbp
#define RID1d %ebp
#define RID2 %rsi
#define RID2d %esi
#define RGI1 %rdx
#define RGI1bl %dl
@ -65,6 +73,13 @@
#define RGI2bl %cl
#define RGI2bh %ch
#define RGI3 %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4 %rbx
#define RGI4bl %bl
#define RGI4bh %bh
#define RGS1 %r8
#define RGS1d %r8d
#define RGS2 %r9
@ -73,89 +88,123 @@
#define RGS3d %r10d
#define lookup_32bit(t0, t1, t2, t3, src, dst) \
movb src ## bl, RID1b; \
movb src ## bh, RID2b; \
movl t0(CTX, RID1, 4), dst ## d; \
xorl t1(CTX, RID2, 4), dst ## d; \
#define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \
movzbl src ## bl, RID1d; \
movzbl src ## bh, RID2d; \
shrq $16, src; \
movb src ## bl, RID1b; \
movb src ## bh, RID2b; \
movl t0(CTX, RID1, 4), dst ## d; \
movl t1(CTX, RID2, 4), RID2d; \
movzbl src ## bl, RID1d; \
xorl RID2d, dst ## d; \
movzbl src ## bh, RID2d; \
interleave_op(il_reg); \
xorl t2(CTX, RID1, 4), dst ## d; \
xorl t3(CTX, RID2, 4), dst ## d;
#define G(a, x, t0, t1, t2, t3) \
vmovq a, RGI1; \
vpsrldq $8, a, x; \
vmovq x, RGI2; \
\
lookup_32bit(t0, t1, t2, t3, RGI1, RGS1); \
shrq $16, RGI1; \
lookup_32bit(t0, t1, t2, t3, RGI1, RGS2); \
shlq $32, RGS2; \
orq RGS1, RGS2; \
\
lookup_32bit(t0, t1, t2, t3, RGI2, RGS1); \
shrq $16, RGI2; \
lookup_32bit(t0, t1, t2, t3, RGI2, RGS3); \
shlq $32, RGS3; \
orq RGS1, RGS3; \
\
vmovq RGS2, x; \
vpinsrq $1, RGS3, x, x;
#define dummy(d) /* do nothing */
#define encround(a, b, c, d, x, y) \
G(a, x, s0, s1, s2, s3); \
G(b, y, s1, s2, s3, s0); \
#define shr_next(reg) \
shrq $16, reg;
#define G(gi1, gi2, x, t0, t1, t2, t3) \
lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \
lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \
\
lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none); \
shlq $32, RGS2; \
orq RGS1, RGS2; \
lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none); \
shlq $32, RGS1; \
orq RGS1, RGS3;
#define round_head_2(a, b, x1, y1, x2, y2) \
vmovq b ## 1, RGI3; \
vpextrq $1, b ## 1, RGI4; \
\
G(RGI1, RGI2, x1, s0, s1, s2, s3); \
vmovq a ## 2, RGI1; \
vpextrq $1, a ## 2, RGI2; \
vmovq RGS2, x1; \
vpinsrq $1, RGS3, x1, x1; \
\
G(RGI3, RGI4, y1, s1, s2, s3, s0); \
vmovq b ## 2, RGI3; \
vpextrq $1, b ## 2, RGI4; \
vmovq RGS2, y1; \
vpinsrq $1, RGS3, y1, y1; \
\
G(RGI1, RGI2, x2, s0, s1, s2, s3); \
vmovq RGS2, x2; \
vpinsrq $1, RGS3, x2, x2; \
\
G(RGI3, RGI4, y2, s1, s2, s3, s0); \
vmovq RGS2, y2; \
vpinsrq $1, RGS3, y2, y2;
#define encround_tail(a, b, c, d, x, y, prerotate) \
vpaddd x, y, x; \
vpaddd x, RK1, RT;\
prerotate(b); \
vpxor RT, c, c; \
vpaddd y, x, y; \
vpaddd x, RK1, x; \
vpaddd y, RK2, y; \
vpxor x, c, c; \
vpsrld $1, c, x; \
vpsrld $1, c, RT; \
vpslld $(32 - 1), c, c; \
vpor c, x, c; \
vpslld $1, d, x; \
vpsrld $(32 - 1), d, d; \
vpor d, x, d; \
vpxor d, y, d;
vpor c, RT, c; \
vpxor d, y, d; \
#define decround(a, b, c, d, x, y) \
G(a, x, s0, s1, s2, s3); \
G(b, y, s1, s2, s3, s0); \
#define decround_tail(a, b, c, d, x, y, prerotate) \
vpaddd x, y, x; \
vpaddd x, RK1, RT;\
prerotate(a); \
vpxor RT, c, c; \
vpaddd y, x, y; \
vpaddd y, RK2, y; \
vpxor d, y, d; \
vpsrld $1, d, y; \
vpslld $(32 - 1), d, d; \
vpor d, y, d; \
vpslld $1, c, y; \
vpsrld $(32 - 1), c, c; \
vpor c, y, c; \
vpaddd x, RK1, x; \
vpxor x, c, c;
#define encrypt_round(n, a, b, c, d) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
encround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \
encround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY);
#define rotate_1l(x) \
vpslld $1, x, RR; \
vpsrld $(32 - 1), x, x; \
vpor x, RR, x;
#define decrypt_round(n, a, b, c, d) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
decround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \
decround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY);
#define preload_rgi(c) \
vmovq c, RGI1; \
vpextrq $1, c, RGI2;
#define encrypt_round(n, a, b, c, d, preload, prerotate) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
round_head_2(a, b, RX0, RY0, RX1, RY1); \
encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
preload(c ## 1); \
encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);
#define decrypt_round(n, a, b, c, d, preload, prerotate) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
round_head_2(a, b, RX0, RY0, RX1, RY1); \
decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
preload(c ## 1); \
decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);
#define encrypt_cycle(n) \
encrypt_round((2*n), RA, RB, RC, RD); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB);
encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);
#define encrypt_cycle_last(n) \
encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);
#define decrypt_cycle(n) \
decrypt_round(((2*n) + 1), RC, RD, RA, RB); \
decrypt_round((2*n), RA, RB, RC, RD);
decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);
#define decrypt_cycle_last(n) \
decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
vpunpckldq x1, x0, t0; \
@ -216,17 +265,20 @@ __twofish_enc_blk_8way:
* %rcx: bool, if true: xor output
*/
pushq %rbp;
pushq %rbx;
pushq %rcx;
vmovdqu w(CTX), RK1;
leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2);
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
preload_rgi(RA1);
rotate_1l(RD1);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
rotate_1l(RD2);
xorq RID1, RID1;
xorq RID2, RID2;
movq %rsi, %r11;
encrypt_cycle(0);
encrypt_cycle(1);
@ -235,26 +287,27 @@ __twofish_enc_blk_8way:
encrypt_cycle(4);
encrypt_cycle(5);
encrypt_cycle(6);
encrypt_cycle(7);
encrypt_cycle_last(7);
vmovdqu (w+4*4)(CTX), RK1;
popq %rcx;
popq %rbx;
popq %rbp;
leaq (4*4*4)(%rsi), %rax;
leaq (4*4*4)(%r11), %rax;
testb %cl, %cl;
jnz __enc_xor8;
outunpack_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
ret;
__enc_xor8:
outunpack_xor_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
ret;
@ -269,16 +322,19 @@ twofish_dec_blk_8way:
* %rdx: src
*/
pushq %rbp;
pushq %rbx;
vmovdqu (w+4*4)(CTX), RK1;
leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
preload_rgi(RC1);
rotate_1l(RA1);
inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
rotate_1l(RA2);
xorq RID1, RID1;
xorq RID2, RID2;
movq %rsi, %r11;
decrypt_cycle(7);
decrypt_cycle(6);
@ -287,14 +343,15 @@ twofish_dec_blk_8way:
decrypt_cycle(3);
decrypt_cycle(2);
decrypt_cycle(1);
decrypt_cycle(0);
decrypt_cycle_last(0);
vmovdqu (w)(CTX), RK1;
popq %rbx;
popq %rbp;
leaq (4*4*4)(%rsi), %rax;
outunpack_blocks(%rsi, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2);
leaq (4*4*4)(%r11), %rax;
outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
ret;

View File

@ -378,7 +378,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[0].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
@ -398,7 +397,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[1].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
@ -418,7 +416,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[2].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
@ -439,7 +436,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[3].cra_list),
.cra_exit = lrw_twofish_exit_tfm,
.cra_u = {
.blkcipher = {
@ -463,7 +459,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[4].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE * 2,
@ -484,7 +479,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[5].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -506,7 +500,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[6].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -529,7 +522,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[7].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -553,7 +545,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[8].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
@ -578,7 +569,6 @@ static struct crypto_alg twofish_algs[10] = { {
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(twofish_algs[9].cra_list),
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {

View File

@ -70,7 +70,6 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,

View File

@ -342,7 +342,6 @@ static struct crypto_alg tf_algs[5] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tf_algs[0].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
@ -362,7 +361,6 @@ static struct crypto_alg tf_algs[5] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tf_algs[1].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
@ -383,7 +381,6 @@ static struct crypto_alg tf_algs[5] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tf_algs[2].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
@ -404,7 +401,6 @@ static struct crypto_alg tf_algs[5] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tf_algs[3].cra_list),
.cra_exit = lrw_twofish_exit_tfm,
.cra_u = {
.blkcipher = {
@ -426,7 +422,6 @@ static struct crypto_alg tf_algs[5] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tf_algs[4].cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE * 2,

182
crypto/842.c Normal file
View File

@ -0,0 +1,182 @@
/*
* Cryptographic API for the 842 compression algorithm.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright (C) IBM Corporation, 2011
*
* Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
* Seth Jennings <sjenning@linux.vnet.ibm.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/nx842.h>
#include <linux/lzo.h>
#include <linux/timer.h>
static int nx842_uselzo;
struct nx842_ctx {
void *nx842_wmem; /* working memory for 842/lzo */
};
enum nx842_crypto_type {
NX842_CRYPTO_TYPE_842,
NX842_CRYPTO_TYPE_LZO
};
#define NX842_SENTINEL 0xdeadbeef
struct nx842_crypto_header {
unsigned int sentinel; /* debug */
enum nx842_crypto_type type;
};
static int nx842_init(struct crypto_tfm *tfm)
{
struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
int wmemsize;
wmemsize = max_t(int, nx842_get_workmem_size(), LZO1X_MEM_COMPRESS);
ctx->nx842_wmem = kmalloc(wmemsize, GFP_NOFS);
if (!ctx->nx842_wmem)
return -ENOMEM;
return 0;
}
static void nx842_exit(struct crypto_tfm *tfm)
{
struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
kfree(ctx->nx842_wmem);
}
static void nx842_reset_uselzo(unsigned long data)
{
nx842_uselzo = 0;
}
static DEFINE_TIMER(failover_timer, nx842_reset_uselzo, 0, 0);
static int nx842_crypto_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
struct nx842_crypto_header *hdr;
unsigned int tmp_len = *dlen;
size_t lzodlen; /* needed for lzo */
int err;
*dlen = 0;
hdr = (struct nx842_crypto_header *)dst;
hdr->sentinel = NX842_SENTINEL; /* debug */
dst += sizeof(struct nx842_crypto_header);
tmp_len -= sizeof(struct nx842_crypto_header);
lzodlen = tmp_len;
if (likely(!nx842_uselzo)) {
err = nx842_compress(src, slen, dst, &tmp_len, ctx->nx842_wmem);
if (likely(!err)) {
hdr->type = NX842_CRYPTO_TYPE_842;
*dlen = tmp_len + sizeof(struct nx842_crypto_header);
return 0;
}
/* hardware failed */
nx842_uselzo = 1;
/* set timer to check for hardware again in 1 second */
mod_timer(&failover_timer, jiffies + msecs_to_jiffies(1000));
}
/* no hardware, use lzo */
err = lzo1x_1_compress(src, slen, dst, &lzodlen, ctx->nx842_wmem);
if (err != LZO_E_OK)
return -EINVAL;
hdr->type = NX842_CRYPTO_TYPE_LZO;
*dlen = lzodlen + sizeof(struct nx842_crypto_header);
return 0;
}
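/*
 * Illustrative sketch only: compress and decompress agree on a small header
 * in front of the payload, which is what lets the LZO software fallback be
 * transparent to callers.  The helper below is a hypothetical example of
 * inspecting such a buffer, not part of the module.
 */
static int nx842_peek_type(const u8 *buf, unsigned int len,
			   enum nx842_crypto_type *type)
{
	const struct nx842_crypto_header *hdr = (const void *)buf;

	if (len < sizeof(*hdr) || hdr->sentinel != NX842_SENTINEL)
		return -EINVAL;		/* not framed by this module */
	*type = hdr->type;		/* NX842_CRYPTO_TYPE_842 or _LZO */
	return 0;
}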
static int nx842_crypto_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
struct nx842_crypto_header *hdr;
unsigned int tmp_len = *dlen;
size_t lzodlen; /* needed for lzo */
int err;
*dlen = 0;
hdr = (struct nx842_crypto_header *)src;
if (unlikely(hdr->sentinel != NX842_SENTINEL))
return -EINVAL;
src += sizeof(struct nx842_crypto_header);
slen -= sizeof(struct nx842_crypto_header);
if (likely(hdr->type == NX842_CRYPTO_TYPE_842)) {
err = nx842_decompress(src, slen, dst, &tmp_len,
ctx->nx842_wmem);
if (err)
return -EINVAL;
*dlen = tmp_len;
} else if (hdr->type == NX842_CRYPTO_TYPE_LZO) {
lzodlen = tmp_len;
err = lzo1x_decompress_safe(src, slen, dst, &lzodlen);
if (err != LZO_E_OK)
return -EINVAL;
*dlen = lzodlen;
} else
return -EINVAL;
return 0;
}
static struct crypto_alg alg = {
.cra_name = "842",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct nx842_ctx),
.cra_module = THIS_MODULE,
.cra_init = nx842_init,
.cra_exit = nx842_exit,
.cra_u = { .compress = {
.coa_compress = nx842_crypto_compress,
.coa_decompress = nx842_crypto_decompress } }
};
static int __init nx842_mod_init(void)
{
del_timer(&failover_timer);
return crypto_register_alg(&alg);
}
static void __exit nx842_mod_exit(void)
{
crypto_unregister_alg(&alg);
}
module_init(nx842_mod_init);
module_exit(nx842_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("842 Compression Algorithm");

View File

@ -460,6 +460,15 @@ config CRYPTO_SHA1_SPARC64
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using sparc64 crypto instructions, when available.
config CRYPTO_SHA1_ARM
tristate "SHA1 digest algorithm (ARM-asm)"
depends on ARM
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using optimized ARM assembler.
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@ -609,6 +618,8 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_ALGAPI
select CRYPTO_LRW
select CRYPTO_XTS
help
Use Intel AES-NI instructions for AES algorithm.
@ -661,6 +672,30 @@ config CRYPTO_AES_SPARC64
for some popular block cipher mode is supported too, including
ECB and CBC.
config CRYPTO_AES_ARM
tristate "AES cipher algorithms (ARM-asm)"
depends on ARM
select CRYPTO_ALGAPI
select CRYPTO_AES
help
Use optimized AES assembler routines for ARM platforms.
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
Rijndael appears to be consistently a very good performer in
both hardware and software across a wide range of computing
environments regardless of its use in feedback or non-feedback
modes. Its key setup time is excellent, and its key agility is
good. Rijndael's very low memory requirements make it very well
suited for restricted-space environments, in which it also
demonstrates excellent performance. Rijndael's operations are
among the easiest to defend against power and timing attacks.
The AES specifies three key sizes: 128, 192 and 256 bits
See <http://csrc.nist.gov/encryption/aes/> for more information.
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
select CRYPTO_ALGAPI
@ -781,6 +816,20 @@ config CRYPTO_CAST5
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
config CRYPTO_CAST5_AVX_X86_64
tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_CAST5
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
This module provides the Cast5 cipher algorithm that processes
sixteen blocks in parallel using the AVX instruction set.
config CRYPTO_CAST6
tristate "CAST6 (CAST-256) cipher algorithm"
select CRYPTO_ALGAPI
@ -788,6 +837,23 @@ config CRYPTO_CAST6
The CAST6 encryption algorithm (synonymous with CAST-256) is
described in RFC2612.
config CRYPTO_CAST6_AVX_X86_64
tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_CAST6
select CRYPTO_LRW
select CRYPTO_XTS
help
The CAST6 encryption algorithm (synonymous with CAST-256) is
described in RFC2612.
This module provides the Cast6 cipher algorithm that processes
eight blocks in parallel using the AVX instruction set.
config CRYPTO_DES
tristate "DES and Triple DES EDE cipher algorithms"
select CRYPTO_ALGAPI
@ -1106,6 +1172,15 @@ config CRYPTO_LZO
help
This is the LZO algorithm.
config CRYPTO_842
tristate "842 compression algorithm"
depends on CRYPTO_DEV_NX_COMPRESS
# 842 uses lzo if the hardware becomes unavailable
select LZO_COMPRESS
select LZO_DECOMPRESS
help
This is the 842 algorithm.
comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG

View File

@ -68,8 +68,8 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o
obj-$(CONFIG_CRYPTO_CAST6) += cast6_generic.o
obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
@ -82,6 +82,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o

View File

@ -1448,7 +1448,6 @@ static struct crypto_alg aes_alg = {
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,

View File

@ -382,26 +382,6 @@ static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
return 0;
}
static struct crypto_alg rng_alg = {
.cra_name = "stdrng",
.cra_driver_name = "ansi_cprng",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_RNG,
.cra_ctxsize = sizeof(struct prng_context),
.cra_type = &crypto_rng_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(rng_alg.cra_list),
.cra_init = cprng_init,
.cra_exit = cprng_exit,
.cra_u = {
.rng = {
.rng_make_random = cprng_get_random,
.rng_reset = cprng_reset,
.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
}
}
};
#ifdef CONFIG_CRYPTO_FIPS
static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen)
@ -438,8 +418,27 @@ static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
out:
return rc;
}
#endif
static struct crypto_alg fips_rng_alg = {
static struct crypto_alg rng_algs[] = { {
.cra_name = "stdrng",
.cra_driver_name = "ansi_cprng",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_RNG,
.cra_ctxsize = sizeof(struct prng_context),
.cra_type = &crypto_rng_type,
.cra_module = THIS_MODULE,
.cra_init = cprng_init,
.cra_exit = cprng_exit,
.cra_u = {
.rng = {
.rng_make_random = cprng_get_random,
.rng_reset = cprng_reset,
.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
}
}
#ifdef CONFIG_CRYPTO_FIPS
}, {
.cra_name = "fips(ansi_cprng)",
.cra_driver_name = "fips_ansi_cprng",
.cra_priority = 300,
@ -447,7 +446,6 @@ static struct crypto_alg fips_rng_alg = {
.cra_ctxsize = sizeof(struct prng_context),
.cra_type = &crypto_rng_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(rng_alg.cra_list),
.cra_init = cprng_init,
.cra_exit = cprng_exit,
.cra_u = {
@ -457,33 +455,18 @@ static struct crypto_alg fips_rng_alg = {
.seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
}
}
};
#endif
} };
/* Module initialization */
static int __init prng_mod_init(void)
{
int rc = 0;
rc = crypto_register_alg(&rng_alg);
#ifdef CONFIG_CRYPTO_FIPS
if (rc)
goto out;
rc = crypto_register_alg(&fips_rng_alg);
out:
#endif
return rc;
return crypto_register_algs(rng_algs, ARRAY_SIZE(rng_algs));
}
static void __exit prng_mod_fini(void)
{
crypto_unregister_alg(&rng_alg);
#ifdef CONFIG_CRYPTO_FIPS
crypto_unregister_alg(&fips_rng_alg);
#endif
return;
crypto_unregister_algs(rng_algs, ARRAY_SIZE(rng_algs));
}
MODULE_LICENSE("GPL");
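
With the two CPRNG variants folded into one rng_algs[] array, registration and teardown collapse to single crypto_register_algs()/crypto_unregister_algs() calls, as the hunks above show; the helper unwinds the entries it already registered if a later one fails. Consumers are unchanged. For orientation, a minimal hypothetical in-kernel user of the "stdrng" name might look like the sketch below; the seed handling is deliberately simplified and a real caller must fill the seed from a proper entropy source:

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/slab.h>

static int stdrng_get_bytes_example(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	u8 *seed;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	seed = kzalloc(crypto_rng_seedsize(rng), GFP_KERNEL);
	if (!seed) {
		err = -ENOMEM;
		goto out_free_rng;
	}

	/* placeholder all-zero seed; real users must seed with entropy */
	err = crypto_rng_reset(rng, seed, crypto_rng_seedsize(rng));
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, len);	/* negative return indicates failure */

	kfree(seed);
out_free_rng:
	crypto_free_rng(rng);
	return err;
}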


@ -678,7 +678,6 @@ static struct crypto_alg anubis_alg = {
.cra_ctxsize = sizeof (struct anubis_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(anubis_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
.cia_max_keysize = ANUBIS_MAX_KEY_SIZE,


@ -115,7 +115,6 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,


@ -1072,7 +1072,6 @@ static struct crypto_alg camellia_alg = {
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(camellia_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,


@ -4,8 +4,8 @@
* Derived from GnuPG implementation of cast5.
*
* Major Changes.
* Complete conformance to rfc2144.
* Supports key size from 40 to 128 bits.
* Complete conformance to rfc2144.
* Supports key size from 40 to 128 bits.
*
* Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
* Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>.
@ -28,19 +28,10 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#define CAST5_BLOCK_SIZE 8
#define CAST5_MIN_KEY_SIZE 5
#define CAST5_MAX_KEY_SIZE 16
struct cast5_ctx {
u32 Km[16];
u8 Kr[16];
int rr; /* rr?number of rounds = 16:number of rounds = 12; (rfc 2144) */
};
#include <crypto/cast5.h>
static const u32 s1[256] = {
const u32 cast5_s1[256] = {
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
@ -106,7 +97,8 @@ static const u32 s1[256] = {
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
};
static const u32 s2[256] = {
EXPORT_SYMBOL_GPL(cast5_s1);
const u32 cast5_s2[256] = {
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
@ -172,7 +164,8 @@ static const u32 s2[256] = {
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
0x73bfbe70, 0x83877605, 0x4523ecf1
};
static const u32 s3[256] = {
EXPORT_SYMBOL_GPL(cast5_s2);
const u32 cast5_s3[256] = {
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
@ -238,7 +231,8 @@ static const u32 s3[256] = {
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
0xa133c501, 0xe9d3531c, 0xee353783
};
static const u32 s4[256] = {
EXPORT_SYMBOL_GPL(cast5_s3);
const u32 cast5_s4[256] = {
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
@ -304,6 +298,7 @@ static const u32 s4[256] = {
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
};
EXPORT_SYMBOL_GPL(cast5_s4);
static const u32 s5[256] = {
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff,
0x1dd358f5, 0x44dd9d44, 0x1731167f,
@ -569,17 +564,21 @@ static const u32 sb8[256] = {
0xeaee6801, 0x8db2a283, 0xea8bf59e
};
#define s1 cast5_s1
#define s2 cast5_s2
#define s3 cast5_s3
#define s4 cast5_s4
#define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
{
struct cast5_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
@ -628,10 +627,15 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
dst[0] = cpu_to_be32(r);
dst[1] = cpu_to_be32(l);
}
EXPORT_SYMBOL_GPL(__cast5_encrypt);
static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast5_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
{
struct cast5_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
@ -667,6 +671,12 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
dst[0] = cpu_to_be32(r);
dst[1] = cpu_to_be32(l);
}
EXPORT_SYMBOL_GPL(__cast5_decrypt);
static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast5_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
static void key_schedule(u32 *x, u32 *z, u32 *k)
{
@ -743,7 +753,7 @@ static void key_schedule(u32 *x, u32 *z, u32 *k)
}
static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len)
int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{
struct cast5_ctx *c = crypto_tfm_ctx(tfm);
int i;
@ -771,20 +781,22 @@ static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len)
c->Kr[i] = k[i] & 0x1f;
return 0;
}
EXPORT_SYMBOL_GPL(cast5_setkey);
static struct crypto_alg alg = {
.cra_name = "cast5",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cra_name = "cast5",
.cra_driver_name = "cast5-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAST5_MIN_KEY_SIZE,
.cia_max_keysize = CAST5_MAX_KEY_SIZE,
.cia_setkey = cast5_setkey,
.cia_setkey = cast5_setkey,
.cia_encrypt = cast5_encrypt,
.cia_decrypt = cast5_decrypt
}
@ -806,4 +818,4 @@ module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
MODULE_ALIAS("cast5");


@ -25,24 +25,21 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/cast6.h>
#define CAST6_BLOCK_SIZE 16
#define CAST6_MIN_KEY_SIZE 16
#define CAST6_MAX_KEY_SIZE 32
struct cast6_ctx {
u32 Km[12][4];
u8 Kr[12][4];
};
#define s1 cast6_s1
#define s2 cast6_s2
#define s3 cast6_s3
#define s4 cast6_s4
#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
static const u32 s1[256] = {
const u32 cast6_s1[256] = {
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
@ -108,8 +105,9 @@ static const u32 s1[256] = {
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
};
EXPORT_SYMBOL_GPL(cast6_s1);
static const u32 s2[256] = {
const u32 cast6_s2[256] = {
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
@ -175,8 +173,9 @@ static const u32 s2[256] = {
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
0x73bfbe70, 0x83877605, 0x4523ecf1
};
EXPORT_SYMBOL_GPL(cast6_s2);
static const u32 s3[256] = {
const u32 cast6_s3[256] = {
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
@ -242,8 +241,9 @@ static const u32 s3[256] = {
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
0xa133c501, 0xe9d3531c, 0xee353783
};
EXPORT_SYMBOL_GPL(cast6_s3);
static const u32 s4[256] = {
const u32 cast6_s4[256] = {
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
@ -309,6 +309,7 @@ static const u32 s4[256] = {
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
};
EXPORT_SYMBOL_GPL(cast6_s4);
static const u32 Tm[24][8] = {
{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
@ -369,7 +370,7 @@ static const u8 Tr[4][8] = {
};
/* forward octave */
static void W(u32 *key, unsigned int i)
static inline void W(u32 *key, unsigned int i)
{
u32 I;
key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
@ -382,14 +383,12 @@ static void W(u32 *key, unsigned int i)
key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned key_len)
int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key,
unsigned key_len, u32 *flags)
{
int i;
u32 key[8];
__be32 p_key[8]; /* padded key */
struct cast6_ctx *c = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
if (key_len % 4 != 0) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@ -425,9 +424,17 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
EXPORT_SYMBOL_GPL(__cast6_setkey);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen,
&tfm->crt_flags);
}
EXPORT_SYMBOL_GPL(cast6_setkey);
/*forward quad round*/
static void Q(u32 *block, u8 *Kr, u32 *Km)
static inline void Q(u32 *block, u8 *Kr, u32 *Km)
{
u32 I;
block[2] ^= F1(block[3], Kr[0], Km[0]);
@ -437,7 +444,7 @@ static void Q(u32 *block, u8 *Kr, u32 *Km)
}
/*reverse quad round*/
static void QBAR(u32 *block, u8 *Kr, u32 *Km)
static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
{
u32 I;
block[3] ^= F1(block[0], Kr[3], Km[3]);
@ -446,9 +453,8 @@ static void QBAR(u32 *block, u8 *Kr, u32 *Km)
block[2] ^= F1(block[3], Kr[0], Km[0]);
}
static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
{
struct cast6_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
@ -478,10 +484,15 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
dst[2] = cpu_to_be32(block[2]);
dst[3] = cpu_to_be32(block[3]);
}
EXPORT_SYMBOL_GPL(__cast6_encrypt);
static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
{
struct cast6_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
@ -511,15 +522,22 @@ static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
dst[2] = cpu_to_be32(block[2]);
dst[3] = cpu_to_be32(block[3]);
}
EXPORT_SYMBOL_GPL(__cast6_decrypt);
static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast6_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
static struct crypto_alg alg = {
.cra_name = "cast6",
.cra_driver_name = "cast6-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = CAST6_MIN_KEY_SIZE,
@ -545,3 +563,4 @@ module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
MODULE_ALIAS("cast6");


@ -94,18 +94,6 @@ static int skcipher_null_crypt(struct blkcipher_desc *desc,
return err;
}
static struct crypto_alg compress_null = {
.cra_name = "compress_null",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(compress_null.cra_list),
.cra_u = { .compress = {
.coa_compress = null_compress,
.coa_decompress = null_compress } }
};
static struct shash_alg digest_null = {
.digestsize = NULL_DIGEST_SIZE,
.setkey = null_hash_setkey,
@ -122,22 +110,19 @@ static struct shash_alg digest_null = {
}
};
static struct crypto_alg cipher_null = {
static struct crypto_alg null_algs[3] = { {
.cra_name = "cipher_null",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cipher_null.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = NULL_KEY_SIZE,
.cia_max_keysize = NULL_KEY_SIZE,
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
};
static struct crypto_alg skcipher_null = {
}, {
.cra_name = "ecb(cipher_null)",
.cra_driver_name = "ecb-cipher_null",
.cra_priority = 100,
@ -146,7 +131,6 @@ static struct crypto_alg skcipher_null = {
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(skcipher_null.cra_list),
.cra_u = { .blkcipher = {
.min_keysize = NULL_KEY_SIZE,
.max_keysize = NULL_KEY_SIZE,
@ -154,7 +138,16 @@ static struct crypto_alg skcipher_null = {
.setkey = null_setkey,
.encrypt = skcipher_null_crypt,
.decrypt = skcipher_null_crypt } }
};
}, {
.cra_name = "compress_null",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .compress = {
.coa_compress = null_compress,
.coa_decompress = null_compress } }
} };
MODULE_ALIAS("compress_null");
MODULE_ALIAS("digest_null");
@ -164,40 +157,26 @@ static int __init crypto_null_mod_init(void)
{
int ret = 0;
ret = crypto_register_alg(&cipher_null);
ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs));
if (ret < 0)
goto out;
ret = crypto_register_alg(&skcipher_null);
if (ret < 0)
goto out_unregister_cipher;
ret = crypto_register_shash(&digest_null);
if (ret < 0)
goto out_unregister_skcipher;
goto out_unregister_algs;
ret = crypto_register_alg(&compress_null);
if (ret < 0)
goto out_unregister_digest;
return 0;
out_unregister_algs:
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
out:
return ret;
out_unregister_digest:
crypto_unregister_shash(&digest_null);
out_unregister_skcipher:
crypto_unregister_alg(&skcipher_null);
out_unregister_cipher:
crypto_unregister_alg(&cipher_null);
goto out;
}
static void __exit crypto_null_mod_fini(void)
{
crypto_unregister_alg(&compress_null);
crypto_unregister_shash(&digest_null);
crypto_unregister_alg(&skcipher_null);
crypto_unregister_alg(&cipher_null);
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
}
module_init(crypto_null_mod_init);


@ -30,7 +30,7 @@
#include "internal.h"
DEFINE_MUTEX(crypto_cfg_mutex);
static DEFINE_MUTEX(crypto_cfg_mutex);
/* The crypto netlink socket */
static struct sock *crypto_nlsk;


@ -199,7 +199,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct deflate_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = deflate_init,
.cra_exit = deflate_exit,
.cra_u = { .compress = {


@ -943,59 +943,44 @@ static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
d[1] = cpu_to_le32(L);
}
static struct crypto_alg des_alg = {
static struct crypto_alg des_algs[2] = { {
.cra_name = "des",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_setkey,
.cia_encrypt = des_encrypt,
.cia_decrypt = des_decrypt } }
};
static struct crypto_alg des3_ede_alg = {
}, {
.cra_name = "des3_ede",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(des3_ede_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_setkey = des3_ede_setkey,
.cia_encrypt = des3_ede_encrypt,
.cia_decrypt = des3_ede_decrypt } }
};
} };
MODULE_ALIAS("des3_ede");
static int __init des_generic_mod_init(void)
{
int ret = 0;
ret = crypto_register_alg(&des_alg);
if (ret < 0)
goto out;
ret = crypto_register_alg(&des3_ede_alg);
if (ret < 0)
crypto_unregister_alg(&des_alg);
out:
return ret;
return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
}
static void __exit des_generic_mod_fini(void)
{
crypto_unregister_alg(&des3_ede_alg);
crypto_unregister_alg(&des_alg);
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
module_init(des_generic_mod_init);


@ -396,7 +396,6 @@ static struct crypto_alg fcrypt_alg = {
.cra_ctxsize = sizeof(struct fcrypt_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(fcrypt_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = 8,
.cia_max_keysize = 8,


@ -153,7 +153,6 @@ static struct shash_alg ghash_alg = {
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
.cra_exit = ghash_exit_tfm,
},
};


@ -853,7 +853,6 @@ static struct crypto_alg khazad_alg = {
.cra_ctxsize = sizeof (struct khazad_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(khazad_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
.cia_max_keysize = KHAZAD_KEY_SIZE,


@ -35,7 +35,6 @@ static struct crypto_alg krng_alg = {
.cra_ctxsize = 0,
.cra_type = &crypto_rng_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(krng_alg.cra_list),
.cra_u = {
.rng = {
.rng_make_random = krng_get_random,


@ -81,7 +81,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lzo_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = lzo_init,
.cra_exit = lzo_exit,
.cra_u = { .compress = {


@ -221,7 +221,6 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct salsa20_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.blkcipher = {
.setkey = setkey,


@ -449,7 +449,6 @@ static struct crypto_alg seed_alg = {
.cra_ctxsize = sizeof(struct seed_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(seed_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = SEED_KEY_SIZE,


@ -567,24 +567,6 @@ static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
__serpent_decrypt(ctx, dst, src);
}
static struct crypto_alg serpent_alg = {
.cra_name = "serpent",
.cra_driver_name = "serpent-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = serpent_setkey,
.cia_encrypt = serpent_encrypt,
.cia_decrypt = serpent_decrypt } }
};
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
@ -637,41 +619,44 @@ static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
d[3] = swab32(rd[0]);
}
static struct crypto_alg tnepres_alg = {
static struct crypto_alg srp_algs[2] = { {
.cra_name = "serpent",
.cra_driver_name = "serpent-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = serpent_setkey,
.cia_encrypt = serpent_encrypt,
.cia_decrypt = serpent_decrypt } }
}, {
.cra_name = "tnepres",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = tnepres_setkey,
.cia_encrypt = tnepres_encrypt,
.cia_decrypt = tnepres_decrypt } }
};
} };
static int __init serpent_mod_init(void)
{
int ret = crypto_register_alg(&serpent_alg);
if (ret)
return ret;
ret = crypto_register_alg(&tnepres_alg);
if (ret)
crypto_unregister_alg(&serpent_alg);
return ret;
return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs));
}
static void __exit serpent_mod_fini(void)
{
crypto_unregister_alg(&tnepres_alg);
crypto_unregister_alg(&serpent_alg);
crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
}
module_init(serpent_mod_init);


@ -336,7 +336,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
return 0;
}
static struct shash_alg sha256 = {
static struct shash_alg sha256_algs[2] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = sha256_update,
@ -352,9 +352,7 @@ static struct shash_alg sha256 = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg sha224 = {
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_init,
.update = sha256_update,
@ -367,29 +365,16 @@ static struct shash_alg sha224 = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
} };
static int __init sha256_generic_mod_init(void)
{
int ret = 0;
ret = crypto_register_shash(&sha224);
if (ret < 0)
return ret;
ret = crypto_register_shash(&sha256);
if (ret < 0)
crypto_unregister_shash(&sha224);
return ret;
return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
}
static void __exit sha256_generic_mod_fini(void)
{
crypto_unregister_shash(&sha224);
crypto_unregister_shash(&sha256);
crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
}
module_init(sha256_generic_mod_init);
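
Folding sha224/sha256 into one sha256_algs[] array (and likewise sha512/sha384 in the next file) only changes how the generic module registers itself; callers keep using the ordinary synchronous hash interface. For orientation, a minimal hypothetical user computing a SHA-256 digest would look roughly like this; the out buffer must hold SHA256_DIGEST_SIZE bytes:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sha256_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the descriptor carries per-request state after the tfm itself */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	desc->tfm = tfm;
	desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */

	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}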


@ -242,7 +242,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
return 0;
}
static struct shash_alg sha512 = {
static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_init,
.update = sha512_update,
@ -254,9 +254,7 @@ static struct shash_alg sha512 = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg sha384 = {
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_init,
.update = sha512_update,
@ -268,24 +266,16 @@ static struct shash_alg sha384 = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
} };
static int __init sha512_generic_mod_init(void)
{
int ret = 0;
if ((ret = crypto_register_shash(&sha384)) < 0)
goto out;
if ((ret = crypto_register_shash(&sha512)) < 0)
crypto_unregister_shash(&sha384);
out:
return ret;
return crypto_register_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
static void __exit sha512_generic_mod_fini(void)
{
crypto_unregister_shash(&sha384);
crypto_unregister_shash(&sha512);
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
module_init(sha512_generic_mod_init);


@ -629,6 +629,42 @@ int crypto_unregister_shash(struct shash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
int crypto_register_shashes(struct shash_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_shash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_shash(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);
int crypto_unregister_shashes(struct shash_alg *algs, int count)
{
int i, ret;
for (i = count - 1; i >= 0; --i) {
ret = crypto_unregister_shash(&algs[i]);
if (ret)
pr_err("Failed to unregister %s %s: %d\n",
algs[i].base.cra_driver_name,
algs[i].base.cra_name, ret);
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_shashes);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst)
{
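
The two helpers added above give shash drivers the same array-based registration that crypto_register_algs() provides for cipher algorithms: on a mid-array failure the entries already registered are rolled back, and unregistration failures are only logged. A module skeleton using them might look like the sketch below; my_hashes[] is a placeholder that would hold real struct shash_alg definitions such as the sha256_algs[] pair above, and the declarations are assumed to come from <crypto/internal/hash.h> alongside the other shash registration helpers:

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/internal/hash.h>

static struct shash_alg my_hashes[2];	/* placeholder: fill in real algorithms */

static int __init my_hashes_mod_init(void)
{
	/* one call registers the whole family, unwinding on failure */
	return crypto_register_shashes(my_hashes, ARRAY_SIZE(my_hashes));
}

static void __exit my_hashes_mod_fini(void)
{
	crypto_unregister_shashes(my_hashes, ARRAY_SIZE(my_hashes));
}

module_init(my_hashes_mod_init);
module_exit(my_hashes_mod_fini);
MODULE_LICENSE("GPL");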


@ -97,7 +97,6 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
int ret = 0;
int i;
local_bh_disable();
local_irq_disable();
/* Warm-up run. */
@ -130,7 +129,6 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
out:
local_irq_enable();
local_bh_enable();
if (ret == 0)
printk("1 operation in %lu cycles (%d bytes)\n",
@ -300,7 +298,6 @@ static int test_hash_cycles_digest(struct hash_desc *desc,
int i;
int ret;
local_bh_disable();
local_irq_disable();
/* Warm-up run. */
@ -327,7 +324,6 @@ static int test_hash_cycles_digest(struct hash_desc *desc,
out:
local_irq_enable();
local_bh_enable();
if (ret)
return ret;
@ -348,7 +344,6 @@ static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
if (plen == blen)
return test_hash_cycles_digest(desc, sg, blen, out);
local_bh_disable();
local_irq_disable();
/* Warm-up run. */
@ -391,7 +386,6 @@ static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
out:
local_irq_enable();
local_bh_enable();
if (ret)
return ret;
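
The hunks above drop the local_bh_disable()/local_bh_enable() pairs that used to wrap local_irq_disable()/local_irq_enable(): once hard interrupts are off on the local CPU, softirqs cannot run anyway, and re-enabling BHs while IRQs are still disabled can trigger a warning. The cycle-counting loops therefore reduce to something like the sketch below; the function name is illustrative and the encrypt call's return value is ignored for brevity:

#include <linux/crypto.h>
#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/timex.h>	/* get_cycles() */

static unsigned long time_one_op(struct blkcipher_desc *desc,
				 struct scatterlist *sg, unsigned int blen)
{
	cycles_t start, end;

	local_irq_disable();
	start = get_cycles();
	crypto_blkcipher_encrypt(desc, sg, sg, blen);
	end = get_cycles();
	local_irq_enable();

	return (unsigned long)(end - start);
}
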
@ -1037,10 +1031,16 @@ static int do_test(int m)
case 14:
ret += tcrypt_test("ecb(cast5)");
ret += tcrypt_test("cbc(cast5)");
ret += tcrypt_test("ctr(cast5)");
break;
case 15:
ret += tcrypt_test("ecb(cast6)");
ret += tcrypt_test("cbc(cast6)");
ret += tcrypt_test("ctr(cast6)");
ret += tcrypt_test("lrw(cast6)");
ret += tcrypt_test("xts(cast6)");
break;
case 16:
@ -1112,6 +1112,9 @@ static int do_test(int m)
case 32:
ret += tcrypt_test("ecb(camellia)");
ret += tcrypt_test("cbc(camellia)");
ret += tcrypt_test("ctr(camellia)");
ret += tcrypt_test("lrw(camellia)");
ret += tcrypt_test("xts(camellia)");
break;
case 33:
ret += tcrypt_test("sha224");
@ -1165,6 +1168,10 @@ static int do_test(int m)
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
case 46:
ret += tcrypt_test("ghash");
break;
case 100:
ret += tcrypt_test("hmac(md5)");
break;
@ -1359,6 +1366,44 @@ static int do_test(int m)
speed_template_8);
break;
case 209:
test_cipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 210:
test_cipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 300:
/* fall through */
@ -1639,6 +1684,44 @@ static int do_test(int m)
speed_template_8);
break;
case 506:
test_acipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 507:
test_acipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 1000:
test_available();
break;


@ -47,6 +47,7 @@ static struct cipher_speed_template des3_speed_template[] = {
*/
static u8 speed_template_8[] = {8, 0};
static u8 speed_template_24[] = {24, 0};
static u8 speed_template_8_16[] = {8, 16, 0};
static u8 speed_template_8_32[] = {8, 32, 0};
static u8 speed_template_16_32[] = {16, 32, 0};
static u8 speed_template_16_24_32[] = {16, 24, 32, 0};


@ -219,84 +219,55 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
out[1] = cpu_to_le32(z);
}
static struct crypto_alg tea_alg = {
static struct crypto_alg tea_algs[3] = { {
.cra_name = "tea",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tea_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
.cia_max_keysize = TEA_KEY_SIZE,
.cia_setkey = tea_setkey,
.cia_encrypt = tea_encrypt,
.cia_decrypt = tea_decrypt } }
};
static struct crypto_alg xtea_alg = {
}, {
.cra_name = "xtea",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
.cia_encrypt = xtea_encrypt,
.cia_decrypt = xtea_decrypt } }
};
static struct crypto_alg xeta_alg = {
}, {
.cra_name = "xeta",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
.cia_encrypt = xeta_encrypt,
.cia_decrypt = xeta_decrypt } }
};
} };
static int __init tea_mod_init(void)
{
int ret = 0;
ret = crypto_register_alg(&tea_alg);
if (ret < 0)
goto out;
ret = crypto_register_alg(&xtea_alg);
if (ret < 0) {
crypto_unregister_alg(&tea_alg);
goto out;
}
ret = crypto_register_alg(&xeta_alg);
if (ret < 0) {
crypto_unregister_alg(&tea_alg);
crypto_unregister_alg(&xtea_alg);
goto out;
}
out:
return ret;
return crypto_register_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
static void __exit tea_mod_fini(void)
{
crypto_unregister_alg(&tea_alg);
crypto_unregister_alg(&xtea_alg);
crypto_unregister_alg(&xeta_alg);
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
MODULE_ALIAS("xtea");


@ -358,8 +358,9 @@ out_nobuf:
return ret;
}
static int test_aead(struct crypto_aead *tfm, int enc,
struct aead_testvec *template, unsigned int tcount)
static int __test_aead(struct crypto_aead *tfm, int enc,
struct aead_testvec *template, unsigned int tcount,
const bool diff_dst)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
unsigned int i, j, k, n, temp;
@ -367,15 +368,18 @@ static int test_aead(struct crypto_aead *tfm, int enc,
char *q;
char *key;
struct aead_request *req;
struct scatterlist sg[8];
struct scatterlist asg[8];
const char *e;
struct scatterlist *sg;
struct scatterlist *asg;
struct scatterlist *sgout;
const char *e, *d;
struct tcrypt_result result;
unsigned int authsize;
void *input;
void *output;
void *assoc;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
if (testmgr_alloc_buf(xbuf))
@ -383,6 +387,21 @@ static int test_aead(struct crypto_aead *tfm, int enc,
if (testmgr_alloc_buf(axbuf))
goto out_noaxbuf;
if (diff_dst && testmgr_alloc_buf(xoutbuf))
goto out_nooutbuf;
/* avoid "the frame size is larger than 1024 bytes" compiler warning */
sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 3 : 2), GFP_KERNEL);
if (!sg)
goto out_nosg;
asg = &sg[8];
sgout = &asg[8];
if (diff_dst)
d = "-ddst";
else
d = "";
if (enc == ENCRYPT)
e = "encryption";
else
@ -392,8 +411,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
printk(KERN_ERR "alg: aead: Failed to allocate request for "
"%s\n", algo);
pr_err("alg: aead%s: Failed to allocate request for %s\n",
d, algo);
goto out;
}
@ -432,9 +451,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
ret = crypto_aead_setkey(tfm, key,
template[i].klen);
if (!ret == template[i].fail) {
printk(KERN_ERR "alg: aead: setkey failed on "
"test %d for %s: flags=%x\n", j, algo,
crypto_aead_get_flags(tfm));
pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
} else if (ret)
continue;
@ -442,18 +460,26 @@ static int test_aead(struct crypto_aead *tfm, int enc,
authsize = abs(template[i].rlen - template[i].ilen);
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
printk(KERN_ERR "alg: aead: Failed to set "
"authsize to %u on test %d for %s\n",
authsize, j, algo);
pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
d, authsize, j, algo);
goto out;
}
sg_init_one(&sg[0], input,
template[i].ilen + (enc ? authsize : 0));
if (diff_dst) {
output = xoutbuf[0];
sg_init_one(&sgout[0], output,
template[i].ilen +
(enc ? authsize : 0));
} else {
output = input;
}
sg_init_one(&asg[0], assoc, template[i].alen);
aead_request_set_crypt(req, sg, sg,
aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
aead_request_set_assoc(req, asg, template[i].alen);
@ -466,10 +492,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
case 0:
if (template[i].novrfy) {
/* verification was supposed to fail */
printk(KERN_ERR "alg: aead: %s failed "
"on test %d for %s: ret was 0, "
"expected -EBADMSG\n",
e, j, algo);
pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
d, e, j, algo);
/* so really, we got a bad message */
ret = -EBADMSG;
goto out;
@ -489,15 +513,15 @@ static int test_aead(struct crypto_aead *tfm, int enc,
continue;
/* fall through */
default:
printk(KERN_ERR "alg: aead: %s failed on test "
"%d for %s: ret=%d\n", e, j, algo, -ret);
pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
}
q = input;
q = output;
if (memcmp(q, template[i].result, template[i].rlen)) {
printk(KERN_ERR "alg: aead: Test %d failed on "
"%s for %s\n", j, e, algo);
pr_err("alg: aead%s: Test %d failed on %s for %s\n",
d, j, e, algo);
hexdump(q, template[i].rlen);
ret = -EINVAL;
goto out;
@ -522,9 +546,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
ret = crypto_aead_setkey(tfm, key, template[i].klen);
if (!ret == template[i].fail) {
printk(KERN_ERR "alg: aead: setkey failed on "
"chunk test %d for %s: flags=%x\n", j,
algo, crypto_aead_get_flags(tfm));
pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
} else if (ret)
continue;
@ -533,6 +556,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
ret = -EINVAL;
sg_init_table(sg, template[i].np);
if (diff_dst)
sg_init_table(sgout, template[i].np);
for (k = 0, temp = 0; k < template[i].np; k++) {
if (WARN_ON(offset_in_page(IDX[k]) +
template[i].tap[k] > PAGE_SIZE))
@ -551,14 +576,26 @@ static int test_aead(struct crypto_aead *tfm, int enc,
q[n] = 0;
sg_set_buf(&sg[k], q, template[i].tap[k]);
if (diff_dst) {
q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
memset(q, 0, template[i].tap[k]);
if (offset_in_page(q) + n < PAGE_SIZE)
q[n] = 0;
sg_set_buf(&sgout[k], q,
template[i].tap[k]);
}
temp += template[i].tap[k];
}
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
printk(KERN_ERR "alg: aead: Failed to set "
"authsize to %u on chunk test %d for "
"%s\n", authsize, j, algo);
pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n",
d, authsize, j, algo);
goto out;
}
@ -571,6 +608,9 @@ static int test_aead(struct crypto_aead *tfm, int enc,
}
sg[k - 1].length += authsize;
if (diff_dst)
sgout[k - 1].length += authsize;
}
sg_init_table(asg, template[i].anp);
@ -588,7 +628,7 @@ static int test_aead(struct crypto_aead *tfm, int enc,
temp += template[i].atap[k];
}
aead_request_set_crypt(req, sg, sg,
aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen,
iv);
@ -602,10 +642,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
case 0:
if (template[i].novrfy) {
/* verification was supposed to fail */
printk(KERN_ERR "alg: aead: %s failed "
"on chunk test %d for %s: ret "
"was 0, expected -EBADMSG\n",
e, j, algo);
pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n",
d, e, j, algo);
/* so really, we got a bad message */
ret = -EBADMSG;
goto out;
@ -625,32 +663,35 @@ static int test_aead(struct crypto_aead *tfm, int enc,
continue;
/* fall through */
default:
printk(KERN_ERR "alg: aead: %s failed on "
"chunk test %d for %s: ret=%d\n", e, j,
algo, -ret);
pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
}
ret = -EINVAL;
for (k = 0, temp = 0; k < template[i].np; k++) {
q = xbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
if (diff_dst)
q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
else
q = xbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
n = template[i].tap[k];
if (k == template[i].np - 1)
n += enc ? authsize : -authsize;
if (memcmp(q, template[i].result + temp, n)) {
printk(KERN_ERR "alg: aead: Chunk "
"test %d failed on %s at page "
"%u for %s\n", j, e, k, algo);
pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n",
d, j, e, k, algo);
hexdump(q, n);
goto out;
}
q += n;
if (k == template[i].np - 1 && !enc) {
if (memcmp(q, template[i].input +
if (!diff_dst &&
memcmp(q, template[i].input +
temp + n, authsize))
n = authsize;
else
@ -661,11 +702,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
;
}
if (n) {
printk(KERN_ERR "alg: aead: Result "
"buffer corruption in chunk "
"test %d on %s at page %u for "
"%s: %u bytes:\n", j, e, k,
algo, n);
pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
d, j, e, k, algo, n);
hexdump(q, n);
goto out;
}
@ -679,6 +717,11 @@ static int test_aead(struct crypto_aead *tfm, int enc,
out:
aead_request_free(req);
kfree(sg);
out_nosg:
if (diff_dst)
testmgr_free_buf(xoutbuf);
out_nooutbuf:
testmgr_free_buf(axbuf);
out_noaxbuf:
testmgr_free_buf(xbuf);
@ -686,6 +729,20 @@ out_noxbuf:
return ret;
}
static int test_aead(struct crypto_aead *tfm, int enc,
struct aead_testvec *template, unsigned int tcount)
{
int ret;
/* test 'dst == src' case */
ret = __test_aead(tfm, enc, template, tcount, false);
if (ret)
return ret;
/* test 'dst != src' case */
return __test_aead(tfm, enc, template, tcount, true);
}
static int test_cipher(struct crypto_cipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount)
{
@ -761,8 +818,9 @@ out_nobuf:
return ret;
}
static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount)
static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount,
const bool diff_dst)
{
const char *algo =
crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
@ -770,16 +828,26 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
char *q;
struct ablkcipher_request *req;
struct scatterlist sg[8];
const char *e;
struct scatterlist sgout[8];
const char *e, *d;
struct tcrypt_result result;
void *data;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
int ret = -ENOMEM;
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
if (diff_dst && testmgr_alloc_buf(xoutbuf))
goto out_nooutbuf;
if (diff_dst)
d = "-ddst";
else
d = "";
if (enc == ENCRYPT)
e = "encryption";
else
@ -789,8 +857,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
printk(KERN_ERR "alg: skcipher: Failed to allocate request "
"for %s\n", algo);
pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
d, algo);
goto out;
}
@ -804,7 +872,7 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
else
memset(iv, 0, MAX_IVLEN);
if (!(template[i].np)) {
if (!(template[i].np) || (template[i].also_non_np)) {
j++;
ret = -EINVAL;
@ -822,16 +890,21 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
ret = crypto_ablkcipher_setkey(tfm, template[i].key,
template[i].klen);
if (!ret == template[i].fail) {
printk(KERN_ERR "alg: skcipher: setkey failed "
"on test %d for %s: flags=%x\n", j,
algo, crypto_ablkcipher_get_flags(tfm));
pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo,
crypto_ablkcipher_get_flags(tfm));
goto out;
} else if (ret)
continue;
sg_init_one(&sg[0], data, template[i].ilen);
if (diff_dst) {
data = xoutbuf[0];
sg_init_one(&sgout[0], data, template[i].ilen);
}
ablkcipher_request_set_crypt(req, sg, sg,
ablkcipher_request_set_crypt(req, sg,
(diff_dst) ? sgout : sg,
template[i].ilen, iv);
ret = enc ?
crypto_ablkcipher_encrypt(req) :
@ -850,16 +923,15 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
}
/* fall through */
default:
printk(KERN_ERR "alg: skcipher: %s failed on "
"test %d for %s: ret=%d\n", e, j, algo,
-ret);
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
}
q = data;
if (memcmp(q, template[i].result, template[i].rlen)) {
printk(KERN_ERR "alg: skcipher: Test %d "
"failed on %s for %s\n", j, e, algo);
pr_err("alg: skcipher%s: Test %d failed on %s for %s\n",
d, j, e, algo);
hexdump(q, template[i].rlen);
ret = -EINVAL;
goto out;
@ -886,9 +958,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
ret = crypto_ablkcipher_setkey(tfm, template[i].key,
template[i].klen);
if (!ret == template[i].fail) {
printk(KERN_ERR "alg: skcipher: setkey failed "
"on chunk test %d for %s: flags=%x\n",
j, algo,
pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo,
crypto_ablkcipher_get_flags(tfm));
goto out;
} else if (ret)
@ -897,6 +968,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
temp = 0;
ret = -EINVAL;
sg_init_table(sg, template[i].np);
if (diff_dst)
sg_init_table(sgout, template[i].np);
for (k = 0; k < template[i].np; k++) {
if (WARN_ON(offset_in_page(IDX[k]) +
template[i].tap[k] > PAGE_SIZE))
@ -913,11 +986,24 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
q[template[i].tap[k]] = 0;
sg_set_buf(&sg[k], q, template[i].tap[k]);
if (diff_dst) {
q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
sg_set_buf(&sgout[k], q,
template[i].tap[k]);
memset(q, 0, template[i].tap[k]);
if (offset_in_page(q) +
template[i].tap[k] < PAGE_SIZE)
q[template[i].tap[k]] = 0;
}
temp += template[i].tap[k];
}
ablkcipher_request_set_crypt(req, sg, sg,
ablkcipher_request_set_crypt(req, sg,
(diff_dst) ? sgout : sg,
template[i].ilen, iv);
ret = enc ?
@ -937,23 +1023,25 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
}
/* fall through */
default:
printk(KERN_ERR "alg: skcipher: %s failed on "
"chunk test %d for %s: ret=%d\n", e, j,
algo, -ret);
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
}
temp = 0;
ret = -EINVAL;
for (k = 0; k < template[i].np; k++) {
q = xbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
if (diff_dst)
q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
else
q = xbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]);
if (memcmp(q, template[i].result + temp,
template[i].tap[k])) {
printk(KERN_ERR "alg: skcipher: Chunk "
"test %d failed on %s at page "
"%u for %s\n", j, e, k, algo);
pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n",
d, j, e, k, algo);
hexdump(q, template[i].tap[k]);
goto out;
}
@ -962,11 +1050,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
for (n = 0; offset_in_page(q + n) && q[n]; n++)
;
if (n) {
printk(KERN_ERR "alg: skcipher: "
"Result buffer corruption in "
"chunk test %d on %s at page "
"%u for %s: %u bytes:\n", j, e,
k, algo, n);
pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
d, j, e, k, algo, n);
hexdump(q, n);
goto out;
}
@ -979,11 +1064,28 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
out:
ablkcipher_request_free(req);
if (diff_dst)
testmgr_free_buf(xoutbuf);
out_nooutbuf:
testmgr_free_buf(xbuf);
out_nobuf:
return ret;
}
static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount)
{
int ret;
/* test 'dst == src' case */
ret = __test_skcipher(tfm, enc, template, tcount, false);
if (ret)
return ret;
/* test 'dst != src' case */
return __test_skcipher(tfm, enc, template, tcount, true);
}
static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
struct comp_testvec *dtemplate, int ctcount, int dtcount)
{
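
With the __test_aead()/__test_skcipher() wrappers above, every template is exercised once with dst == src and once with dst != src, and the new also_non_np flag lets a single multi-page template also be run through the linear single-scatterlist path. A hypothetical template entry using these fields, of the kind that would live in crypto/testmgr.h next to the existing vectors, might look like the sketch below; the key, IV, input and result bytes are placeholders, not real test data:

static struct cipher_testvec example_enc_tv_template[] = {
	{
		.key	= "\x01\x02\x03\x04\x05\x06\x07\x08"
			  "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10",
		.klen	= 16,
		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07",
		.input	= "nineteen byte input",
		.ilen	= 19,
		.result	= "nineteen byte outpt",	/* placeholder ciphertext */
		.rlen	= 19,
		.also_non_np = 1,	/* run the linear path as well ... */
		.np	= 2,		/* ... and the two-chunk scattered path */
		.tap	= { 16, 3 },
	},
};
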
@ -1534,6 +1636,36 @@ static int alg_test_null(const struct alg_test_desc *desc,
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "__cbc-cast5-avx",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__cbc-cast6-avx",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__cbc-serpent-avx",
.test = alg_test_null,
.suite = {
@ -1594,6 +1726,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "__driver-cbc-cast5-avx",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-cbc-cast6-avx",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-cbc-serpent-avx",
.test = alg_test_null,
@ -1655,6 +1817,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "__driver-ecb-cast5-avx",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-ecb-cast6-avx",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-ecb-serpent-avx",
.test = alg_test_null,
@ -1817,6 +2009,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "cbc(cast5)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast5_cbc_enc_tv_template,
.count = CAST5_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast5_cbc_dec_tv_template,
.count = CAST5_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast6_cbc_enc_tv_template,
.count = CAST6_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast6_cbc_dec_tv_template,
.count = CAST6_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(des)",
.test = alg_test_skcipher,
@ -1936,6 +2158,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "cryptd(__driver-ecb-cast5-avx)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "cryptd(__driver-ecb-cast6-avx)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "cryptd(__driver-ecb-serpent-avx)",
.test = alg_test_null,
@ -2053,6 +2305,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "ctr(cast5)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast5_ctr_enc_tv_template,
.count = CAST5_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast5_ctr_dec_tv_template,
.count = CAST5_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ctr(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast6_ctr_enc_tv_template,
.count = CAST6_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast6_ctr_dec_tv_template,
.count = CAST6_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
@ -2529,6 +2811,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "lrw(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast6_lrw_enc_tv_template,
.count = CAST6_LRW_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast6_lrw_dec_tv_template,
.count = CAST6_LRW_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "lrw(serpent)",
.test = alg_test_skcipher,
@ -2881,6 +3178,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "xts(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast6_xts_enc_tv_template,
.count = CAST6_XTS_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast6_xts_dec_tv_template,
.count = CAST6_XTS_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "xts(serpent)",
.test = alg_test_skcipher,

File diff suppressed because it is too large


@ -628,7 +628,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
return 0;
}
static struct shash_alg tgr192 = {
static struct shash_alg tgr_algs[3] = { {
.digestsize = TGR192_DIGEST_SIZE,
.init = tgr192_init,
.update = tgr192_update,
@ -640,9 +640,7 @@ static struct shash_alg tgr192 = {
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg tgr160 = {
}, {
.digestsize = TGR160_DIGEST_SIZE,
.init = tgr192_init,
.update = tgr192_update,
@ -654,9 +652,7 @@ static struct shash_alg tgr160 = {
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg tgr128 = {
}, {
.digestsize = TGR128_DIGEST_SIZE,
.init = tgr192_init,
.update = tgr192_update,
@ -668,38 +664,16 @@ static struct shash_alg tgr128 = {
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
} };
static int __init tgr192_mod_init(void)
{
int ret = 0;
ret = crypto_register_shash(&tgr192);
if (ret < 0) {
goto out;
}
ret = crypto_register_shash(&tgr160);
if (ret < 0) {
crypto_unregister_shash(&tgr192);
goto out;
}
ret = crypto_register_shash(&tgr128);
if (ret < 0) {
crypto_unregister_shash(&tgr192);
crypto_unregister_shash(&tgr160);
}
out:
return ret;
return crypto_register_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
}
static void __exit tgr192_mod_fini(void)
{
crypto_unregister_shash(&tgr192);
crypto_unregister_shash(&tgr160);
crypto_unregister_shash(&tgr128);
crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
}
MODULE_ALIAS("tgr160");


@ -188,7 +188,6 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,


@ -38,11 +38,11 @@
* Constants and masks
*/
#define UINT64_C(x) x##ULL
const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
#define pe64_to_cpup le64_to_cpup /* Prefer little endian */


@ -1119,7 +1119,7 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
return 0;
}
static struct shash_alg wp512 = {
static struct shash_alg wp_algs[3] = { {
.digestsize = WP512_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
@ -1131,9 +1131,7 @@ static struct shash_alg wp512 = {
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg wp384 = {
}, {
.digestsize = WP384_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
@ -1145,9 +1143,7 @@ static struct shash_alg wp384 = {
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg wp256 = {
}, {
.digestsize = WP256_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
@ -1159,39 +1155,16 @@ static struct shash_alg wp256 = {
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
} };
static int __init wp512_mod_init(void)
{
int ret = 0;
ret = crypto_register_shash(&wp512);
if (ret < 0)
goto out;
ret = crypto_register_shash(&wp384);
if (ret < 0)
{
crypto_unregister_shash(&wp512);
goto out;
}
ret = crypto_register_shash(&wp256);
if (ret < 0)
{
crypto_unregister_shash(&wp512);
crypto_unregister_shash(&wp384);
}
out:
return ret;
return crypto_register_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
static void __exit wp512_mod_fini(void)
{
crypto_unregister_shash(&wp512);
crypto_unregister_shash(&wp384);
crypto_unregister_shash(&wp256);
crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
MODULE_ALIAS("wp384");


@ -59,16 +59,21 @@
#define RNGA_STATUS_LAST_READ_STATUS 0x00000002
#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001
static struct platform_device *rng_dev;
struct mxc_rng {
struct device *dev;
struct hwrng rng;
void __iomem *mem;
struct clk *clk;
};
static int mxc_rnga_data_present(struct hwrng *rng, int wait)
{
void __iomem *rng_base = (void __iomem *)rng->priv;
int i;
struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
for (i = 0; i < 20; i++) {
/* how many random numbers are in FIFO? [0-16] */
int level = (__raw_readl(rng_base + RNGA_STATUS) &
int level = (__raw_readl(mxc_rng->mem + RNGA_STATUS) &
RNGA_STATUS_LEVEL_MASK) >> 8;
if (level || !wait)
return !!level;
@ -81,20 +86,20 @@ static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
{
int err;
u32 ctrl;
void __iomem *rng_base = (void __iomem *)rng->priv;
struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
/* retrieve a random number from FIFO */
*data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO);
*data = __raw_readl(mxc_rng->mem + RNGA_OUTPUT_FIFO);
/* some error while reading this random number? */
err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
err = __raw_readl(mxc_rng->mem + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
/* on error: clear the error interrupt, but don't return a random number */
if (err) {
dev_dbg(&rng_dev->dev, "Error while reading random number!\n");
ctrl = __raw_readl(rng_base + RNGA_CONTROL);
dev_dbg(mxc_rng->dev, "Error while reading random number!\n");
ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
__raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
rng_base + RNGA_CONTROL);
mxc_rng->mem + RNGA_CONTROL);
return 0;
} else
return 4;
@ -103,22 +108,22 @@ static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
static int mxc_rnga_init(struct hwrng *rng)
{
u32 ctrl, osc;
void __iomem *rng_base = (void __iomem *)rng->priv;
struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
/* wake up */
ctrl = __raw_readl(rng_base + RNGA_CONTROL);
__raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL);
ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
__raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, mxc_rng->mem + RNGA_CONTROL);
/* verify if oscillator is working */
osc = __raw_readl(rng_base + RNGA_STATUS);
osc = __raw_readl(mxc_rng->mem + RNGA_STATUS);
if (osc & RNGA_STATUS_OSC_DEAD) {
dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n");
dev_err(mxc_rng->dev, "RNGA Oscillator is dead!\n");
return -ENODEV;
}
/* go running */
ctrl = __raw_readl(rng_base + RNGA_CONTROL);
__raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
__raw_writel(ctrl | RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
return 0;
}
@ -126,40 +131,40 @@ static int mxc_rnga_init(struct hwrng *rng)
static void mxc_rnga_cleanup(struct hwrng *rng)
{
u32 ctrl;
void __iomem *rng_base = (void __iomem *)rng->priv;
struct mxc_rng *mxc_rng = container_of(rng, struct mxc_rng, rng);
ctrl = __raw_readl(rng_base + RNGA_CONTROL);
ctrl = __raw_readl(mxc_rng->mem + RNGA_CONTROL);
/* stop rnga */
__raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
__raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
}
static struct hwrng mxc_rnga = {
.name = "mxc-rnga",
.init = mxc_rnga_init,
.cleanup = mxc_rnga_cleanup,
.data_present = mxc_rnga_data_present,
.data_read = mxc_rnga_data_read
};
static int __init mxc_rnga_probe(struct platform_device *pdev)
{
int err = -ENODEV;
struct clk *clk;
struct resource *res, *mem;
void __iomem *rng_base = NULL;
struct mxc_rng *mxc_rng;
if (rng_dev)
return -EBUSY;
mxc_rng = devm_kzalloc(&pdev->dev, sizeof(struct mxc_rng),
GFP_KERNEL);
if (!mxc_rng)
return -ENOMEM;
clk = clk_get(&pdev->dev, "rng");
if (IS_ERR(clk)) {
mxc_rng->dev = &pdev->dev;
mxc_rng->rng.name = "mxc-rnga";
mxc_rng->rng.init = mxc_rnga_init;
mxc_rng->rng.cleanup = mxc_rnga_cleanup,
mxc_rng->rng.data_present = mxc_rnga_data_present,
mxc_rng->rng.data_read = mxc_rnga_data_read,
mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mxc_rng->clk)) {
dev_err(&pdev->dev, "Could not get rng_clk!\n");
err = PTR_ERR(clk);
err = PTR_ERR(mxc_rng->clk);
goto out;
}
clk_enable(clk);
clk_prepare_enable(mxc_rng->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@ -173,36 +178,27 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
goto err_region;
}
rng_base = ioremap(res->start, resource_size(res));
if (!rng_base) {
mxc_rng->mem = ioremap(res->start, resource_size(res));
if (!mxc_rng->mem) {
err = -ENOMEM;
goto err_ioremap;
}
mxc_rnga.priv = (unsigned long)rng_base;
err = hwrng_register(&mxc_rnga);
err = hwrng_register(&mxc_rng->rng);
if (err) {
dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
goto err_register;
goto err_ioremap;
}
rng_dev = pdev;
dev_info(&pdev->dev, "MXC RNGA Registered.\n");
return 0;
err_register:
iounmap(rng_base);
rng_base = NULL;
err_ioremap:
release_mem_region(res->start, resource_size(res));
err_region:
clk_disable(clk);
clk_put(clk);
clk_disable_unprepare(mxc_rng->clk);
out:
return err;
@ -211,17 +207,15 @@ out:
static int __exit mxc_rnga_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *rng_base = (void __iomem *)mxc_rnga.priv;
struct clk *clk = clk_get(&pdev->dev, "rng");
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
hwrng_unregister(&mxc_rnga);
hwrng_unregister(&mxc_rng->rng);
iounmap(rng_base);
iounmap(mxc_rng->mem);
release_mem_region(res->start, resource_size(res));
clk_disable(clk);
clk_put(clk);
clk_disable_unprepare(mxc_rng->clk);
return 0;
}
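/*
 * Not part of the diff: a minimal illustration of the pattern mxc-rnga now
 * follows. The per-device state embeds the struct hwrng handed to
 * hwrng_register(), and each callback recovers the outer structure with
 * container_of() instead of reaching for file-scope globals.
 */
struct example_rng {
	struct device *dev;
	struct hwrng rng;
	void __iomem *mem;
};

static int example_data_present(struct hwrng *rng, int wait)
{
	struct example_rng *priv = container_of(rng, struct example_rng, rng);

	/* per-device registers and struct device are reachable without globals */
	return !!__raw_readl(priv->mem);
}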


@ -75,42 +75,35 @@ static int __devinit octeon_rng_probe(struct platform_device *pdev)
res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_ports)
goto err_ports;
return -ENOENT;
res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res_result)
goto err_ports;
return -ENOENT;
rng->control_status = devm_ioremap_nocache(&pdev->dev,
res_ports->start,
sizeof(u64));
if (!rng->control_status)
goto err_ports;
return -ENOENT;
rng->result = devm_ioremap_nocache(&pdev->dev,
res_result->start,
sizeof(u64));
if (!rng->result)
goto err_r;
return -ENOENT;
rng->ops = ops;
dev_set_drvdata(&pdev->dev, &rng->ops);
ret = hwrng_register(&rng->ops);
if (ret)
goto err;
return -ENOENT;
dev_info(&pdev->dev, "Octeon Random Number Generator\n");
return 0;
err:
devm_iounmap(&pdev->dev, rng->control_status);
err_r:
devm_iounmap(&pdev->dev, rng->result);
err_ports:
devm_kfree(&pdev->dev, rng);
return -ENOENT;
}
static int __exit octeon_rng_remove(struct platform_device *pdev)
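/*
 * Not part of the diff, names invented for illustration: with devm_*
 * allocations every resource is tied to the device and released
 * automatically when probe fails, which is why the goto-based unwind
 * labels above could be replaced by plain error returns.
 */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;

	regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
	if (!regs)
		return -ENOENT;	/* any earlier devm allocation is freed for us */

	return 0;
}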


@ -298,21 +298,15 @@ config CRYPTO_DEV_TEGRA_AES
will be called tegra-aes.
config CRYPTO_DEV_NX
tristate "Support for Power7+ in-Nest cryptographic acceleration"
bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_CCM
select CRYPTO_GCM
select CRYPTO_AUTHENC
select CRYPTO_XCBC
select CRYPTO_SHA256
select CRYPTO_SHA512
default n
help
Support for Power7+ in-Nest cryptographic acceleration. This
module supports acceleration for AES and SHA2 algorithms. If you
choose 'M' here, this module will be called nx_crypto.
Support for Power7+ in-Nest cryptographic acceleration.
if CRYPTO_DEV_NX
source "drivers/crypto/nx/Kconfig"
endif
config CRYPTO_DEV_UX500
tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
@ -340,7 +334,7 @@ config CRYPTO_DEV_ATMEL_AES
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
select CONFIG_AT_HDMAC
select AT_HDMAC
help
Some Atmel processors have AES hw accelerator.
Select this if you want to use the Atmel module for


@ -1226,6 +1226,7 @@ static int __init crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
rc = -ENOMEM;
goto err_iomap;
}


@ -24,15 +24,10 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@ -1017,7 +1012,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
INIT_LIST_HEAD(&aes_algs[i].cra_list);
err = crypto_register_alg(&aes_algs[i]);
if (err)
goto err_aes_algs;
@ -1026,7 +1020,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
atmel_aes_hw_version_init(dd);
if (dd->hw_version >= 0x130) {
INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
err = crypto_register_alg(&aes_cfb64_alg[0]);
if (err)
goto err_aes_cfb64_alg;


@ -24,15 +24,10 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>


@ -24,15 +24,10 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@ -1044,7 +1039,6 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
INIT_LIST_HEAD(&tdes_algs[i].cra_list);
err = crypto_register_alg(&tdes_algs[i]);
if (err)
goto err_tdes_algs;


@ -205,7 +205,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
{
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
@ -224,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = 0;
bool keys_fit_inline = false;
u32 *key_jump_cmd, *jump_cmd;
u32 geniv, moveiv;
u32 *desc;
@ -239,7 +239,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = 1;
keys_fit_inline = true;
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@ -297,12 +297,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = 1;
keys_fit_inline = true;
desc = ctx->sh_desc_dec;
/* aead_decrypt shared descriptor */
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
@ -365,7 +365,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = 1;
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
@ -564,7 +564,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@ -605,7 +605,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@ -1354,10 +1354,10 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
contig &= ~GIV_SRC_CONTIG;
if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
contig &= ~GIV_DST_CONTIG;
if (unlikely(req->src != req->dst)) {
dst_nents = dst_nents ? : 1;
sec4_sg_len += 1;
}
if (unlikely(req->src != req->dst)) {
dst_nents = dst_nents ? : 1;
sec4_sg_len += 1;
}
if (!(contig & GIV_SRC_CONTIG)) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
@ -1650,7 +1650,11 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
/*
* single-pass ipsec_esp descriptor
* authencesn(*,*) is also registered, although not present
* explicitly here.
*/
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
@ -2213,7 +2217,9 @@ static int __init caam_algapi_init(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
bool done = false;
authencesn:
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -2227,8 +2233,25 @@ static int __init caam_algapi_init(void)
dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else
} else {
list_add_tail(&t_alg->entry, &priv->alg_list);
if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
!memcmp(driver_algs[i].name, "authenc", 7) &&
!done) {
char *name;
name = driver_algs[i].name;
memmove(name + 10, name + 7, strlen(name) - 7);
memcpy(name + 7, "esn", 3);
name = driver_algs[i].driver_name;
memmove(name + 10, name + 7, strlen(name) - 7);
memcpy(name + 7, "esn", 3);
done = true;
goto authencesn;
}
}
}
if (!list_empty(&priv->alg_list))
dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
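/*
 * Standalone illustration (not driver code) of the in-place rename used
 * above to register an "authencesn" variant for every "authenc" template:
 * three bytes are opened up after the "authenc" prefix and filled with
 * "esn". Unlike the driver, which relies on zero-padded fixed-size name
 * buffers, this sketch moves the terminating NUL explicitly.
 */
static void example_make_esn_name(char *name)
{
	memmove(name + 10, name + 7, strlen(name) - 7 + 1);
	memcpy(name + 7, "esn", 3);
}
/*
 * A buffer holding "authenc(hmac(md5),cbc(aes))" (with at least three bytes
 * of spare room) becomes "authencesn(hmac(md5),cbc(aes))".
 */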


@ -225,7 +225,7 @@ static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
if (ctx->split_key_len) {
/* Skip if already shared */
@ -311,7 +311,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Import context from software */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@ -430,6 +430,10 @@ static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
int ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
}
init_job_desc(desc, 0);
@ -1736,8 +1740,11 @@ static void __exit caam_algapi_hash_exit(void)
struct caam_hash_alg *t_alg, *n;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node)
return;
if (!dev_node) {
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
if (!dev_node)
return;
}
pdev = of_find_device_by_node(dev_node);
if (!pdev)
@ -1812,8 +1819,11 @@ static int __init caam_algapi_hash_init(void)
int i = 0, err = 0;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node)
return -ENODEV;
if (!dev_node) {
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
if (!dev_node)
return -ENODEV;
}
pdev = of_find_device_by_node(dev_node);
if (!pdev)


@ -193,7 +193,7 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
struct device *jrdev = ctx->jrdev;
u32 *desc = ctx->sh_desc;
init_sh_desc(desc, HDR_SHARE_WAIT);
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
@ -284,8 +284,11 @@ static int __init caam_rng_init(void)
struct caam_drv_private *priv;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node)
return -ENODEV;
if (!dev_node) {
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
if (!dev_node)
return -ENODEV;
}
pdev = of_find_device_by_node(dev_node);
if (!pdev)


@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/string.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>


@ -129,7 +129,7 @@ static int instantiate_rng(struct device *jrdev)
/*
* By default, the TRNG runs for 200 clocks per sample;
* 800 clocks per sample generates better entropy.
* 1600 clocks per sample generates better entropy.
*/
static void kick_trng(struct platform_device *pdev)
{
@ -144,9 +144,9 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
/* 800 clocks per sample */
/* 1600 clocks per sample */
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT);
val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
/* min. freq. count */
wr_reg32(&r4tst->rtfrqmin, 400);


@ -77,10 +77,8 @@ static void report_ccb_status(u32 status, char *outstr)
"Not instantiated",
"Test instantiate",
"Prediction resistance",
"",
"Prediction resistance and test request",
"Uninstantiate",
"",
"Secure key generation",
};
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>


@ -54,6 +54,10 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
}
init_job_desc(desc, 0);


@ -289,7 +289,6 @@ static struct crypto_alg geode_alg = {
.cra_blocksize = AES_MIN_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
@ -402,7 +401,6 @@ static struct crypto_alg geode_cbc_alg = {
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@ -489,7 +487,6 @@ static struct crypto_alg geode_ecb_alg = {
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@ -588,21 +585,8 @@ static struct pci_driver geode_aes_driver = {
.remove = __devexit_p(geode_aes_remove)
};
static int __init
geode_aes_init(void)
{
return pci_register_driver(&geode_aes_driver);
}
static void __exit
geode_aes_exit(void)
{
pci_unregister_driver(&geode_aes_driver);
}
module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
module_init(geode_aes_init);
module_exit(geode_aes_exit);
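/*
 * For reference (not part of the diff): module_pci_driver() supplies the
 * boilerplate deleted above. In this era of the kernel, <linux/pci.h>
 * defines it as
 *
 *	#define module_pci_driver(__pci_driver) \
 *		module_driver(__pci_driver, pci_register_driver, \
 *			      pci_unregister_driver)
 *
 * which expands to module_init()/module_exit() stubs that do nothing but
 * call pci_register_driver() and pci_unregister_driver() on the driver.
 */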


@ -2611,14 +2611,17 @@ static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id
size = pci_resource_len(pdev, i);
dev->bar[i] = ioremap_nocache(addr, size);
if (!dev->bar[i])
if (!dev->bar[i]) {
err = -ENOMEM;
goto err_out_unmap_bars;
}
}
dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
&dev->desc_dma);
if (!dev->desc_virt) {
dprintk("Failed to allocate descriptor rings.\n");
err = -ENOMEM;
goto err_out_unmap_bars;
}
memset(dev->desc_virt, 0, sizeof(struct hifn_dma));

drivers/crypto/nx/Kconfig (new file, 26 lines)

@ -0,0 +1,26 @@
config CRYPTO_DEV_NX_ENCRYPT
tristate "Encryption acceleration support"
depends on PPC64 && IBMVIO
default y
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_CCM
select CRYPTO_GCM
select CRYPTO_AUTHENC
select CRYPTO_XCBC
select CRYPTO_SHA256
select CRYPTO_SHA512
help
Support for Power7+ in-Nest encryption acceleration. This
module supports acceleration for AES and SHA2 algorithms. If you
choose 'M' here, this module will be called nx_crypto.
config CRYPTO_DEV_NX_COMPRESS
tristate "Compression acceleration support"
depends on PPC64 && IBMVIO
default y
help
Support for Power7+ in-Nest compression acceleration. This
module supports acceleration for the 842 compression algorithm. If you
choose 'M' here, this module will be called nx_compress.


@ -1,4 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o
obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
nx_debugfs.o \
nx-aes-cbc.o \
@ -9,3 +9,6 @@ nx-crypto-objs := nx.o \
nx-aes-xcbc.o \
nx-sha256.o \
nx-sha512.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o
nx-compress-objs := nx-842.o

drivers/crypto/nx/nx-842.c (new file, 1617 lines; diff not shown because it is too large)


@ -127,7 +127,6 @@ struct crypto_alg nx_cbc_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_cbc_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {


@ -430,7 +430,6 @@ struct crypto_alg nx_ccm_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ccm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
@ -453,7 +452,6 @@ struct crypto_alg nx_ccm4309_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_nivaead_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ccm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {


@ -141,7 +141,6 @@ struct crypto_alg nx_ctr_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {
@ -163,7 +162,6 @@ struct crypto_alg nx_ctr3686_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ctr_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {


@ -126,7 +126,6 @@ struct crypto_alg nx_ecb_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_ecb_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_blkcipher = {


@ -316,7 +316,6 @@ struct crypto_alg nx_gcm_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_gcm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {
@ -338,7 +337,6 @@ struct crypto_alg nx_gcm4106_aes_alg = {
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_nivaead_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list),
.cra_init = nx_crypto_ctx_aes_gcm_init,
.cra_exit = nx_crypto_ctx_exit,
.cra_aead = {


@ -876,7 +876,6 @@ static int omap_aes_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++) {
pr_debug("i: %d\n", i);
INIT_LIST_HEAD(&algs[i].cra_list);
err = crypto_register_alg(&algs[i]);
if (err)
goto err_algs;


@ -328,7 +328,6 @@ static struct crypto_alg aes_alg = {
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
@ -408,7 +407,6 @@ static struct crypto_alg ecb_aes_alg = {
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@ -491,7 +489,6 @@ static struct crypto_alg cbc_aes_alg = {
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,


@ -626,7 +626,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
for (i = 0; i < ARRAY_SIZE(algs); i++) {
INIT_LIST_HEAD(&algs[i].cra_list);
err = crypto_register_alg(&algs[i]);
if (err)
goto err_algs;


@ -38,6 +38,7 @@
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
@ -714,8 +715,13 @@ badkey:
/*
* talitos_edesc - s/w-extended descriptor
* @assoc_nents: number of segments in associated data scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @assoc_chained: whether assoc is chained or not
* @src_chained: whether src is chained or not
* @dst_chained: whether dst is chained or not
* @iv_dma: dma address of iv for checking continuity and link table
* @dma_len: length of dma mapped link_tbl space
* @dma_link_tbl: bus physical address of link_tbl
* @desc: h/w descriptor
@ -726,10 +732,13 @@ badkey:
* of link_tbl data
*/
struct talitos_edesc {
int assoc_nents;
int src_nents;
int dst_nents;
int src_is_chained;
int dst_is_chained;
bool assoc_chained;
bool src_chained;
bool dst_chained;
dma_addr_t iv_dma;
int dma_len;
dma_addr_t dma_link_tbl;
struct talitos_desc desc;
@ -738,7 +747,7 @@ struct talitos_edesc {
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
int chained)
bool chained)
{
if (unlikely(chained))
while (sg) {
@ -768,13 +777,13 @@ static void talitos_sg_unmap(struct device *dev,
unsigned int dst_nents = edesc->dst_nents ? : 1;
if (src != dst) {
if (edesc->src_is_chained)
if (edesc->src_chained)
talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
else
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
if (dst) {
if (edesc->dst_is_chained)
if (edesc->dst_chained)
talitos_unmap_sg_chain(dev, dst,
DMA_FROM_DEVICE);
else
@ -782,7 +791,7 @@ static void talitos_sg_unmap(struct device *dev,
DMA_FROM_DEVICE);
}
} else
if (edesc->src_is_chained)
if (edesc->src_chained)
talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
else
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
@ -797,7 +806,13 @@ static void ipsec_esp_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
if (edesc->assoc_chained)
talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
else
/* assoc_nents counts also for IV in non-contiguous cases */
dma_unmap_sg(dev, areq->assoc,
edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
DMA_TO_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
@ -825,9 +840,10 @@ static void ipsec_esp_encrypt_done(struct device *dev,
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
if (edesc->dma_len) {
if (edesc->dst_nents) {
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
edesc->dst_nents + 2 +
edesc->assoc_nents];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
icvdata, ctx->authsize);
@ -857,7 +873,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
/* auth check */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
edesc->dst_nents + 2 +
edesc->assoc_nents];
else
icvdata = &edesc->link_tbl[0];
@ -932,10 +949,9 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
u8 *giv, u64 seq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
u64 seq, void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
@ -950,12 +966,42 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE);
/* hmac data */
map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
if (edesc->assoc_nents) {
int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr));
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
/* assoc_nents - 1 entries for assoc, 1 for IV */
sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
areq->assoclen, tbl_ptr);
/* add IV to link table */
tbl_ptr += sg_count - 1;
tbl_ptr->j_extent = 0;
tbl_ptr++;
to_talitos_ptr(tbl_ptr, edesc->iv_dma);
tbl_ptr->len = cpu_to_be16(ivsize);
tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
desc->ptr[1].j_extent = 0;
}
/* cipher iv */
map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
DMA_TO_DEVICE);
to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
desc->ptr[2].len = cpu_to_be16(ivsize);
desc->ptr[2].j_extent = 0;
/* Sync needed for the aead_givencrypt case */
dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
@ -974,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE,
edesc->src_is_chained);
edesc->src_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
@ -1006,32 +1052,30 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (areq->src != areq->dst)
sg_count = talitos_map_sg(dev, areq->dst,
edesc->dst_nents ? : 1,
DMA_FROM_DEVICE,
edesc->dst_is_chained);
DMA_FROM_DEVICE, edesc->dst_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
int tbl_off = edesc->src_nents + 1;
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
(edesc->src_nents + 1) *
sizeof(struct talitos_ptr));
tbl_off * sizeof(struct talitos_ptr));
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
tbl_ptr);
/* Add an entry to the link table for ICV data */
link_tbl_ptr += sg_count - 1;
link_tbl_ptr->j_extent = 0;
sg_count++;
link_tbl_ptr++;
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
link_tbl_ptr->len = cpu_to_be16(authsize);
tbl_ptr += sg_count - 1;
tbl_ptr->j_extent = 0;
tbl_ptr++;
tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */
to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
(edesc->src_nents + edesc->dst_nents + 2) *
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
(tbl_off + edesc->dst_nents + 1 +
edesc->assoc_nents) *
sizeof(struct talitos_ptr));
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@ -1053,17 +1097,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/*
* derive number of elements in scatterlist
*/
static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
*chained = 0;
*chained = false;
while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
*chained = 1;
*chained = true;
sg = scatterwalk_sg_next(sg);
}
@ -1132,17 +1176,21 @@ static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct scatterlist *assoc,
struct scatterlist *src,
struct scatterlist *dst,
int hash_result,
u8 *iv,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize,
unsigned int ivsize,
int icv_stashing,
u32 cryptoflags)
{
struct talitos_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
int src_chained, dst_chained = 0;
int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
bool assoc_chained = false, src_chained = false, dst_chained = false;
dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@ -1151,10 +1199,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return ERR_PTR(-EINVAL);
}
if (iv)
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
if (assoc) {
/*
* Currently it is assumed that iv is provided whenever assoc
* is.
*/
BUG_ON(!iv);
assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
assoc_chained);
assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
}
src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
if (hash_result) {
if (!dst) {
dst_nents = 0;
} else {
if (dst == src) {
@ -1172,9 +1239,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
* and the ICV data itself
*/
alloc_len = sizeof(struct talitos_edesc);
if (src_nents || dst_nents) {
dma_len = (src_nents + dst_nents + 2) *
sizeof(struct talitos_ptr) + authsize;
if (assoc_nents || src_nents || dst_nents) {
dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
@ -1183,14 +1250,20 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->src_is_chained = src_chained;
edesc->dst_is_chained = dst_chained;
edesc->assoc_chained = assoc_chained;
edesc->src_chained = src_chained;
edesc->dst_chained = dst_chained;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
if (dma_len)
edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
@ -1200,14 +1273,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
return edesc;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
int icv_stashing)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
areq->cryptlen, ctx->authsize, icv_stashing,
return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
iv, areq->assoclen, areq->cryptlen,
ctx->authsize, ivsize, icv_stashing,
areq->base.flags);
}
@ -1218,14 +1293,14 @@ static int aead_encrypt(struct aead_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, 0);
edesc = aead_edesc_alloc(req, req->iv, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
@ -1241,7 +1316,7 @@ static int aead_decrypt(struct aead_request *req)
req->cryptlen -= authsize;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, 1);
edesc = aead_edesc_alloc(req, req->iv, 1);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@ -1257,9 +1332,7 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
return ipsec_esp(edesc, req, NULL, 0,
ipsec_esp_decrypt_hwauth_done);
return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
@ -1268,7 +1341,8 @@ static int aead_decrypt(struct aead_request *req)
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
edesc->dst_nents + 2 +
edesc->assoc_nents];
else
icvdata = &edesc->link_tbl[0];
@ -1277,7 +1351,7 @@ static int aead_decrypt(struct aead_request *req)
memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
ctx->authsize);
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
@ -1288,7 +1362,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
struct talitos_edesc *edesc;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(areq, 0);
edesc = aead_edesc_alloc(areq, req->giv, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@ -1299,8 +1373,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
/* avoid consecutive packets going out with same IV */
*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
return ipsec_esp(edesc, areq, req->giv, req->seq,
ipsec_esp_encrypt_done);
return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@ -1356,7 +1429,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->nbytes;
unsigned int ivsize;
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
int sg_count, ret;
/* first DWORD empty */
@ -1365,9 +1438,9 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
desc->ptr[0].j_extent = 0;
/* cipher iv */
ivsize = crypto_ablkcipher_ivsize(cipher);
map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
DMA_TO_DEVICE);
to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
desc->ptr[1].len = cpu_to_be16(ivsize);
desc->ptr[1].j_extent = 0;
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
@ -1382,7 +1455,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE,
edesc->src_is_chained);
edesc->src_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
@ -1409,8 +1482,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
if (areq->src != areq->dst)
sg_count = talitos_map_sg(dev, areq->dst,
edesc->dst_nents ? : 1,
DMA_FROM_DEVICE,
edesc->dst_is_chained);
DMA_FROM_DEVICE, edesc->dst_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
@ -1450,9 +1522,11 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
areq->nbytes, 0, 0, areq->base.flags);
return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
areq->info, 0, areq->nbytes, 0, ivsize, 0,
areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@ -1578,8 +1652,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = talitos_map_sg(dev, req_ctx->psrc,
edesc->src_nents ? : 1,
DMA_TO_DEVICE,
edesc->src_is_chained);
DMA_TO_DEVICE, edesc->src_chained);
if (sg_count == 1) {
to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
@ -1631,8 +1704,8 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
nbytes, 0, 0, areq->base.flags);
return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
nbytes, 0, 0, 0, areq->base.flags);
}
static int ahash_init(struct ahash_request *areq)
@ -1690,7 +1763,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int nbytes_to_hash;
unsigned int to_hash_later;
unsigned int nsg;
int chained;
bool chained;
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
@ -1902,21 +1975,18 @@ struct talitos_alg_template {
};
static struct talitos_alg_template driver_algs[] = {
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
/*
* AEAD algorithms. These use a single-pass ipsec_esp descriptor.
* authencesn(*,*) is also registered, although not present
* explicitly here.
*/
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
@ -1935,14 +2005,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
@ -1962,14 +2025,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
}
@ -1988,14 +2044,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
}
@ -2015,14 +2064,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}
@ -2041,14 +2083,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
}
@ -2068,14 +2103,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
}
@ -2094,14 +2122,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
}
@ -2121,14 +2142,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
}
@ -2147,14 +2161,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
}
@ -2174,14 +2181,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
@ -2200,14 +2200,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_type = &crypto_aead_type,
.cra_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
.givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
@ -2229,12 +2222,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ablkcipher_type,
.cra_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@ -2251,12 +2239,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ablkcipher_type,
.cra_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
.geniv = "eseqiv",
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@ -2270,11 +2253,6 @@ static struct talitos_alg_template driver_algs[] = {
/* AHASH algorithms. */
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.halg.digestsize = MD5_DIGEST_SIZE,
.halg.base = {
.cra_name = "md5",
@ -2282,7 +2260,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = MD5_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2291,11 +2268,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha1",
@ -2303,7 +2275,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2312,11 +2283,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha224",
@ -2324,7 +2290,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2333,11 +2298,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha256",
@ -2345,7 +2305,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2354,11 +2313,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha384",
@ -2366,7 +2320,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2375,11 +2328,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "sha512",
@ -2387,7 +2335,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2396,12 +2343,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = MD5_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(md5)",
@ -2409,7 +2350,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = MD5_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2418,12 +2358,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
@ -2431,7 +2365,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2440,12 +2373,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
@ -2453,7 +2380,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2462,12 +2388,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
@ -2475,7 +2395,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2484,12 +2403,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
@ -2497,7 +2410,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2506,12 +2418,6 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = ahash_init,
.update = ahash_update,
.final = ahash_final,
.finup = ahash_finup,
.digest = ahash_digest,
.setkey = ahash_setkey,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
@ -2519,7 +2425,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ahash_type
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@ -2677,14 +2582,34 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher.setkey = ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
alg->cra_ablkcipher.geniv = "eseqiv";
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init_aead;
alg->cra_type = &crypto_aead_type;
alg->cra_aead.setkey = aead_setkey;
alg->cra_aead.setauthsize = aead_setauthsize;
alg->cra_aead.encrypt = aead_encrypt;
alg->cra_aead.decrypt = aead_decrypt;
alg->cra_aead.givencrypt = aead_givencrypt;
alg->cra_aead.geniv = "<built-in>";
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
t_alg->algt.alg.hash.setkey = ahash_setkey;
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
kfree(t_alg);
@ -2896,7 +2821,9 @@ static int talitos_probe(struct platform_device *ofdev)
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
char *name = NULL;
bool authenc = false;
authencesn:
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -2911,6 +2838,8 @@ static int talitos_probe(struct platform_device *ofdev)
err = crypto_register_alg(
&t_alg->algt.alg.crypto);
name = t_alg->algt.alg.crypto.cra_driver_name;
authenc = authenc ? !authenc :
!(bool)memcmp(name, "authenc", 7);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = crypto_register_ahash(
@ -2923,8 +2852,25 @@ static int talitos_probe(struct platform_device *ofdev)
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
} else
} else {
list_add_tail(&t_alg->entry, &priv->alg_list);
if (authenc) {
struct crypto_alg *alg =
&driver_algs[i].alg.crypto;
name = alg->cra_name;
memmove(name + 10, name + 7,
strlen(name) - 7);
memcpy(name + 7, "esn", 3);
name = alg->cra_driver_name;
memmove(name + 10, name + 7,
strlen(name) - 7);
memcpy(name + 7, "esn", 3);
goto authencesn;
}
}
}
}
if (!list_empty(&priv->alg_list))


@ -969,6 +969,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
if (!aes_wq) {
dev_err(dev, "alloc_workqueue failed\n");
err = -ENOMEM;
goto out;
}
@ -1004,8 +1005,6 @@ static int tegra_aes_probe(struct platform_device *pdev)
aes_dev = dd;
for (i = 0; i < ARRAY_SIZE(algs); i++) {
INIT_LIST_HEAD(&algs[i].cra_list);
algs[i].cra_priority = 300;
algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
algs[i].cra_module = THIS_MODULE;


@ -1486,6 +1486,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
if (!res_irq) {
dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
__func__);
ret = -ENODEV;
goto out_power;
}


@ -1991,7 +1991,6 @@ static int __init ux500_hash_mod_init(void)
static void __exit ux500_hash_mod_fini(void)
{
platform_driver_unregister(&hash_driver);
return;
}
module_init(ux500_hash_mod_init);

include/crypto/cast5.h (new file, 27 lines)

@ -0,0 +1,27 @@
#ifndef _CRYPTO_CAST5_H
#define _CRYPTO_CAST5_H
#include <linux/types.h>
#include <linux/crypto.h>
#define CAST5_BLOCK_SIZE 8
#define CAST5_MIN_KEY_SIZE 5
#define CAST5_MAX_KEY_SIZE 16
struct cast5_ctx {
u32 Km[16];
u8 Kr[16];
int rr; /* rr ? rounds = 12 : rounds = 16; (rfc 2144) */
};
int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
extern const u32 cast5_s1[256];
extern const u32 cast5_s2[256];
extern const u32 cast5_s3[256];
extern const u32 cast5_s4[256];
#endif

include/crypto/cast6.h (new file, 28 lines)

@ -0,0 +1,28 @@
#ifndef _CRYPTO_CAST6_H
#define _CRYPTO_CAST6_H
#include <linux/types.h>
#include <linux/crypto.h>
#define CAST6_BLOCK_SIZE 16
#define CAST6_MIN_KEY_SIZE 16
#define CAST6_MAX_KEY_SIZE 32
struct cast6_ctx {
u32 Km[12][4];
u8 Kr[12][4];
};
int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
unsigned int keylen, u32 *flags);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
extern const u32 cast6_s1[256];
extern const u32 cast6_s2[256];
extern const u32 cast6_s3[256];
extern const u32 cast6_s4[256];
#endif
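#include <crypto/cast6.h>

/*
 * Minimal sketch (not from the tree) of driving the generic CAST6
 * primitives this header now exports, roughly what the new AVX glue code
 * is expected to do for its scalar fallback path.
 */
static int cast6_one_block_example(void)
{
	static const u8 key[CAST6_MIN_KEY_SIZE] = { 1, 2, 3, 4 };
	struct cast6_ctx ctx;
	u8 buf[CAST6_BLOCK_SIZE] = { 0 };
	u8 tmp[CAST6_BLOCK_SIZE];
	u32 flags = 0;
	int err;

	err = __cast6_setkey(&ctx, key, sizeof(key), &flags);
	if (err)
		return err;

	__cast6_encrypt(&ctx, tmp, buf);	/* encrypt one 16-byte block */
	__cast6_decrypt(&ctx, buf, tmp);	/* recovers the all-zero block */
	return 0;
}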


@ -83,6 +83,8 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
int crypto_register_shash(struct shash_alg *alg);
int crypto_unregister_shash(struct shash_alg *alg);
int crypto_register_shashes(struct shash_alg *algs, int count);
int crypto_unregister_shashes(struct shash_alg *algs, int count);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst);
void shash_free_instance(struct crypto_instance *inst);

include/linux/nx842.h (new file, 11 lines)

@ -0,0 +1,11 @@
#ifndef __NX842_H__
#define __NX842_H__
int nx842_get_workmem_size(void);
int nx842_get_workmem_size_aligned(void);
int nx842_compress(const unsigned char *in, unsigned int in_len,
unsigned char *out, unsigned int *out_len, void *wrkmem);
int nx842_decompress(const unsigned char *in, unsigned int in_len,
unsigned char *out, unsigned int *out_len, void *wrkmem);
#endif
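#include <linux/slab.h>
#include <linux/nx842.h>

/*
 * Illustrative sketch only: one possible round trip through the new nx842
 * interface. Output buffer sizing here is an assumption; the real
 * constraints (alignment, maximum lengths) live in
 * drivers/crypto/nx/nx-842.c.
 */
static int nx842_roundtrip_example(const unsigned char *src, unsigned int len)
{
	unsigned char *cbuf = kmalloc(len, GFP_KERNEL);
	unsigned char *dbuf = kmalloc(len, GFP_KERNEL);
	void *wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
	unsigned int clen = len, dlen = len;
	int ret = -ENOMEM;

	if (!cbuf || !dbuf || !wmem)
		goto out;

	ret = nx842_compress(src, len, cbuf, &clen, wmem);
	if (!ret)
		ret = nx842_decompress(cbuf, clen, dbuf, &dlen, wmem);
out:
	kfree(wmem);
	kfree(dbuf);
	kfree(cbuf);
	return ret;
}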