Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (50 commits)
  crypto: ixp4xx - Select CRYPTO_AUTHENC
  crypto: s390 - Respect STFL bit
  crypto: talitos - Add support for sha256 and md5 variants
  crypto: hash - Move ahash functions into crypto/hash.h
  crypto: crc32c - Add ahash implementation
  crypto: hash - Added scatter list walking helper
  crypto: prng - Deterministic CPRNG
  crypto: hash - Removed vestigial ahash fields
  crypto: hash - Fixed digest size check
  crypto: rmd - sparse annotations
  crypto: rmd128 - sparse annotations
  crypto: camellia - Use kernel-provided bitops, unaligned access helpers
  crypto: talitos - Use proper form for algorithm driver names
  crypto: talitos - Add support for 3des
  crypto: padlock - Make module loading quieter when hardware isn't available
  crypto: tcrypt - Remove unnecessary kmap/kunmap calls
  crypto: ixp4xx - Hardware crypto support for IXP4xx CPUs
  crypto: talitos - Freescale integrated security engine (SEC) driver
  [CRYPTO] tcrypt: Add self test for des3_ede cipher operating in cbc mode
  [CRYPTO] rmd: Use pointer form of endian swapping operations
  ...
Linus Torvalds 2008-07-14 13:40:42 -07:00
commit 3b23e665b6
32 changed files with 7226 additions and 325 deletions


@@ -296,6 +296,10 @@ static inline int crypt_s390_func_available(int func)
unsigned char status[16];
int ret;
/* check if CPACF facility (bit 17) is available */
if (!(stfl() & (1ULL << (31 - 17))))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
case CRYPT_S390_KM:
ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
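The facility word returned by stfl() numbers bits MSB-first, which is why CPACF facility bit 17 is tested at bit position (31 - 17) of the 32-bit value. The general form of that test, as a sketch (stfl_bit_set is a hypothetical helper, not part of this patch):

    /* Test MSB-first facility bit n of the 32-bit STFL word (illustrative only). */
    static inline int stfl_bit_set(unsigned int facilities, int n)
    {
            return (facilities >> (31 - n)) & 1;
    }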


@@ -65,6 +65,7 @@ config CRYPTO_NULL
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
help
This is a generic software asynchronous crypto daemon that
@ -212,7 +213,7 @@ comment "Digest"
config CRYPTO_CRC32C
tristate "CRC32c CRC algorithm"
select CRYPTO_ALGAPI
select CRYPTO_HASH
select LIBCRC32C
help
Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
should not be used for other purposes because of the weakness
of the algorithm.
config CRYPTO_RMD128
tristate "RIPEMD-128 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-128 (ISO/IEC 10118-3:2004).
RIPEMD-128 is a 128-bit cryptographic hash function. It should only
be used as a secure replacement for RIPEMD. For other use cases,
RIPEMD-160 should be used.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-160 (ISO/IEC 10118-3:2004).
RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
to be used as a secure replacement for the 128-bit hash functions
MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128).
Its speed is comparable to SHA1 and there are no known attacks against
RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD256
tristate "RIPEMD-256 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-256 is an optional extension of RIPEMD-128 with a 256-bit hash.
It is intended for applications that require a longer hash result
without needing a larger security level than RIPEMD-128.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD320
tristate "RIPEMD-320 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-320 is an optional extension of RIPEMD-160 with a 320-bit hash.
It is intended for applications that require a longer hash result
without needing a larger security level than RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
select CRYPTO_ALGAPI
@@ -614,6 +666,15 @@ config CRYPTO_LZO
help
This is the LZO algorithm.
comment "Random Number Generation"
config CRYPTO_PRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
help
This option enables the generic pseudo-random number generator
for cryptographic modules. It uses the algorithm specified in
ANSI X9.31 A.2.4.
source "drivers/crypto/Kconfig"
endif # if CRYPTO


@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
#

crypto/ahash.c (new file, 194 lines)

@@ -0,0 +1,194 @@
/*
* Asynchronous Cryptographic Hash operations.
*
* This is the asynchronous version of hash.c with notification of
* completion via a callback.
*
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
if (offset & alignmask)
nbytes = alignmask + 1 - (offset & alignmask);
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->pg = sg_page(sg);
walk->offset = sg->offset;
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
walk->offset += alignmask - 1;
walk->offset = ALIGN(walk->offset, alignmask + 1);
walk->data += walk->offset;
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
return nbytes;
}
crypto_kunmap(walk->data, 0);
crypto_yield(walk->flags);
if (err)
return err;
walk->offset = 0;
if (nbytes)
return hash_walk_next(walk);
if (!walk->total)
return 0;
walk->sg = scatterwalk_sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total)
return 0;
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
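/*
 * Typical use of the walk interface (the crc32c ahash later in this
 * merge follows exactly this pattern): keep processing until
 * crypto_hash_walk_done() returns zero or an error, e.g.
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		crc = crc32c(crc, walk.data, nbytes);
 */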
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = ahash->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
return ahash->setkey(tfm, key, keylen);
}
static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
return alg->cra_ctxsize;
}
static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
struct ahash_tfm *crt = &tfm->crt_ahash;
if (alg->digestsize > PAGE_SIZE / 8)
return -EINVAL;
crt->init = alg->init;
crt->update = alg->update;
crt->final = alg->final;
crt->digest = alg->digest;
crt->setkey = ahash_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
}
const struct crypto_type crypto_ahash_type = {
.ctxsize = crypto_ahash_ctxsize,
.init = crypto_init_ahash_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");


@@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return crypto_init_cipher_ops(tfm);
case CRYPTO_ALG_TYPE_DIGEST:
-return crypto_init_digest_ops(tfm);
if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
CRYPTO_ALG_TYPE_HASH_MASK)
return crypto_init_digest_ops_async(tfm);
else
return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);


@@ -35,6 +35,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>
static const u32 camellia_sp1110[256] = {
0x70707000,0x82828200,0x2c2c2c00,0xececec00,
@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
/*
* macros
*/
-#define GETU32(v, pt) \
-do { \
-/* latest breed of gcc is clever enough to use move */ \
-memcpy(&(v), (pt), 4); \
-(v) = be32_to_cpu(v); \
-} while(0)
-/* rotation right shift 1byte */
-#define ROR8(x) (((x) >> 8) + ((x) << 24))
-/* rotation left shift 1bit */
-#define ROL1(x) (((x) << 1) + ((x) >> 31))
-/* rotation left shift 1byte */
-#define ROL8(x) (((x) << 8) + ((x) >> 24))
#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
do { \
w0 = ll; \
@@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = {
^ camellia_sp3033[(u8)(il >> 8)] \
^ camellia_sp4404[(u8)(il )]; \
yl ^= yr; \
-yr = ROR8(yr); \
yr = ror32(yr, 8); \
yr ^= yl; \
} while(0)
@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[7] ^= subL[1]; subR[7] ^= subR[1];
subL[1] ^= subR[1] & ~subR[9];
dw = subL[1] & subL[9],
-subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
/* round 8 */
subL[11] ^= subL[1]; subR[11] ^= subR[1];
/* round 10 */
@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[15] ^= subL[1]; subR[15] ^= subR[1];
subL[1] ^= subR[1] & ~subR[17];
dw = subL[1] & subL[17],
-subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
/* round 14 */
subL[19] ^= subL[1]; subR[19] ^= subR[1];
/* round 16 */
@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
} else {
subL[1] ^= subR[1] & ~subR[25];
dw = subL[1] & subL[25],
-subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
/* round 20 */
subL[27] ^= subL[1]; subR[27] ^= subR[1];
/* round 22 */
@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[26] ^= kw4l; subR[26] ^= kw4r;
kw4l ^= kw4r & ~subR[24];
dw = kw4l & subL[24],
-kw4r ^= ROL1(dw); /* modified for FL(kl5) */
kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
}
/* round 17 */
subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[18] ^= kw4l; subR[18] ^= kw4r;
kw4l ^= kw4r & ~subR[16];
dw = kw4l & subL[16],
-kw4r ^= ROL1(dw); /* modified for FL(kl3) */
kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
/* round 11 */
subL[14] ^= kw4l; subR[14] ^= kw4r;
/* round 9 */
@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[10] ^= kw4l; subR[10] ^= kw4r;
kw4l ^= kw4r & ~subR[8];
dw = kw4l & subL[8],
-kw4r ^= ROL1(dw); /* modified for FL(kl1) */
kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
/* round 5 */
subL[6] ^= kw4l; subR[6] ^= kw4r;
/* round 3 */
@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(6) = subR[5] ^ subR[7];
tl = subL[10] ^ (subR[10] & ~subR[8]);
dw = tl & subL[8], /* FL(kl1) */
-tr = subR[10] ^ ROL1(dw);
tr = subR[10] ^ rol32(dw, 1);
SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
SUBKEY_R(7) = subR[6] ^ tr;
SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(9) = subR[9];
tl = subL[7] ^ (subR[7] & ~subR[9]);
dw = tl & subL[9], /* FLinv(kl2) */
-tr = subR[7] ^ ROL1(dw);
tr = subR[7] ^ rol32(dw, 1);
SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
SUBKEY_R(10) = tr ^ subR[11];
SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(14) = subR[13] ^ subR[15];
tl = subL[18] ^ (subR[18] & ~subR[16]);
dw = tl & subL[16], /* FL(kl3) */
-tr = subR[18] ^ ROL1(dw);
tr = subR[18] ^ rol32(dw, 1);
SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
SUBKEY_R(15) = subR[14] ^ tr;
SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(17) = subR[17];
tl = subL[15] ^ (subR[15] & ~subR[17]);
dw = tl & subL[17], /* FLinv(kl4) */
-tr = subR[15] ^ ROL1(dw);
tr = subR[15] ^ rol32(dw, 1);
SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
SUBKEY_R(18) = tr ^ subR[19];
SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
} else {
tl = subL[26] ^ (subR[26] & ~subR[24]);
dw = tl & subL[24], /* FL(kl5) */
-tr = subR[26] ^ ROL1(dw);
tr = subR[26] ^ rol32(dw, 1);
SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
SUBKEY_R(23) = subR[22] ^ tr;
SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(25) = subR[25];
tl = subL[23] ^ (subR[23] & ~subR[25]);
dw = tl & subL[25], /* FLinv(kl6) */
-tr = subR[23] ^ ROL1(dw);
tr = subR[23] ^ rol32(dw, 1);
SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
SUBKEY_R(26) = tr ^ subR[27];
SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
/* apply the inverse of the last half of P-function */
i = 2;
do {
-dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
-dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
-dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
-dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
-dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
-dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
i += 8;
} while (i < max);
SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
i += 8;
} while (i < max);
@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
/**
* k == kll || klr || krl || krr (|| is concatenation)
*/
-GETU32(kll, key );
-GETU32(klr, key + 4);
-GETU32(krl, key + 8);
-GETU32(krr, key + 12);
kll = get_unaligned_be32(key);
klr = get_unaligned_be32(key + 4);
krl = get_unaligned_be32(key + 8);
krr = get_unaligned_be32(key + 12);
/* generate KL dependent subkeys */
/* kw1 */
@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
* key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
* (|| is concatenation)
*/
-GETU32(kll, key );
-GETU32(klr, key + 4);
-GETU32(krl, key + 8);
-GETU32(krr, key + 12);
-GETU32(krll, key + 16);
-GETU32(krlr, key + 20);
-GETU32(krrl, key + 24);
-GETU32(krrr, key + 28);
kll = get_unaligned_be32(key);
klr = get_unaligned_be32(key + 4);
krl = get_unaligned_be32(key + 8);
krr = get_unaligned_be32(key + 12);
krll = get_unaligned_be32(key + 16);
krlr = get_unaligned_be32(key + 20);
krrl = get_unaligned_be32(key + 24);
krrr = get_unaligned_be32(key + 28);
/* generate KL dependent subkeys */
/* kw1 */
@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
t0 &= ll; \
t2 |= rr; \
rl ^= t2; \
-lr ^= ROL1(t0); \
lr ^= rol32(t0, 1); \
t3 = krl; \
t1 = klr; \
t3 &= rl; \
t1 |= lr; \
ll ^= t1; \
-rr ^= ROL1(t3); \
rr ^= rol32(t3, 1); \
} while(0)
#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
@@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
il ^= kl; \
ir ^= il ^ kr; \
yl ^= ir; \
-yr ^= ROR8(il) ^ ir; \
yr ^= ror32(il, 8) ^ ir; \
} while(0)
/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */


@@ -5,20 +5,23 @@
*
* This module file is a wrapper to invoke the lib/crc32c routines.
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/crc32c.h>
#include <linux/kernel.h>
-#define CHKSUM_BLOCK_SIZE 32
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
@@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
*(__le32 *)out = ~cpu_to_le32(mctx->crc);
}
-static int crc32c_cra_init(struct crypto_tfm *tfm)
static int crc32c_cra_init_old(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
@@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct crypto_alg alg = {
static struct crypto_alg old_alg = {
.cra_name = "crc32c",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
-.cra_list = LIST_HEAD_INIT(alg.cra_list),
-.cra_init = crc32c_cra_init,
.cra_list = LIST_HEAD_INIT(old_alg.cra_list),
.cra_init = crc32c_cra_init_old,
.cra_u = {
.digest = {
.dia_digestsize= CHKSUM_DIGEST_SIZE,
@@ -98,14 +101,125 @@ static struct crypto_alg alg = {
}
};
/*
* Setting the seed allows arbitrary accumulators and flexible XOR policy
* If your algorithm starts with ~0, then XOR with ~0 before you set
* the seed.
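 *
 * For example (a sketch): to continue a stream whose last output was
 * "crc" (already inverted by the final step), seed with crc ^ ~0:
 *
 *	__le32 seed = cpu_to_le32(crc ^ 0xffffffff);
 *	crypto_ahash_setkey(tfm, (u8 *)&seed, 4);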
*/
static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_ahash_ctx(hash);
if (keylen != sizeof(u32)) {
crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
static int crc32c_init(struct ahash_request *req)
{
u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
u32 *crcp = ahash_request_ctx(req);
*crcp = *mctx;
return 0;
}
static int crc32c_update(struct ahash_request *req)
{
struct crypto_hash_walk walk;
u32 *crcp = ahash_request_ctx(req);
u32 crc = *crcp;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
nbytes = crypto_hash_walk_done(&walk, 0))
crc = crc32c(crc, walk.data, nbytes);
*crcp = crc;
return 0;
}
static int crc32c_final(struct ahash_request *req)
{
u32 *crcp = ahash_request_ctx(req);
*(__le32 *)req->result = ~cpu_to_le32p(crcp);
return 0;
}
static int crc32c_digest(struct ahash_request *req)
{
struct crypto_hash_walk walk;
u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
u32 crc = *mctx;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
nbytes = crypto_hash_walk_done(&walk, 0))
crc = crc32c(crc, walk.data, nbytes);
*(__le32 *)req->result = ~cpu_to_le32(crc);
return 0;
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = ~0;
tfm->crt_ahash.reqsize = sizeof(u32);
return 0;
}
static struct crypto_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = crc32c_cra_init,
.cra_type = &crypto_ahash_type,
.cra_u = {
.ahash = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = crc32c_setkey,
.init = crc32c_init,
.update = crc32c_update,
.final = crc32c_final,
.digest = crc32c_digest,
}
}
};
static int __init crc32c_mod_init(void)
{
-return crypto_register_alg(&alg);
int err;
err = crypto_register_alg(&old_alg);
if (err)
return err;
err = crypto_register_alg(&alg);
if (err)
crypto_unregister_alg(&old_alg);
return err;
}
static void __exit crc32c_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_alg(&old_alg);
}
module_init(crc32c_mod_init);


@@ -11,6 +11,7 @@
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
crypto_completion_t complete;
};
struct cryptd_hash_ctx {
struct crypto_hash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
};
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
@@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
rctx = ablkcipher_request_ctx(req);
-if (unlikely(err == -EINPROGRESS)) {
-rctx->complete(&req->base, err);
-return;
-}
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.info = req->info;
@@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
req->base.complete = rctx->complete;
out:
local_bh_disable();
-req->base.complete(&req->base, err);
rctx->complete(&req->base, err);
local_bh_enable();
}
@@ -261,6 +268,240 @@ out_put_alg:
return inst;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_hash *cipher;
cipher = crypto_spawn_hash(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
tfm->crt_ahash.reqsize =
sizeof(struct cryptd_hash_request_ctx);
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_state *state = cryptd_get_state(tfm);
int active;
mutex_lock(&state->mutex);
active = ahash_tfm_in_queue(&state->queue,
__crypto_ahash_cast(tfm));
mutex_unlock(&state->mutex);
BUG_ON(active);
crypto_free_hash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
struct crypto_hash *child = ctx->child;
int err;
crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_hash_setkey(child, key, keylen);
crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int cryptd_hash_enqueue(struct ahash_request *req,
crypto_completion_t complete)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_state *state =
cryptd_get_state(crypto_ahash_tfm(tfm));
int err;
rctx->complete = req->base.complete;
req->base.complete = complete;
spin_lock_bh(&state->lock);
err = ahash_enqueue_request(&state->queue, req);
spin_unlock_bh(&state->lock);
wake_up_process(state->task);
return err;
}
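/*
 * The handlers below share one shape: cryptd_hash_enqueue() parks the
 * request on the cryptd queue, the cryptd worker later runs the matching
 * cryptd_hash_* operation against the synchronous child hash, and the
 * original completion saved in rctx->complete is invoked with softirqs
 * disabled.
 */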
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->init(&desc);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_init);
}
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->update(&desc,
req->src,
req->nbytes);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_update);
}
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->final(&desc, req->result);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->digest(&desc,
req->src,
req->nbytes,
req->result);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
static struct crypto_instance *cryptd_alloc_hash(
struct rtattr **tb, struct cryptd_state *state)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = cryptd_alloc_instance(alg, state);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ahash_type;
inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.cra_init = cryptd_hash_init_tfm;
inst->alg.cra_exit = cryptd_hash_exit_tfm;
inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static struct cryptd_state state;
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
@@ -274,6 +515,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
return cryptd_alloc_blkcipher(tb, &state);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_alloc_hash(tb, &state);
}
return ERR_PTR(-EINVAL);


@@ -12,6 +12,7 @@
*
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/mm.h>
#include <linux/errno.h>
@@ -141,7 +142,7 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
struct hash_tfm *ops = &tfm->crt_hash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
if (dalg->dia_digestsize > PAGE_SIZE / 8)
return -EINVAL;
ops->init = init;
@@ -157,3 +158,83 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
}
static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
return -ENOSYS;
}
static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
return dalg->dia_setkey(tfm, key, keylen);
}
static int digest_async_init(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
dalg->dia_init(tfm);
return 0;
}
static int digest_async_update(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
update(&desc, req->src, req->nbytes);
return 0;
}
static int digest_async_final(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
final(&desc, req->result);
return 0;
}
static int digest_async_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return digest(&desc, req->src, req->nbytes, req->result);
}
int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
{
struct ahash_tfm *crt = &tfm->crt_ahash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
crt->init = digest_async_init;
crt->update = digest_async_update;
crt->final = digest_async_final;
crt->digest = digest_async_digest;
crt->setkey = dalg->dia_setkey ? digest_async_setkey :
digest_async_nosetkey;
crt->digestsize = dalg->dia_digestsize;
return 0;
}


@@ -9,6 +9,7 @@
* any later version.
*/
#include <crypto/internal/hash.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -59,24 +60,107 @@ static int hash_setkey(struct crypto_hash *crt, const u8 *key,
return alg->setkey(crt, key, keylen);
}
-static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
return alg->setkey(tfm_hash, key, keylen);
}
static int hash_async_init(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->init(&desc);
}
static int hash_async_update(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->update(&desc, req->src, req->nbytes);
}
static int hash_async_final(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->final(&desc, req->result);
}
static int hash_async_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->digest(&desc, req->src, req->nbytes, req->result);
}
static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
{
struct ahash_tfm *crt = &tfm->crt_ahash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
crt->init = hash_async_init;
crt->update = hash_async_update;
crt->final = hash_async_final;
crt->digest = hash_async_digest;
crt->setkey = hash_async_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
{
struct hash_tfm *crt = &tfm->crt_hash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
-return -EINVAL;
-crt->init = alg->init;
-crt->update = alg->update;
-crt->final = alg->final;
-crt->digest = alg->digest;
-crt->setkey = hash_setkey;
crt->init = alg->init;
crt->update = alg->update;
crt->final = alg->final;
crt->digest = alg->digest;
crt->setkey = hash_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
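/*
 * Dispatch: unless the caller's mask insists on the synchronous hash
 * interface (CRYPTO_ALG_TYPE_HASH_MASK), the same hash_alg is exposed
 * through the asynchronous entry points above.
 */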
static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
if (alg->digestsize > PAGE_SIZE / 8)
return -EINVAL;
if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
return crypto_init_hash_ops_async(tfm);
else
return crypto_init_hash_ops_sync(tfm);
}
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)


@@ -226,6 +226,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
int ds;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
if (err)
@@ -236,6 +237,13 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
alg->cra_digest.dia_digestsize;
if (ds > alg->cra_blocksize)
goto out_put_alg;
inst = crypto_alloc_instance("hmac", alg);
if (IS_ERR(inst))
goto out_put_alg;
@@ -246,14 +254,10 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_hash_type;
-inst->alg.cra_hash.digestsize =
-(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
-alg->cra_digest.dia_digestsize;
inst->alg.cra_hash.digestsize = ds;
inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
-ALIGN(inst->alg.cra_blocksize * 2 +
-inst->alg.cra_hash.digestsize,
ALIGN(inst->alg.cra_blocksize * 2 + ds,
sizeof(void *));
inst->alg.cra_init = hmac_init_tfm;


@@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);

crypto/prng.c (new file, 410 lines)

@@ -0,0 +1,410 @@
/*
* PRNG: Pseudo Random Number Generator
* Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
* AES 128 cipher in RFC3686 ctr mode
*
* (C) Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include "prng.h"
#define TEST_PRNG_ON_START 0
#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
#define DEFAULT_PRNG_KSZ 20
#define DEFAULT_PRNG_IV "defaultv"
#define DEFAULT_PRNG_IVSZ 8
#define DEFAULT_BLK_SZ 16
#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
/*
* Flags for the prng_context flags field
*/
#define PRNG_FIXED_SIZE 0x1
#define PRNG_NEED_RESET 0x2
/*
* Note: DT is our counter value
* I is our intermediate value
* V is our seed vector
* See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
* for implementation details
*/
struct prng_context {
char *prng_key;
char *prng_iv;
spinlock_t prng_lock;
unsigned char rand_data[DEFAULT_BLK_SZ];
unsigned char last_rand_data[DEFAULT_BLK_SZ];
unsigned char DT[DEFAULT_BLK_SZ];
unsigned char I[DEFAULT_BLK_SZ];
unsigned char V[DEFAULT_BLK_SZ];
u32 rand_data_valid;
struct crypto_blkcipher *tfm;
u32 flags;
};
static int dbg;
static void hexdump(char *note, unsigned char *buf, unsigned int len)
{
if (dbg) {
printk(KERN_CRIT "%s", note);
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
16, 1,
buf, len, false);
}
}
#define dbgprint(format, args...) do { if (dbg) printk(format, ##args); } while (0)
static void xor_vectors(unsigned char *in1, unsigned char *in2,
unsigned char *out, unsigned int size)
{
int i;
for (i=0;i<size;i++)
out[i] = in1[i] ^ in2[i];
}
/*
* Returns DEFAULT_BLK_SZ bytes of random data per call
* returns 0 if generation succeeded, <0 if something went wrong
*/
static int _get_more_prng_bytes(struct prng_context *ctx)
{
int i;
struct blkcipher_desc desc;
struct scatterlist sg_in, sg_out;
int ret;
unsigned char tmp[DEFAULT_BLK_SZ];
desc.tfm = ctx->tfm;
desc.flags = 0;
dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
/*
* This algorithm is a 3 stage state machine
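 * per output block, following X9.31 A.2.4:
 *   stage 0: I = E_K(DT)    encrypt the counter
 *   stage 1: R = E_K(I ^ V) the pseudo-random output block (rand_data)
 *   stage 2: V = E_K(R ^ I) the next seed vector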
*/
for (i=0;i<3;i++) {
desc.tfm = ctx->tfm;
desc.flags = 0;
switch (i) {
case 0:
/*
* Start by encrypting the counter value
* This gives us an intermediate value I
*/
memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
break;
case 1:
/*
* Next xor I with our secret vector V
* encrypt that result to obtain our
* pseudo random data which we output
*/
xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
break;
case 2:
/*
* First check that we didn't produce the same random data
* that we did last time around through this
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
printk(KERN_ERR "ctx %p Failed repetition check!\n",
ctx);
ctx->flags |= PRNG_NEED_RESET;
return -1;
}
memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
/*
* Lastly xor the random data with I
* and encrypt that to obtain a new secret vector V
*/
xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
break;
}
/* Initialize our input buffer */
sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
/* do the encryption */
ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
/* And check the result */
if (ret) {
dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx);
ctx->rand_data_valid = DEFAULT_BLK_SZ;
return -1;
}
}
/*
* Now update our DT value
*/
for (i=DEFAULT_BLK_SZ-1;i>0;i--) {
ctx->DT[i] = ctx->DT[i-1];
}
ctx->DT[0] += 1;
dbgprint("Returning new block for context %p\n",ctx);
ctx->rand_data_valid = 0;
hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
return 0;
}
/* Our exported functions */
int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
{
unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
if (nbytes < 0)
return -EINVAL;
spin_lock_irqsave(&ctx->prng_lock, flags);
err = -EFAULT;
if (ctx->flags & PRNG_NEED_RESET)
goto done;
/*
* If the FIXED_SIZE flag is on, only return whole blocks of
* pseudo random data
*/
err = -EINVAL;
if (ctx->flags & PRNG_FIXED_SIZE) {
if (nbytes < DEFAULT_BLK_SZ)
goto done;
byte_count = DEFAULT_BLK_SZ;
}
err = byte_count;
dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx);
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx) < 0) {
memset(buf, 0, nbytes);
err = -EFAULT;
goto done;
}
}
/*
* Copy up to the next whole block size
*/
if (byte_count < DEFAULT_BLK_SZ) {
for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
if (byte_count == 0)
goto done;
}
}
/*
* Now copy whole blocks
*/
for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx) < 0) {
memset(buf, 0, nbytes);
err = -1;
goto done;
}
memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
ctx->rand_data_valid += DEFAULT_BLK_SZ;
ptr += DEFAULT_BLK_SZ;
}
/*
* Now copy any extra partial data
*/
if (byte_count)
goto remainder;
done:
spin_unlock_irqrestore(&ctx->prng_lock, flags);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx);
return err;
}
EXPORT_SYMBOL_GPL(get_prng_bytes);
struct prng_context *alloc_prng_context(void)
{
struct prng_context *ctx = kzalloc(sizeof(struct prng_context), GFP_KERNEL);
if (!ctx)
	return NULL;
spin_lock_init(&ctx->prng_lock);
if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
kfree(ctx);
ctx = NULL;
}
dbgprint(KERN_CRIT "returning context %p\n",ctx);
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_prng_context);
void free_prng_context(struct prng_context *ctx)
{
crypto_free_blkcipher(ctx->tfm);
kfree(ctx);
}
EXPORT_SYMBOL_GPL(free_prng_context);
int reset_prng_context(struct prng_context *ctx,
unsigned char *key, unsigned char *iv,
unsigned char *V, unsigned char *DT)
{
int ret;
int iv_len;
int rc = -EFAULT;
spin_lock(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
if (key)
	ctx->prng_key = (char *)key;
else
	ctx->prng_key = DEFAULT_PRNG_KEY;
if (iv)
	ctx->prng_iv = (char *)iv;
else
	ctx->prng_iv = DEFAULT_PRNG_IV;
if (V)
memcpy(ctx->V,V,DEFAULT_BLK_SZ);
else
memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ);
if (DT)
memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
else
memset(ctx->DT, 0, DEFAULT_BLK_SZ);
memset(ctx->rand_data,0,DEFAULT_BLK_SZ);
memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ);
if (ctx->tfm)
crypto_free_blkcipher(ctx->tfm);
ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))", 0, 0);
if (IS_ERR(ctx->tfm)) {
	dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n", ctx);
	ctx->tfm = NULL;
	goto out;
}
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_blkcipher_get_flags(ctx->tfm));
crypto_free_blkcipher(ctx->tfm);
goto out;
}
iv_len = crypto_blkcipher_ivsize(ctx->tfm);
if (iv_len) {
crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
}
rc = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
spin_unlock(&ctx->prng_lock);
return rc;
}
EXPORT_SYMBOL_GPL(reset_prng_context);
/* Module initialization */
static int __init prng_mod_init(void)
{
#if TEST_PRNG_ON_START
int i;
unsigned char tmpbuf[DEFAULT_BLK_SZ];
struct prng_context *ctx = alloc_prng_context();
if (ctx == NULL)
return -EFAULT;
for (i=0;i<16;i++) {
if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
free_prng_context(ctx);
return -EFAULT;
}
}
free_prng_context(ctx);
#endif
return 0;
}
static void __exit prng_mod_fini(void)
{
return;
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
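For reference, the exported entry points are driven as follows; a minimal sketch in kernel context (error handling and buffer sizing are the caller's responsibility):

    struct prng_context *ctx = alloc_prng_context();
    unsigned char buf[16];	/* DEFAULT_BLK_SZ */

    if (ctx && get_prng_bytes(buf, sizeof(buf), ctx) == sizeof(buf)) {
            /* use buf */
    }
    if (ctx)
            free_prng_context(ctx);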

crypto/prng.h (new file, 27 lines)

@@ -0,0 +1,27 @@
/*
* PRNG: Pseudo Random Number Generator
*
* (C) Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
*/
#ifndef _PRNG_H_
#define _PRNG_H_
struct prng_context;
int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
struct prng_context *alloc_prng_context(void);
int reset_prng_context(struct prng_context *ctx,
unsigned char *key, unsigned char *iv,
unsigned char *V,
unsigned char *DT);
void free_prng_context(struct prng_context *ctx);
#endif

crypto/ripemd.h (new file, 43 lines)

@@ -0,0 +1,43 @@
/*
* Common values for RIPEMD algorithms
*/
#ifndef _CRYPTO_RMD_H
#define _CRYPTO_RMD_H
#define RMD128_DIGEST_SIZE 16
#define RMD128_BLOCK_SIZE 64
#define RMD160_DIGEST_SIZE 20
#define RMD160_BLOCK_SIZE 64
#define RMD256_DIGEST_SIZE 32
#define RMD256_BLOCK_SIZE 64
#define RMD320_DIGEST_SIZE 40
#define RMD320_BLOCK_SIZE 64
/* initial values */
#define RMD_H0 0x67452301UL
#define RMD_H1 0xefcdab89UL
#define RMD_H2 0x98badcfeUL
#define RMD_H3 0x10325476UL
#define RMD_H4 0xc3d2e1f0UL
#define RMD_H5 0x76543210UL
#define RMD_H6 0xfedcba98UL
#define RMD_H7 0x89abcdefUL
#define RMD_H8 0x01234567UL
#define RMD_H9 0x3c2d1e0fUL
/* constants */
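/*
 * Per the RIPEMD specification, RMD_K2..RMD_K5 are the integer parts of
 * 2^30 times the square roots of 2, 3, 5 and 7, and RMD_K6..RMD_K9 the
 * same for the cube roots; RMD_K1 is zero.
 */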
#define RMD_K1 0x00000000UL
#define RMD_K2 0x5a827999UL
#define RMD_K3 0x6ed9eba1UL
#define RMD_K4 0x8f1bbcdcUL
#define RMD_K5 0xa953fd4eUL
#define RMD_K6 0x50a28be6UL
#define RMD_K7 0x5c4dd124UL
#define RMD_K8 0x6d703ef3UL
#define RMD_K9 0x7a6d76e9UL
#endif

crypto/rmd128.c (new file, 325 lines)

@@ -0,0 +1,325 @@
/*
* Cryptographic API.
*
* RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd128_ctx {
u64 byte_count;
u32 state[4];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define ROUND(a, b, c, d, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)); \
}
static void rmd128_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
/* Initialize right lane */
aaa = state[0];
bbb = state[1];
ccc = state[2];
ddd = state[3];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* round 2: right lane */
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* round 3: right lane */
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* round 4: right lane */
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* combine results */
ddd += cc + state[1]; /* final result for state[0] */
state[1] = state[2] + dd + aaa;
state[2] = state[3] + aa + bbb;
state[3] = state[0] + bb + ccc;
state[0] = ddd;
return;
}
static void rmd128_init(struct crypto_tfm *tfm)
{
struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd128_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd128_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd128_update(tfm, padding, padlen);
/* Append length */
rmd128_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 4; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
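The padlen arithmetic above always lands the message on 56 mod 64 bytes before the 8-byte length is appended: a 3-byte input gives index = 3 and padlen = 53 (3 + 53 + 8 = one 64-byte block), while a 60-byte input gives padlen = 60, pushing the length field into a second block. The same computation in isolation (assumed helper name; the logic is shared by all four RIPEMD variants in this patch):

static unsigned int md_padlen(u64 byte_count)
{
	unsigned int index = byte_count & 0x3f;

	/* pad with 0x80 then zeros until the length is 56 mod 64 */
	return (index < 56) ? (56 - index) : (120 - index);
}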
static struct crypto_alg alg = {
.cra_name = "rmd128",
.cra_driver_name = "rmd128",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD128_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd128_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD128_DIGEST_SIZE,
.dia_init = rmd128_init,
.dia_update = rmd128_update,
.dia_final = rmd128_final } }
};
static int __init rmd128_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd128_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd128_mod_init);
module_exit(rmd128_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
MODULE_ALIAS("rmd128");

crypto/rmd160.c Normal file

@ -0,0 +1,369 @@
/*
* Cryptographic API.
*
* RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define K5 RMD_K5
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K9
#define KK5 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define F5(x, y, z) (x ^ (y | ~z))
#define ROUND(a, b, c, d, e, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)) + (e); \
(c) = rol32((c), 10); \
}
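Each ROUND is one RIPEMD-160 step, and successive invocations rotate the argument list one slot so no data has to move between registers. With explicit renaming instead, one step looks like this (hypothetical helper; rol32 comes from <linux/bitops.h>, f is the boolean function already evaluated):

static void rmd160_step_sketch(u32 s[5], u32 f, u32 x, u32 k, int shift)
{
	u32 t = rol32(s[0] + f + x + k, shift) + s[4];

	s[0] = s[4];            /* a <- e */
	s[4] = s[3];            /* e <- d */
	s[3] = rol32(s[2], 10); /* d <- c <<< 10 */
	s[2] = s[1];            /* c <- b */
	s[1] = t;               /* b <- freshly mixed word */
}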
static void rmd160_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
ee = state[4];
/* Initialize right lane */
aaa = state[0];
bbb = state[1];
ccc = state[2];
ddd = state[3];
eee = state[4];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
/* round 2: left lane" */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
/* round 3: left lane" */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
/* round 4: left lane" */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
/* round 5: left lane" */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* round 2: right lane */
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* round 3: right lane */
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* round 4: right lane */
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* round 5: right lane */
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* combine results */
ddd += cc + state[1]; /* final result for state[0] */
state[1] = state[2] + dd + eee;
state[2] = state[3] + ee + aaa;
state[3] = state[4] + aa + bbb;
state[4] = state[0] + bb + ccc;
state[0] = ddd;
return;
}
static void rmd160_init(struct crypto_tfm *tfm)
{
struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd160_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd160_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd160_update(tfm, padding, padlen);
/* Append length */
rmd160_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd160_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD160_DIGEST_SIZE,
.dia_init = rmd160_init,
.dia_update = rmd160_update,
.dia_final = rmd160_final } }
};
static int __init rmd160_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd160_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
MODULE_ALIAS("rmd160");

crypto/rmd256.c Normal file

@ -0,0 +1,344 @@
/*
* Cryptographic API.
*
* RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd256_ctx {
u64 byte_count;
u32 state[8];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define ROUND(a, b, c, d, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)); \
}
static void rmd256_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
/* Initialize right lane */
aaa = state[4];
bbb = state[5];
ccc = state[6];
ddd = state[7];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* Swap contents of "a" registers */
tmp = aa; aa = aaa; aaa = tmp;
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
/* round 2: right lane */
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* Swap contents of "b" registers */
tmp = bb; bb = bbb; bbb = tmp;
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
/* round 3: right lane */
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* Swap contents of "c" registers */
tmp = cc; cc = ccc; ccc = tmp;
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
/* round 4: right lane */
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* Swap contents of "d" registers */
tmp = dd; dd = ddd; ddd = tmp;
/* combine results */
state[0] += aa;
state[1] += bb;
state[2] += cc;
state[3] += dd;
state[4] += aaa;
state[5] += bbb;
state[6] += ccc;
state[7] += ddd;
return;
}
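Unlike RIPEMD-128/160, the 256-bit variant never collapses the two lanes: both survive into the output state, and instead one register is exchanged between the lanes after every round (the tmp swaps above), which is what doubles the hash width without changing the round structure. The exchange could equally be a helper macro (hypothetical; the patch open-codes it):

#define RMD_SWAP(x, y) do { u32 __tmp = (x); (x) = (y); (y) = __tmp; } while (0)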
static void rmd256_init(struct crypto_tfm *tfm)
{
struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H5;
rctx->state[5] = RMD_H6;
rctx->state[6] = RMD_H7;
rctx->state[7] = RMD_H8;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd256_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd256_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd256_update(tfm, padding, padlen);
/* Append length */
rmd256_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd256",
.cra_driver_name = "rmd256",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd256_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD256_DIGEST_SIZE,
.dia_init = rmd256_init,
.dia_update = rmd256_update,
.dia_final = rmd256_final } }
};
static int __init rmd256_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd256_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd256_mod_init);
module_exit(rmd256_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
MODULE_ALIAS("rmd256");

crypto/rmd320.c Normal file

@ -0,0 +1,393 @@
/*
* Cryptographic API.
*
* RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd320_ctx {
u64 byte_count;
u32 state[10];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define K5 RMD_K5
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K9
#define KK5 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define F5(x, y, z) (x ^ (y | ~z))
#define ROUND(a, b, c, d, e, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)) + (e); \
(c) = rol32((c), 10); \
}
static void rmd320_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
ee = state[4];
/* Initialize right lane */
aaa = state[5];
bbb = state[6];
ccc = state[7];
ddd = state[8];
eee = state[9];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* Swap contents of "a" registers */
tmp = aa; aa = aaa; aaa = tmp;
/* round 2: left lane" */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
/* round 2: right lane */
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* Swap contents of "b" registers */
tmp = bb; bb = bbb; bbb = tmp;
/* round 3: left lane" */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
/* round 3: right lane */
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* Swap contents of "c" registers */
tmp = cc; cc = ccc; ccc = tmp;
/* round 4: left lane" */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
/* round 4: right lane */
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* Swap contents of "d" registers */
tmp = dd; dd = ddd; ddd = tmp;
/* round 5: left lane" */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
/* round 5: right lane */
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* Swap contents of "e" registers */
tmp = ee; ee = eee; eee = tmp;
/* combine results */
state[0] += aa;
state[1] += bb;
state[2] += cc;
state[3] += dd;
state[4] += ee;
state[5] += aaa;
state[6] += bbb;
state[7] += ccc;
state[8] += ddd;
state[9] += eee;
return;
}
static void rmd320_init(struct crypto_tfm *tfm)
{
struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
rctx->state[5] = RMD_H5;
rctx->state[6] = RMD_H6;
rctx->state[7] = RMD_H7;
rctx->state[8] = RMD_H8;
rctx->state[9] = RMD_H9;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd320_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd320_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd320_update(tfm, padding, padlen);
/* Append length */
rmd320_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 10; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd320",
.cra_driver_name = "rmd320",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD320_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd320_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD320_DIGEST_SIZE,
.dia_init = rmd320_init,
.dia_update = rmd320_update,
.dia_final = rmd320_final } }
};
static int __init rmd320_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd320_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd320_mod_init);
module_exit(rmd320_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
MODULE_ALIAS("rmd320");

crypto/tcrypt.c

@ -13,15 +13,9 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* 2007-11-13 Added GCM tests
* 2007-11-13 Added AEAD support
* 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
* 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
*
*/
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
@ -30,7 +24,6 @@
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
@ -38,7 +31,7 @@
#include "tcrypt.h"
/*
* Need to kmalloc() memory for testing kmap().
* Need to kmalloc() memory for testing.
*/
#define TVMEMSIZE 16384
#define XBUFSIZE 32768
@ -46,7 +39,7 @@
/*
* Indexes into the xbuf to simulate cross-page access.
*/
#define IDX1 37
#define IDX1 32
#define IDX2 32400
#define IDX3 1
#define IDX4 8193
@ -83,7 +76,8 @@ static char *check[] = {
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "lzo", "cts", NULL
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
"lzo", "cts", NULL
};
static void hexdump(unsigned char *buf, unsigned int len)
@ -110,22 +104,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
unsigned int i, j, k, temp;
struct scatterlist sg[8];
char result[64];
struct crypto_hash *tfm;
struct hash_desc desc;
struct crypto_ahash *tfm;
struct ahash_request *req;
struct tcrypt_result tresult;
int ret;
void *hash_buff;
printk("\ntesting %s\n", algo);
tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
init_completion(&tresult.completion);
tfm = crypto_alloc_ahash(algo, 0, 0);
if (IS_ERR(tfm)) {
printk("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
desc.tfm = tfm;
desc.flags = 0;
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
printk(KERN_ERR "failed to allocate request for %s\n", algo);
goto out_noreq;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &tresult);
for (i = 0; i < tcount; i++) {
printk("test %u:\n", i + 1);
@ -139,8 +141,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
sg_init_one(&sg[0], hash_buff, template[i].psize);
if (template[i].ksize) {
ret = crypto_hash_setkey(tfm, template[i].key,
template[i].ksize);
crypto_ahash_clear_flags(tfm, ~0);
ret = crypto_ahash_setkey(tfm, template[i].key,
template[i].ksize);
if (ret) {
printk("setkey() failed ret=%d\n", ret);
kfree(hash_buff);
@ -148,17 +151,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
}
}
ret = crypto_hash_digest(&desc, sg, template[i].psize, result);
if (ret) {
ahash_request_set_crypt(req, sg, result, template[i].psize);
ret = crypto_ahash_digest(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&tresult.completion);
if (!ret && !(ret = tresult.err)) {
INIT_COMPLETION(tresult.completion);
break;
}
/* fall through */
default:
printk("digest () failed ret=%d\n", ret);
kfree(hash_buff);
goto out;
}
hexdump(result, crypto_hash_digestsize(tfm));
hexdump(result, crypto_ahash_digestsize(tfm));
printk("%s\n",
memcmp(result, template[i].digest,
crypto_hash_digestsize(tfm)) ?
crypto_ahash_digestsize(tfm)) ?
"fail" : "pass");
kfree(hash_buff);
}
@ -187,8 +203,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
}
if (template[i].ksize) {
ret = crypto_hash_setkey(tfm, template[i].key,
template[i].ksize);
crypto_ahash_clear_flags(tfm, ~0);
ret = crypto_ahash_setkey(tfm, template[i].key,
template[i].ksize);
if (ret) {
printk("setkey() failed ret=%d\n", ret);
@ -196,29 +213,44 @@ static void test_hash(char *algo, struct hash_testvec *template,
}
}
ret = crypto_hash_digest(&desc, sg, template[i].psize,
result);
if (ret) {
ahash_request_set_crypt(req, sg, result,
template[i].psize);
ret = crypto_ahash_digest(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&tresult.completion);
if (!ret && !(ret = tresult.err)) {
INIT_COMPLETION(tresult.completion);
break;
}
/* fall through */
default:
printk("digest () failed ret=%d\n", ret);
goto out;
}
hexdump(result, crypto_hash_digestsize(tfm));
hexdump(result, crypto_ahash_digestsize(tfm));
printk("%s\n",
memcmp(result, template[i].digest,
crypto_hash_digestsize(tfm)) ?
crypto_ahash_digestsize(tfm)) ?
"fail" : "pass");
}
}
out:
crypto_free_hash(tfm);
ahash_request_free(req);
out_noreq:
crypto_free_ahash(tfm);
}
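The completion plumbing used above (struct tcrypt_result, tcrypt_complete) is defined elsewhere in tcrypt.c; its assumed shape is a completion paired with an error slot, with the callback ignoring the transient -EINPROGRESS notification that backlogged requests emit:

struct tcrypt_result {
	struct completion completion;
	int err;
};

static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}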
static void test_aead(char *algo, int enc, struct aead_testvec *template,
unsigned int tcount)
{
unsigned int ret, i, j, k, temp;
unsigned int ret, i, j, k, n, temp;
char *q;
struct crypto_aead *tfm;
char *key;
@ -344,13 +376,12 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
goto next_one;
}
q = kmap(sg_page(&sg[0])) + sg[0].offset;
q = input;
hexdump(q, template[i].rlen);
printk(KERN_INFO "enc/dec: %s\n",
memcmp(q, template[i].result,
template[i].rlen) ? "fail" : "pass");
kunmap(sg_page(&sg[0]));
next_one:
if (!template[i].key)
kfree(key);
@ -360,7 +391,6 @@ next_one:
}
printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
memset(xbuf, 0, XBUFSIZE);
memset(axbuf, 0, XBUFSIZE);
for (i = 0, j = 0; i < tcount; i++) {
@ -388,6 +418,7 @@ next_one:
goto out;
}
memset(xbuf, 0, XBUFSIZE);
sg_init_table(sg, template[i].np);
for (k = 0, temp = 0; k < template[i].np; k++) {
memcpy(&xbuf[IDX[k]],
@ -450,7 +481,7 @@ next_one:
for (k = 0, temp = 0; k < template[i].np; k++) {
printk(KERN_INFO "page %u\n", k);
q = kmap(sg_page(&sg[k])) + sg[k].offset;
q = &axbuf[IDX[k]];
hexdump(q, template[i].tap[k]);
printk(KERN_INFO "%s\n",
memcmp(q, template[i].result + temp,
@ -459,8 +490,15 @@ next_one:
0 : authsize)) ?
"fail" : "pass");
for (n = 0; q[template[i].tap[k] + n]; n++)
;
if (n) {
printk("Result buffer corruption %u "
"bytes:\n", n);
hexdump(&q[template[i].tap[k]], n);
}
temp += template[i].tap[k];
kunmap(sg_page(&sg[k]));
}
}
}
@ -473,7 +511,7 @@ out:
static void test_cipher(char *algo, int enc,
struct cipher_testvec *template, unsigned int tcount)
{
unsigned int ret, i, j, k, temp;
unsigned int ret, i, j, k, n, temp;
char *q;
struct crypto_ablkcipher *tfm;
struct ablkcipher_request *req;
@ -569,19 +607,17 @@ static void test_cipher(char *algo, int enc,
goto out;
}
q = kmap(sg_page(&sg[0])) + sg[0].offset;
q = data;
hexdump(q, template[i].rlen);
printk("%s\n",
memcmp(q, template[i].result,
template[i].rlen) ? "fail" : "pass");
kunmap(sg_page(&sg[0]));
}
kfree(data);
}
printk("\ntesting %s %s across pages (chunking)\n", algo, e);
memset(xbuf, 0, XBUFSIZE);
j = 0;
for (i = 0; i < tcount; i++) {
@ -596,6 +632,7 @@ static void test_cipher(char *algo, int enc,
printk("test %u (%d bit key):\n",
j, template[i].klen * 8);
memset(xbuf, 0, XBUFSIZE);
crypto_ablkcipher_clear_flags(tfm, ~0);
if (template[i].wk)
crypto_ablkcipher_set_flags(
@ -649,14 +686,21 @@ static void test_cipher(char *algo, int enc,
temp = 0;
for (k = 0; k < template[i].np; k++) {
printk("page %u\n", k);
q = kmap(sg_page(&sg[k])) + sg[k].offset;
q = &xbuf[IDX[k]];
hexdump(q, template[i].tap[k]);
printk("%s\n",
memcmp(q, template[i].result + temp,
template[i].tap[k]) ? "fail" :
"pass");
for (n = 0; q[template[i].tap[k] + n]; n++)
;
if (n) {
printk("Result buffer corruption %u "
"bytes:\n", n);
hexdump(&q[template[i].tap[k]], n);
}
temp += template[i].tap[k];
kunmap(sg_page(&sg[k]));
}
}
}
@ -1172,6 +1216,14 @@ static void do_test(void)
test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
DES3_EDE_DEC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", ENCRYPT,
des3_ede_cbc_enc_tv_template,
DES3_EDE_CBC_ENC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", DECRYPT,
des3_ede_cbc_dec_tv_template,
DES3_EDE_CBC_DEC_TEST_VECTORS);
test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
@ -1382,6 +1434,14 @@ static void do_test(void)
DES3_EDE_ENC_TEST_VECTORS);
test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
DES3_EDE_DEC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", ENCRYPT,
des3_ede_cbc_enc_tv_template,
DES3_EDE_CBC_ENC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", DECRYPT,
des3_ede_cbc_dec_tv_template,
DES3_EDE_CBC_DEC_TEST_VECTORS);
break;
case 5:
@ -1550,7 +1610,7 @@ static void do_test(void)
case 29:
test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
break;
case 30:
test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
XETA_ENC_TEST_VECTORS);
@ -1615,6 +1675,22 @@ static void do_test(void)
CTS_MODE_DEC_TEST_VECTORS);
break;
case 39:
test_hash("rmd128", rmd128_tv_template, RMD128_TEST_VECTORS);
break;
case 40:
test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS);
break;
case 41:
test_hash("rmd256", rmd256_tv_template, RMD256_TEST_VECTORS);
break;
case 42:
test_hash("rmd320", rmd320_tv_template, RMD320_TEST_VECTORS);
break;
case 100:
test_hash("hmac(md5)", hmac_md5_tv_template,
HMAC_MD5_TEST_VECTORS);
@ -1650,6 +1726,16 @@ static void do_test(void)
XCBC_AES_TEST_VECTORS);
break;
case 107:
test_hash("hmac(rmd128)", hmac_rmd128_tv_template,
HMAC_RMD128_TEST_VECTORS);
break;
case 108:
test_hash("hmac(rmd160)", hmac_rmd160_tv_template,
HMAC_RMD160_TEST_VECTORS);
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
@ -1788,6 +1874,22 @@ static void do_test(void)
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 399:
break;

crypto/tcrypt.h

@ -13,12 +13,6 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* 2007-11-13 Added GCM tests
* 2007-11-13 Added AEAD support
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
* 2003-09-14 Changes by Kartikey Mahendra Bhatt
*
*/
#ifndef _CRYPTO_TCRYPT_H
#define _CRYPTO_TCRYPT_H
@ -168,6 +162,271 @@ static struct hash_testvec md5_tv_template[] = {
.digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55"
"\xac\x49\xda\x2e\x21\x07\xb6\x7a",
}
};
/*
* RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
*/
#define RMD128_TEST_VECTORS 10
static struct hash_testvec rmd128_tv_template[] = {
{
.digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
"\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
"\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
"\x84\x63\x6b\x0f\x69\x14\x4c\x77",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
"\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
"\x10\x71\x49\x22\xb3\x71\x83\x4e",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
"\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
"\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
"\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
.np = 2,
.tap = { 28, 28 },
}, {
.plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
"lmnopqrsmnopqrstnopqrstu",
.psize = 112,
.digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
"\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighijhijk",
.psize = 32,
.digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
"\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
}
};
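Vectors carrying .np and .tap exercise the cross-page chunking path: the harness splits the plaintext into np scatterlist segments of tap[k] bytes placed at the staggered IDX offsets, so the 56-byte entry above is hashed across two discontiguous 28-byte buffers. A sketch of that layout, reusing the names from test_hash above:

sg_init_table(sg, template[i].np);
for (k = 0, temp = 0; k < template[i].np; k++) {
	memcpy(&xbuf[IDX[k]], template[i].plaintext + temp,
	       template[i].tap[k]);
	sg_set_buf(&sg[k], &xbuf[IDX[k]], template[i].tap[k]);
	temp += template[i].tap[k];
}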
/*
* RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
*/
#define RMD160_TEST_VECTORS 10
static struct hash_testvec rmd160_tv_template[] = {
{
.digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
"\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae"
"\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04"
"\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8"
"\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb"
"\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed"
"\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb"
"\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
"\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
.np = 2,
.tap = { 28, 28 },
}, {
.plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
"lmnopqrsmnopqrstnopqrstu",
.psize = 112,
.digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91"
"\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighijhijk",
.psize = 32,
.digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d"
"\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f",
}
};
/*
* RIPEMD-256 test vectors
*/
#define RMD256_TEST_VECTORS 8
static struct hash_testvec rmd256_tv_template[] = {
{
.digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
"\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
"\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
"\x80\xae\x01\x68\xe3\xc5\x52\x2d",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
"\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
"\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
"\xcd\x88\x3a\x91\x34\x69\x29\x25",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
"\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
"\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
"\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
"\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
"\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
"\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
"\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
"\x78\x96\x11\x8a\x51\x97\x96\x87"
"\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
"\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
"\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
"\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
"\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
"\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
"\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
"\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
"\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
"\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
.np = 2,
.tap = { 28, 28 },
}
};
/*
* RIPEMD-320 test vectors
*/
#define RMD320_TEST_VECTORS 8
static struct hash_testvec rmd320_tv_template[] = {
{
.digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
"\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
"\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
"\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
"\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
"\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
"\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
"\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
"\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
"\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
"\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
"\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
"\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
"\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
"\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
"\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
"\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
"\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
"\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
"\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
"\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
"\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
"\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
"\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
"\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
.np = 2,
.tap = { 28, 28 },
}
};
/*
@ -816,6 +1075,168 @@ static struct hash_testvec hmac_md5_tv_template[] =
},
};
/*
* HMAC-RIPEMD128 test vectors from RFC2286
*/
#define HMAC_RMD128_TEST_VECTORS 7
static struct hash_testvec hmac_rmd128_tv_template[] = {
{
.key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
.ksize = 16,
.plaintext = "Hi There",
.psize = 8,
.digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
"\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
}, {
.key = "Jefe",
.ksize = 4,
.plaintext = "what do ya want for nothing?",
.psize = 28,
.digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
"\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
.np = 2,
.tap = { 14, 14 },
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 16,
.plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
.psize = 50,
.digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
"\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
}, {
.key = "\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
"\x11\x12\x13\x14\x15\x16\x17\x18\x19",
.ksize = 25,
.plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
.psize = 50,
.digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
"\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
}, {
.key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
.ksize = 16,
.plaintext = "Test With Truncation",
.psize = 20,
.digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
"\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
.psize = 54,
.digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
"\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
"Block-Size Data",
.psize = 73,
.digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
"\x06\x90\xc2\x37\x63\x5f\x30\xc5",
},
};
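Editorial note: the HMAC templates differ from the plain digests only in being keyed, so crypto_hash_setkey() must run before the digest. A sketch of checking one vector, assuming the generic "hmac(rmd128)" template wraps the new RMD128 digest (helper name illustrative):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/err.h>

/* Illustrative only: keyed digest of one HMAC-RIPEMD128 vector. */
static int check_hmac_rmd128(struct hash_testvec *tv)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	u8 out[16];		/* RMD128 digest size */
	int err;

	tfm = crypto_alloc_hash("hmac(rmd128)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_hash_setkey(tfm, tv->key, tv->ksize);
	if (!err) {
		sg_init_one(&sg, tv->plaintext, tv->psize);
		err = crypto_hash_digest(&desc, &sg, tv->psize, out);
	}
	if (!err && memcmp(out, tv->digest, sizeof(out)))
		err = -EINVAL;

	crypto_free_hash(tfm);
	return err;
}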
/*
* HMAC-RIPEMD160 test vectors from RFC2286
*/
#define HMAC_RMD160_TEST_VECTORS 7
static struct hash_testvec hmac_rmd160_tv_template[] = {
{
.key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
.ksize = 20,
.plaintext = "Hi There",
.psize = 8,
.digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e"
"\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68",
}, {
.key = "Jefe",
.ksize = 4,
.plaintext = "what do ya want for nothing?",
.psize = 28,
.digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
"\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
.np = 2,
.tap = { 14, 14 },
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 20,
.plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
.psize = 50,
.digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4"
"\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1",
}, {
.key = "\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
"\x11\x12\x13\x14\x15\x16\x17\x18\x19",
.ksize = 25,
.plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
.psize = 50,
.digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1"
"\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4",
}, {
.key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
.ksize = 20,
.plaintext = "Test With Truncation",
.psize = 20,
.digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a"
"\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
.psize = 54,
.digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd"
"\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
"Block-Size Data",
.psize = 73,
.digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f"
"\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a",
},
};
/*
* HMAC-SHA1 test vectors from RFC2202
*/
@ -1442,6 +1863,8 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
#define DES_CBC_DEC_TEST_VECTORS 4
#define DES3_EDE_ENC_TEST_VECTORS 3
#define DES3_EDE_DEC_TEST_VECTORS 3
#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
static struct cipher_testvec des_enc_tv_template[] = {
{ /* From Applied Cryptography */
@ -1680,9 +2103,6 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
},
};
/*
* We really need some more test vectors, especially for DES3 CBC.
*/
static struct cipher_testvec des3_ede_enc_tv_template[] = {
{ /* These are from openssl */
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
@ -1745,6 +2165,94 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
},
};
static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
{ /* Generated from openssl */
.key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
"\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
"\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
.klen = 24,
.iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
"\x20\x73\x6f\x54\x20\x6f\x61\x4d"
"\x79\x6e\x53\x20\x63\x65\x65\x72"
"\x73\x74\x54\x20\x6f\x6f\x4d\x20"
"\x6e\x61\x20\x79\x65\x53\x72\x63"
"\x74\x65\x20\x73\x6f\x54\x20\x6f"
"\x61\x4d\x79\x6e\x53\x20\x63\x65"
"\x65\x72\x73\x74\x54\x20\x6f\x6f"
"\x4d\x20\x6e\x61\x20\x79\x65\x53"
"\x72\x63\x74\x65\x20\x73\x6f\x54"
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
.ilen = 128,
.result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
"\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
"\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
"\x76\xd1\xda\x0c\x94\x67\xbb\x04"
"\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
"\x22\x64\x47\xaa\x8f\x75\x13\xbf"
"\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
"\x71\x63\x2e\x89\x7b\x1e\x12\xca"
"\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
"\xd6\xf9\x21\x31\x62\x44\x45\xa6"
"\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
"\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
"\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
.rlen = 128,
},
};
static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
{ /* Generated from openssl */
.key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
"\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
"\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
.klen = 24,
.iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
"\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
"\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
"\x76\xd1\xda\x0c\x94\x67\xbb\x04"
"\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
"\x22\x64\x47\xaa\x8f\x75\x13\xbf"
"\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
"\x71\x63\x2e\x89\x7b\x1e\x12\xca"
"\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
"\xd6\xf9\x21\x31\x62\x44\x45\xa6"
"\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
"\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
"\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
.ilen = 128,
.result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
"\x20\x73\x6f\x54\x20\x6f\x61\x4d"
"\x79\x6e\x53\x20\x63\x65\x65\x72"
"\x73\x74\x54\x20\x6f\x6f\x4d\x20"
"\x6e\x61\x20\x79\x65\x53\x72\x63"
"\x74\x65\x20\x73\x6f\x54\x20\x6f"
"\x61\x4d\x79\x6e\x53\x20\x63\x65"
"\x65\x72\x73\x74\x54\x20\x6f\x6f"
"\x4d\x20\x6e\x61\x20\x79\x65\x53"
"\x72\x63\x74\x65\x20\x73\x6f\x54"
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
.rlen = 128,
},
};
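Editorial note: these vectors finally give cbc(des3_ede) real coverage (hence the "We really need some more test vectors" comment dropped above). A sketch of a one-shot, in-place check through the synchronous blkcipher interface (buffer handling simplified; tcrypt's test_cipher() is the real consumer):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/err.h>

/* Illustrative only: in-place, single-segment encryption check. */
static int check_des3_cbc(struct cipher_testvec *tv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 buf[128];
	int err;

	tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_setkey(tfm, tv->key, tv->klen);
	if (!err) {
		crypto_blkcipher_set_iv(tfm, tv->iv, 8);	/* DES block size */
		memcpy(buf, tv->input, tv->ilen);
		sg_init_one(&sg, buf, tv->ilen);
		err = crypto_blkcipher_encrypt(&desc, &sg, &sg, tv->ilen);
	}
	if (!err && memcmp(buf, tv->result, tv->rlen))
		err = -EINVAL;

	crypto_free_blkcipher(tfm);
	return err;
}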
/*
* Blowfish test vectors.
*/

drivers/crypto/Kconfig

@ -174,4 +174,30 @@ config CRYPTO_DEV_HIFN_795X_RNG
Select this option if you want to enable the random number generator
on the HIFN 795x crypto adapters.
config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select HW_RANDOM
depends on FSL_SOC
help
Say 'Y' here to use the Freescale Security Engine (SEC)
to offload cryptographic algorithm computation.
The Freescale SEC is present on PowerQUICC 'E' processors, such
as the MPC8349E and MPC8548E.
To compile this driver as a module, choose M here: the module
will be called talitos.
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
depends on ARCH_IXP4XX
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
endif # CRYPTO_HW

drivers/crypto/Makefile

@ -2,3 +2,5 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o

View File

@ -29,7 +29,6 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/ktime.h>
@ -369,7 +368,9 @@ static atomic_t hifn_dev_number;
#define HIFN_D_DST_RSIZE 80*4
#define HIFN_D_RES_RSIZE 24*4
#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5
#define HIFN_D_DST_DALIGN 4
#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1
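/* (editorial note) this hunk is rendered without -/+ markers: the first
 * HIFN_QUEUE_LENGTH definition (RSIZE-5) is the removed line; the patch
 * adds HIFN_D_DST_DALIGN and redefines the queue length as RSIZE-1. */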
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
@ -535,10 +536,10 @@ struct hifn_crypt_command
*/
struct hifn_mac_command
{
volatile u16 masks;
volatile u16 header_skip;
volatile u16 source_count;
volatile u16 reserved;
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_MAC_CMD_ALG_MASK 0x0001
@ -564,10 +565,10 @@ struct hifn_mac_command
struct hifn_comp_command
{
volatile u16 masks;
volatile u16 header_skip;
volatile u16 source_count;
volatile u16 reserved;
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_COMP_CMD_SRCLEN_M 0xc000
@ -583,10 +584,10 @@ struct hifn_comp_command
struct hifn_base_result
{
volatile u16 flags;
volatile u16 session;
volatile u16 src_cnt; /* 15:0 of source count */
volatile u16 dst_cnt; /* 15:0 of dest count */
volatile __le16 flags;
volatile __le16 session;
volatile __le16 src_cnt; /* 15:0 of source count */
volatile __le16 dst_cnt; /* 15:0 of dest count */
};
#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
@ -597,8 +598,8 @@ struct hifn_base_result
struct hifn_comp_result
{
volatile u16 flags;
volatile u16 crc;
volatile __le16 flags;
volatile __le16 crc;
};
#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
@ -609,8 +610,8 @@ struct hifn_comp_result
struct hifn_mac_result
{
volatile u16 flags;
volatile u16 reserved;
volatile __le16 flags;
volatile __le16 reserved;
/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
};
@ -619,8 +620,8 @@ struct hifn_mac_result
struct hifn_crypt_result
{
volatile u16 flags;
volatile u16 reserved;
volatile __le16 flags;
volatile __le16 reserved;
};
#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
@ -686,12 +687,12 @@ static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
{
writel(val, dev->bar[0] + reg);
writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
}
static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
{
writel(val, dev->bar[1] + reg);
writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
}
static void hifn_wait_puc(struct hifn_device *dev)
@ -894,7 +895,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
char *offtbl = NULL;
int i;
for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
if (pci2id[i].pci_vendor == dev->pdev->vendor &&
pci2id[i].pci_prod == dev->pdev->device) {
offtbl = pci2id[i].card_id;
@ -1037,14 +1038,14 @@ static void hifn_init_registers(struct hifn_device *dev)
hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
/* write all 4 ring address registers */
hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, cmdr[0])));
hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, srcr[0])));
hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, dstr[0])));
hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, resr[0])));
hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
offsetof(struct hifn_dma, cmdr[0]));
hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
offsetof(struct hifn_dma, srcr[0]));
hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
offsetof(struct hifn_dma, dstr[0]));
hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
offsetof(struct hifn_dma, resr[0]));
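/* (editorial note) the __cpu_to_le32() wrappers in the old lines above
 * became redundant: hifn_write_1() itself now byte-swaps (see the
 * hifn_write_0/hifn_write_1 hunk earlier in this file's diff). */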
mdelay(2);
#if 0
@ -1166,109 +1167,15 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
return cmd_len;
}
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
idx = dma->srci;
dma->srcr[idx].p = __cpu_to_le32(addr);
dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
if (++idx == HIFN_D_SRC_RSIZE) {
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
idx = 0;
}
dma->srci = idx;
dma->srcu++;
if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
dev->flags |= HIFN_FLAG_SRC_BUSY;
}
return size;
}
static void hifn_setup_res_desc(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
/*
* dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
* HIFN_D_LAST | HIFN_D_NOINVALID);
*/
if (++dma->resi == HIFN_D_RES_RSIZE) {
dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
dma->resi = 0;
}
dma->resu++;
if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
dev->flags |= HIFN_FLAG_RES_BUSY;
}
}
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
if (++idx == HIFN_D_DST_RSIZE) {
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
HIFN_D_LAST | HIFN_D_NOINVALID);
idx = 0;
}
dma->dsti = idx;
dma->dstu++;
if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
dev->flags |= HIFN_FLAG_DST_BUSY;
}
}
static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
struct hifn_context *ctx)
static int hifn_setup_cmd_desc(struct hifn_device *dev,
struct hifn_context *ctx, void *priv, unsigned int nbytes)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int cmd_len, sa_idx;
u8 *buf, *buf_pos;
u16 mask;
dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
sa_idx = dma->resi;
hifn_setup_src_desc(dev, spage, soff, nbytes);
sa_idx = dma->cmdi;
buf_pos = buf = dma->command_bufs[dma->cmdi];
mask = 0;
@ -1370,16 +1277,113 @@ static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
dev->flags |= HIFN_FLAG_CMD_BUSY;
}
hifn_setup_dst_desc(dev, dpage, doff, nbytes);
hifn_setup_res_desc(dev);
return 0;
err_out:
return -EINVAL;
}
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
idx = dma->srci;
dma->srcr[idx].p = __cpu_to_le32(addr);
dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
if (++idx == HIFN_D_SRC_RSIZE) {
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
idx = 0;
}
dma->srci = idx;
dma->srcu++;
if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
dev->flags |= HIFN_FLAG_SRC_BUSY;
}
return size;
}
static void hifn_setup_res_desc(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
/*
* dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
* HIFN_D_LAST);
*/
if (++dma->resi == HIFN_D_RES_RSIZE) {
dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
dma->resi = 0;
}
dma->resu++;
if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
dev->flags |= HIFN_FLAG_RES_BUSY;
}
}
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
if (++idx == HIFN_D_DST_RSIZE) {
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
HIFN_D_LAST);
idx = 0;
}
dma->dsti = idx;
dma->dstu++;
if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
dev->flags |= HIFN_FLAG_DST_BUSY;
}
}
static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
struct hifn_context *ctx)
{
dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
hifn_setup_src_desc(dev, spage, soff, nbytes);
hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
hifn_setup_dst_desc(dev, dpage, doff, nbytes);
hifn_setup_res_desc(dev);
return 0;
}
static int ablkcipher_walk_init(struct ablkcipher_walk *w,
int num, gfp_t gfp_flags)
{
@ -1431,7 +1435,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
return -EINVAL;
while (size) {
copy = min(drest, src->length);
copy = min(drest, min(size, src->length));
saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
memcpy(daddr, saddr + src->offset, copy);
@ -1458,10 +1462,6 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
static int ablkcipher_walk(struct ablkcipher_request *req,
struct ablkcipher_walk *w)
{
unsigned blocksize =
crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
unsigned alignmask =
crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
struct scatterlist *src, *dst, *t;
void *daddr;
unsigned int nbytes = req->nbytes, offset, copy, diff;
@ -1477,16 +1477,14 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
dst = &req->dst[idx];
dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
"blocksize: %u, nbytes: %u.\n",
"nbytes: %u.\n",
__func__, src->length, dst->length, src->offset,
dst->offset, offset, blocksize, nbytes);
dst->offset, offset, nbytes);
if (src->length & (blocksize - 1) ||
src->offset & (alignmask - 1) ||
dst->length & (blocksize - 1) ||
dst->offset & (alignmask - 1) ||
offset) {
unsigned slen = src->length - offset;
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
!IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
offset) {
unsigned slen = min(src->length - offset, nbytes);
unsigned dlen = PAGE_SIZE;
t = &w->cache[idx];
@ -1498,8 +1496,8 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
idx += err;
copy = slen & ~(blocksize - 1);
diff = slen & (blocksize - 1);
copy = slen & ~(HIFN_D_DST_DALIGN - 1);
diff = slen & (HIFN_D_DST_DALIGN - 1);
if (dlen < nbytes) {
/*
@ -1507,7 +1505,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
* to put there additional blocksized chunk,
* so we mark that page as containing only
* blocksize aligned chunks:
* t->length = (slen & ~(blocksize - 1));
* t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
* and increase number of bytes to be processed
* in next chunk:
* nbytes += diff;
@ -1544,7 +1542,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
kunmap_atomic(daddr, KM_SOFTIRQ0);
} else {
nbytes -= src->length;
nbytes -= min(src->length, nbytes);
idx++;
}
@ -1563,14 +1561,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
struct page *spage, *dpage;
unsigned long soff, doff, flags;
unsigned long soff, doff, dlen, flags;
unsigned int nbytes = req->nbytes, idx = 0, len;
int err = -EINVAL, sg_num;
struct scatterlist *src, *dst, *t;
unsigned blocksize =
crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
unsigned alignmask =
crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
goto err_out_exit;
@ -1578,17 +1572,14 @@ static int hifn_setup_session(struct ablkcipher_request *req)
ctx->walk.flags = 0;
while (nbytes) {
src = &req->src[idx];
dst = &req->dst[idx];
dlen = min(dst->length, nbytes);
if (src->length & (blocksize - 1) ||
src->offset & (alignmask - 1) ||
dst->length & (blocksize - 1) ||
dst->offset & (alignmask - 1)) {
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
!IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
}
nbytes -= src->length;
nbytes -= dlen;
idx++;
}
@ -1602,7 +1593,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
idx = 0;
sg_num = ablkcipher_walk(req, &ctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
}
atomic_set(&ctx->sg_num, sg_num);
spin_lock_irqsave(&dev->lock, flags);
@ -1640,7 +1634,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
if (err)
goto err_out;
nbytes -= len;
nbytes -= min(len, nbytes);
}
dev->active = HIFN_DEFAULT_ACTIVE_NUM;
@ -1651,7 +1645,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
err_out:
spin_unlock_irqrestore(&dev->lock, flags);
err_out_exit:
if (err && printk_ratelimit())
if (err)
dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
"type: %u, err: %d.\n",
dev->name, ctx->iv, ctx->ivsize,
@ -1745,8 +1739,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
return -EINVAL;
while (size) {
copy = min(dst->length, srest);
copy = min(srest, min(dst->length, size));
daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
memcpy(daddr + dst->offset + offset, saddr, copy);
@ -1803,7 +1796,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
sg_page(dst), dst->length, nbytes);
if (!t->length) {
nbytes -= dst->length;
nbytes -= min(dst->length, nbytes);
idx++;
continue;
}
@ -2202,9 +2195,9 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
return err;
if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
err = hifn_process_queue(dev);
hifn_process_queue(dev);
return err;
return -EINPROGRESS;
}
/*
@ -2364,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
* 3DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
@ -2374,7 +2367,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
@ -2384,8 +2377,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
.ablkcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
@ -2394,7 +2388,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
@ -2408,7 +2402,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
* DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
@ -2418,7 +2412,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
@ -2428,8 +2422,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
.ablkcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
@ -2438,7 +2433,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
@ -2452,7 +2447,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
* AES ECB, CBC, CFB and OFB modes.
*/
{
.name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -2462,8 +2457,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
.ablkcipher = {
.ivsize = HIFN_AES_IV_LENGTH,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
@ -2472,7 +2468,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -2482,7 +2478,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -2514,15 +2510,14 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
return -ENOMEM;
snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
t->drv_name, dev->name);
alg->alg.cra_priority = 300;
alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
alg->alg.cra_blocksize = t->bsize;
alg->alg.cra_ctxsize = sizeof(struct hifn_context);
alg->alg.cra_alignmask = 15;
if (t->bsize == 8)
alg->alg.cra_alignmask = 3;
alg->alg.cra_alignmask = 0;
alg->alg.cra_type = &crypto_ablkcipher_type;
alg->alg.cra_module = THIS_MODULE;
alg->alg.cra_u.ablkcipher = t->ablkcipher;

drivers/crypto/ixp4xx_crypto.c (new file; diff suppressed because it is too large)

drivers/crypto/padlock-aes.c

@ -385,12 +385,12 @@ static int __init padlock_init(void)
int ret;
if (!cpu_has_xcrypt) {
printk(KERN_ERR PFX "VIA PadLock not detected.\n");
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}

drivers/crypto/padlock-sha.c

@ -254,12 +254,12 @@ static int __init padlock_init(void)
int rc = -ENODEV;
if (!cpu_has_phe) {
printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
return -ENODEV;
}
if (!cpu_has_phe_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}

drivers/crypto/talitos.c (new file, 1597 lines; diff suppressed because it is too large)

drivers/crypto/talitos.h (new file, 199 lines)

@ -0,0 +1,199 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
* Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
*/
/* global register offset addresses */
#define TALITOS_MCR 0x1030 /* master control register */
#define TALITOS_MCR_LO 0x1038
#define TALITOS_MCR_SWR 0x1 /* s/w reset */
#define TALITOS_IMR 0x1008 /* interrupt mask register */
#define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */
#define TALITOS_IMR_LO 0x100C
#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
#define TALITOS_ISR 0x1010 /* interrupt status register */
#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
#define TALITOS_ISR_LO 0x1014
#define TALITOS_ICR 0x1018 /* interrupt clear register */
#define TALITOS_ICR_LO 0x101C
/* channel register address stride */
#define TALITOS_CH_STRIDE 0x100
/* channel configuration register */
#define TALITOS_CCCR(ch) (ch * TALITOS_CH_STRIDE + 0x1108)
#define TALITOS_CCCR_CONT 0x2 /* channel continue */
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
/* CCPSR: channel pointer status register */
#define TALITOS_CCPSR(ch) (ch * TALITOS_CH_STRIDE + 0x1110)
#define TALITOS_CCPSR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1114)
#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
#define TALITOS_CCPSR_LO_SGDLZ 0x1000 /* s/g data len zero error */
#define TALITOS_CCPSR_LO_FPZ 0x0800 /* fetch ptr zero error */
#define TALITOS_CCPSR_LO_IDH 0x0400 /* illegal desc hdr error */
#define TALITOS_CCPSR_LO_IEU 0x0200 /* invalid EU error */
#define TALITOS_CCPSR_LO_EU 0x0100 /* EU error detected */
#define TALITOS_CCPSR_LO_GB 0x0080 /* gather boundary error */
#define TALITOS_CCPSR_LO_GRL 0x0040 /* gather return/length error */
#define TALITOS_CCPSR_LO_SB 0x0020 /* scatter boundary error */
#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
/* channel fetch fifo register */
#define TALITOS_FF(ch) (ch * TALITOS_CH_STRIDE + 0x1148)
#define TALITOS_FF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x114c)
/* current descriptor pointer register */
#define TALITOS_CDPR(ch) (ch * TALITOS_CH_STRIDE + 0x1140)
#define TALITOS_CDPR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1144)
/* descriptor buffer register */
#define TALITOS_DESCBUF(ch) (ch * TALITOS_CH_STRIDE + 0x1180)
#define TALITOS_DESCBUF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1184)
/* gather link table */
#define TALITOS_GATHER(ch) (ch * TALITOS_CH_STRIDE + 0x11c0)
#define TALITOS_GATHER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11c4)
/* scatter link table */
#define TALITOS_SCATTER(ch) (ch * TALITOS_CH_STRIDE + 0x11e0)
#define TALITOS_SCATTER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11e4)
/* execution unit interrupt status registers */
#define TALITOS_DEUISR 0x2030 /* DES unit */
#define TALITOS_DEUISR_LO 0x2034
#define TALITOS_AESUISR 0x4030 /* AES unit */
#define TALITOS_AESUISR_LO 0x4034
#define TALITOS_MDEUISR 0x6030 /* message digest unit */
#define TALITOS_MDEUISR_LO 0x6034
#define TALITOS_AFEUISR 0x8030 /* arc4 unit */
#define TALITOS_AFEUISR_LO 0x8034
#define TALITOS_RNGUISR 0xa030 /* random number unit */
#define TALITOS_RNGUISR_LO 0xa034
#define TALITOS_RNGUSR 0xa028 /* rng status */
#define TALITOS_RNGUSR_LO 0xa02c
#define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */
#define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */
#define TALITOS_RNGUDSR 0xa010 /* data size */
#define TALITOS_RNGUDSR_LO 0xa014
#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */
#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */
#define TALITOS_RNGURCR 0xa018 /* reset control */
#define TALITOS_RNGURCR_LO 0xa01c
#define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */
#define TALITOS_PKEUISR 0xc030 /* public key unit */
#define TALITOS_PKEUISR_LO 0xc034
#define TALITOS_KEUISR 0xe030 /* kasumi unit */
#define TALITOS_KEUISR_LO 0xe034
#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
#define TALITOS_CRCUISR_LO 0xf034
/*
* talitos descriptor header (hdr) bits
*/
/* written back when done */
#define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000)
/* primary execution unit select */
#define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000)
#define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000)
#define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000)
#define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000)
#define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000)
#define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000)
#define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000)
#define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000)
#define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000)
#define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000)
/* primary execution unit mode (MODE0) and derivatives */
#define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000)
#define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000)
#define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA1_HMAC (DESC_HDR_MODE0_MDEU_SHA1 | \
DESC_HDR_MODE0_MDEU_HMAC)
/* secondary execution unit select (SEL1) */
#define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000)
#define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000)
#define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000)
#define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000)
/* secondary execution unit mode (MODE1) and derivatives */
#define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000)
#define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800)
#define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400)
#define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100)
#define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \
DESC_HDR_MODE1_MDEU_HMAC)
/* direction of overall data flow (DIR) */
#define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002)
/* request done notification (DN) */
#define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001)
/* descriptor types */
#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3)
#define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3)
#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3)
#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3)
/* link table extent field bits */
#define DESC_PTR_LNKTBL_JUMP 0x80
#define DESC_PTR_LNKTBL_RETURN 0x02
#define DESC_PTR_LNKTBL_NEXT 0x01
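Editorial note: the header bits above are meant to be OR-ed together into a descriptor's hdr word. An illustrative composition (inferred from the defines, not copied from talitos.c) for a descriptor that computes HMAC-SHA256 on the primary MDEU:

/* Illustrative only: primary unit MDEU-A, HMAC-SHA256 with hardware
 * init/padding, common non-snooping descriptor type, done notification. */
__be32 hdr = DESC_HDR_SEL0_MDEUA |
	     DESC_HDR_MODE0_MDEU_INIT |
	     DESC_HDR_MODE0_MDEU_PAD |
	     DESC_HDR_MODE0_MDEU_SHA256_HMAC |
	     DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
	     DESC_HDR_DONE_NOTIFY;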

include/crypto/hash.h (new file, 154 lines)

@ -0,0 +1,154 @@
/*
* Hash: Hash algorithms under the crypto API
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
#include <linux/crypto.h>
struct crypto_ahash {
struct crypto_tfm base;
};
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_ahash *)tfm;
}
static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
mask &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_AHASH;
mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
}
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_free_tfm(crypto_ahash_tfm(tfm));
}
static inline unsigned int crypto_ahash_alignmask(
struct crypto_ahash *tfm)
{
return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
}
static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm)
{
return &crypto_ahash_tfm(tfm)->crt_ahash;
}
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
return crypto_ahash_crt(tfm)->digestsize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}
static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}
static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
static inline struct crypto_ahash *crypto_ahash_reqtfm(
struct ahash_request *req)
{
return __crypto_ahash_cast(req->base.tfm);
}
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
return crypto_ahash_crt(tfm)->reqsize;
}
static inline int crypto_ahash_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
struct ahash_tfm *crt = crypto_ahash_crt(tfm);
return crt->setkey(tfm, key, keylen);
}
static inline int crypto_ahash_digest(struct ahash_request *req)
{
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
return crt->digest(req);
}
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
req->base.tfm = crypto_ahash_tfm(tfm);
}
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
req = kmalloc(sizeof(struct ahash_request) +
crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
static inline void ahash_request_free(struct ahash_request *req)
{
kfree(req);
}
static inline struct ahash_request *ahash_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct ahash_request, base);
}
static inline void ahash_request_set_callback(struct ahash_request *req,
u32 flags,
crypto_completion_t complete,
void *data)
{
req->base.complete = complete;
req->base.data = data;
req->base.flags = flags;
}
static inline void ahash_request_set_crypt(struct ahash_request *req,
struct scatterlist *src, u8 *result,
unsigned int nbytes)
{
req->src = src;
req->nbytes = nbytes;
req->result = result;
}
#endif /* _CRYPTO_HASH_H */
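Editorial note: the new asynchronous hash API mirrors ablkcipher — a tfm plus a heap-allocated request that carries the scatterlist and result pointer. A sketch of a one-shot digest, assuming a synchronously completing implementation such as the crc32c ahash added in this series (an async driver would instead return -EINPROGRESS and finish via the callback):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Illustrative only: one-shot digest through the ahash interface. */
static int ahash_digest_once(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, 0, NULL, NULL);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);	/* 0 here; -EINPROGRESS if async */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}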

include/crypto/internal/hash.h (new file, 78 lines)

@ -0,0 +1,78 @@
/*
* Hash algorithms.
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_INTERNAL_HASH_H
#define _CRYPTO_INTERNAL_HASH_H
#include <crypto/algapi.h>
#include <crypto/hash.h>
struct ahash_request;
struct scatterlist;
struct crypto_hash_walk {
char *data;
unsigned int offset;
unsigned int alignmask;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
unsigned int flags;
};
extern const struct crypto_type crypto_ahash_type;
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline struct ahash_alg *crypto_ahash_alg(
struct crypto_ahash *tfm)
{
return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash;
}
static inline int ahash_enqueue_request(struct crypto_queue *queue,
struct ahash_request *request)
{
return crypto_enqueue_request(queue, &request->base);
}
static inline struct ahash_request *ahash_dequeue_request(
struct crypto_queue *queue)
{
return ahash_request_cast(crypto_dequeue_request(queue));
}
static inline void *ahash_request_ctx(struct ahash_request *req)
{
return req->__ctx;
}
static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
struct crypto_ahash *tfm)
{
return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
}
#endif /* _CRYPTO_INTERNAL_HASH_H */
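Editorial note: crypto_hash_walk_first()/crypto_hash_walk_done() are the scatterlist-walking helpers added by this series; each step maps one contiguous chunk at walk.data and returns its length, with 0 meaning done and a negative value meaning error. A sketch of an ahash .update hook built on them (my_hash_ctx and my_hash_mix() are hypothetical):

static int my_hash_update(struct ahash_request *req)
{
	struct my_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		my_hash_mix(ctx, walk.data, nbytes);	/* hypothetical */

	return nbytes;	/* 0 on success, negative on error */
}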

include/linux/crypto.h

@ -30,15 +30,17 @@
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
#define CRYPTO_ALG_TYPE_HASH 0x00000003
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008
#define CRYPTO_ALG_TYPE_AEAD 0x00000009
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
#define CRYPTO_ALG_TYPE_HASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_LARVAL 0x00000010
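Editorial note: the renumbering is what makes single-mask grouping work. DIGEST (0x8 = 1000b), HASH (0x9 = 1001b) and AHASH (0xa = 1010b) now sit in one bit-adjacent block, so HASH_MASK (0xe) matches only the two synchronous types while AHASH_MASK (0xc) matches all three — which is how an AHASH-masked lookup (see crypto_alloc_ahash() in crypto/hash.h above) can also find and wrap a synchronous digest. A compile-time restatement of that relation (illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/crypto.h>

static inline void crypto_hash_mask_sanity(void)
{
	/* A synchronous digest satisfies an AHASH-masked lookup... */
	BUILD_BUG_ON((CRYPTO_ALG_TYPE_DIGEST ^ CRYPTO_ALG_TYPE_AHASH) &
		     CRYPTO_ALG_TYPE_AHASH_MASK);
	/* ...but an AHASH does not satisfy a HASH-masked lookup. */
	BUILD_BUG_ON(!((CRYPTO_ALG_TYPE_AHASH ^ CRYPTO_ALG_TYPE_HASH) &
		       CRYPTO_ALG_TYPE_HASH_MASK));
}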
@ -102,6 +104,7 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_ahash;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
@ -131,6 +134,16 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
struct scatterlist *src;
u8 *result;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
@ -195,6 +208,17 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
};
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
@ -272,6 +296,7 @@ struct compress_alg {
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_hash cra_u.hash
#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
struct crypto_alg {
@ -298,6 +323,7 @@ struct crypto_alg {
struct cipher_alg cipher;
struct digest_alg digest;
struct hash_alg hash;
struct ahash_alg ahash;
struct compress_alg compress;
} cra_u;
@ -383,6 +409,18 @@ struct hash_tfm {
unsigned int digestsize;
};
struct ahash_tfm {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
unsigned int reqsize;
};
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
@ -397,6 +435,7 @@ struct compress_tfm {
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
struct crypto_tfm {
@ -409,6 +448,7 @@ struct crypto_tfm {
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
struct ahash_tfm ahash;
struct compress_tfm compress;
} crt_u;