crypto: hash - Add shash interface (7b5a080b3c)
The shash interface replaces the current synchronous hash interface. It improves over hash in two ways. Firstly, shash is reentrant: the same tfm may be used by two threads simultaneously, because all hashing state is stored in a local descriptor. The other enhancement is that shash no longer takes scatterlist entries; shash is specifically designed for synchronous algorithms, for which scatterlists are unnecessary.

All existing hash users will be converted to shash once the algorithms have been completely converted.

There is also a new finup function that combines update with final. This will be extended to ahash once the algorithm conversion is done.

This is also the first time that an algorithm type has its own registration function. Existing algorithm types will be converted to this scheme in due course.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
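For orientation, here is a minimal usage sketch (not part of the patch) that hashes a flat buffer through the new interface. The wrapper name and the choice of "sha1" are illustrative only; error handling is abbreviated. Note that the calls take plain pointers rather than scatterlists, and that all hashing state lives in the caller-allocated descriptor, which is what lets one tfm be shared between threads:

/*
 * Hypothetical caller of the new shash interface.  "sha1" and the
 * function name are placeholders; the crypto_shash_* calls are the
 * ones introduced by this patch.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_shash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * All per-hash state lives in the descriptor, not the tfm,
	 * so one tfm can serve two threads as long as each brings
	 * its own descriptor.
	 */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;	/* no CRYPTO_TFM_REQ_* flags requested */

	err = crypto_shash_init(desc);
	if (!err)
		/* finup folds the last update and the final into one call. */
		err = crypto_shash_finup(desc, data, len, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}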
include/crypto/internal/hash.h (87 lines, 1.9 KiB, C)
/*
 * Hash algorithms.
 *
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#ifndef _CRYPTO_INTERNAL_HASH_H
#define _CRYPTO_INTERNAL_HASH_H

#include <crypto/algapi.h>
#include <crypto/hash.h>

struct ahash_request;
struct scatterlist;

struct crypto_hash_walk {
	char *data;

	unsigned int offset;
	unsigned int alignmask;

	struct page *pg;
	unsigned int entrylen;

	unsigned int total;
	struct scatterlist *sg;

	unsigned int flags;
};

extern const struct crypto_type crypto_ahash_type;

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk);

int crypto_register_shash(struct shash_alg *alg);
int crypto_unregister_shash(struct shash_alg *alg);

static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline struct ahash_alg *crypto_ahash_alg(
	struct crypto_ahash *tfm)
{
	return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash;
}

static inline int ahash_enqueue_request(struct crypto_queue *queue,
					struct ahash_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ahash_request *ahash_dequeue_request(
	struct crypto_queue *queue)
{
	return ahash_request_cast(crypto_dequeue_request(queue));
}

static inline void *ahash_request_ctx(struct ahash_request *req)
{
	return req->__ctx;
}

static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
				     struct crypto_ahash *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
}

static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

#endif	/* _CRYPTO_INTERNAL_HASH_H */
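Providers hook into the new per-type registration path by filling in a struct shash_alg and calling crypto_register_shash() as declared in the header above. The sketch below assumes the field layout added by this patch; "exhash", its handlers, and the sizes are hypothetical placeholders, not a real algorithm:

/*
 * Hypothetical shash provider module.  The init/update/final bodies
 * are stubs; a real algorithm would keep its state in
 * shash_desc_ctx(desc), sized by .descsize below.
 */
#include <crypto/internal/hash.h>
#include <linux/module.h>

static int exhash_init(struct shash_desc *desc)
{
	/* Reset the per-descriptor hashing state. */
	return 0;
}

static int exhash_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len)
{
	/* Fold len bytes of data into the descriptor state. */
	return 0;
}

static int exhash_final(struct shash_desc *desc, u8 *out)
{
	/* Write the digest into out. */
	return 0;
}

static struct shash_alg exhash_alg = {
	.init		= exhash_init,
	.update		= exhash_update,
	.final		= exhash_final,
	.descsize	= 16,	/* illustrative per-descriptor state size */
	.digestsize	= 16,	/* illustrative digest size */
	.base		= {
		.cra_name	= "exhash",
		.cra_blocksize	= 64,
		.cra_module	= THIS_MODULE,
	},
};

static int __init exhash_mod_init(void)
{
	return crypto_register_shash(&exhash_alg);
}

static void __exit exhash_mod_exit(void)
{
	crypto_unregister_shash(&exhash_alg);
}

module_init(exhash_mod_init);
module_exit(exhash_mod_exit);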