crypto: algif_skcipher - Use chunksize instead of blocksize

When algif_skcipher does a partial operation it always processes data
that is a multiple of blocksize.  However, for algorithms such as
CTR this is wrong: even though such an algorithm can process any
number of bytes overall, the partial block must come at the very end
and not in the middle.

This is exactly what chunksize is meant to describe, so this patch
changes blocksize to chunksize.

Fixes: 8ff590903d ("crypto: algif_skcipher - User-space...")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 5b0fe95523
parent 7a627db9ca
Author: Herbert Xu, 2019-09-10 11:42:05 +10:00
3 changed files with 31 additions and 31 deletions
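To make the bug concrete (an illustration only, not part of the commit):
for "ctr(aes)" the block size is 1, because CTR turns AES into a stream
cipher, while the chunk size is 16, because the counter IV only advances
in whole AES blocks.  A partial operation must therefore stop on a
16-byte boundary, as the sketch below shows; the helper name
round_partial is hypothetical.

/* Hypothetical helper: round a partial request down to whole granules. */
static unsigned int round_partial(unsigned int len, unsigned int granule)
{
	return len - len % granule;
}

/*
 * For a 100-byte partial request on ctr(aes):
 *
 *   round_partial(100, 1)  == 100  old code (blocksize): a 4-byte
 *                                  partial AES block is processed
 *                                  mid-stream, so the saved counter
 *                                  IV no longer matches the data
 *                                  already consumed.
 *   round_partial(100, 16) ==  96  new code (chunksize): the 4-byte
 *                                  tail is deferred to the very end
 *                                  of the operation.
 */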

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c

@@ -56,7 +56,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct alg_sock *pask = alg_sk(psk);
 	struct af_alg_ctx *ctx = ask->private;
 	struct crypto_skcipher *tfm = pask->private;
-	unsigned int bs = crypto_skcipher_blocksize(tfm);
+	unsigned int bs = crypto_skcipher_chunksize(tfm);
 	struct af_alg_async_req *areq;
 	int err = 0;
 	size_t len = 0;
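For reference, the rounding that consumes bs sits just below this hunk
in _skcipher_recvmsg(); a hedged sketch of that logic (paraphrased from
the surrounding code of this era, not shown in the diff):

	/*
	 * If more data is expected, only process whole "bs" units so
	 * that, with bs = chunksize, a partial CTR block can never be
	 * emitted in the middle of the stream.
	 */
	if (ctx->more || len < ctx->used)
		len -= len % bs;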

diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h

@@ -205,19 +205,6 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
 	return alg->max_keysize;
 }
 
-static inline unsigned int crypto_skcipher_alg_chunksize(
-	struct skcipher_alg *alg)
-{
-	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		return alg->base.cra_blocksize;
-
-	if (alg->base.cra_ablkcipher.encrypt)
-		return alg->base.cra_blocksize;
-
-	return alg->chunksize;
-}
-
 static inline unsigned int crypto_skcipher_alg_walksize(
 	struct skcipher_alg *alg)
 {
@@ -231,23 +218,6 @@ static inline unsigned int crypto_skcipher_alg_walksize(
 	return alg->walksize;
 }
 
-/**
- * crypto_skcipher_chunksize() - obtain chunk size
- * @tfm: cipher handle
- *
- * The block size is set to one for ciphers such as CTR.  However,
- * you still need to provide incremental updates in multiples of
- * the underlying block size as the IV does not have sub-block
- * granularity.  This is known in this API as the chunk size.
- *
- * Return: chunk size in bytes
- */
-static inline unsigned int crypto_skcipher_chunksize(
-	struct crypto_skcipher *tfm)
-{
-	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
-}
-
 /**
  * crypto_skcipher_walksize() - obtain walk size
  * @tfm: cipher handle

diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h

@@ -304,6 +304,36 @@ static inline unsigned int crypto_skcipher_blocksize(
 	return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
 }
 
+static inline unsigned int crypto_skcipher_alg_chunksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->chunksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR.  However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity.  This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
 static inline unsigned int crypto_sync_skcipher_blocksize(
 	struct crypto_sync_skcipher *tfm)
 {
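Moving these helpers out of include/crypto/internal/skcipher.h makes
crypto_skcipher_chunksize() visible to users of the public header such
as algif_skcipher.  A minimal, hypothetical caller (kernel sketch; the
function name chunksize_example is not from the commit):

#include <linux/err.h>
#include <linux/printk.h>
#include <crypto/skcipher.h>

static int chunksize_example(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* For ctr(aes): blocksize is 1, chunksize is 16. */
	pr_info("blocksize=%u chunksize=%u\n",
		crypto_skcipher_blocksize(tfm),
		crypto_skcipher_chunksize(tfm));

	crypto_free_skcipher(tfm);
	return 0;
}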