Fix 3.17-rc1 regression introduced by switching the DM crypt target to using per-bio data.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJUAHLMAAoJEMUj8QotnQNa2ooH/39NBVEhppKaIHzqR6Ps9mI/
 B8kH3eDo9gNK5RAvu7E6QEW3ASSEBVk15DtdjtcnSCzDGlz+cWYCp0KXeptt9GDH
 3DtIg2hKhVddl4XusgO/GpCYZjQR75LDnNryOZTia+dFogP3HWPhZpg7DtQ9o+Ac
 9FChLFHPDy/yQ4QYDuepL3TgeTIDJoQTRkGvzOeYXnsZHU2v2nTJin3qQetDhd51
 2OEedOdrJ9znkj5AI3xL5AXTwl7231c8JZrMbz0oKmUSCvbqY7rrgWr/dFZM+mIt
 OwY4KEDdI06iHuNc2LhzUjbr6GaqTAnB3qSAZ8cSNBLlI+Lg5TatFO7YluUDmD4=
 =6jY1
 -----END PGP SIGNATURE-----

Merge tag 'dm-3.17-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fix from Mike Snitzer:
 "Fix a 3.17-rc1 regression introduced by switching the DM crypt target
  to using per-bio data"

* tag 'dm-3.17-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: fix access beyond the end of allocated space
Linus Torvalds 2014-08-29 11:49:10 -07:00
commit ef13c8afa6
1 changed file with 19 additions and 6 deletions

drivers/md/dm-crypt.c

@@ -1688,6 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         unsigned int key_size, opt_params;
         unsigned long long tmpll;
         int ret;
+        size_t iv_size_padding;
         struct dm_arg_set as;
         const char *opt_string;
         char dummy;
@@ -1724,20 +1725,32 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
         cc->dmreq_start = sizeof(struct ablkcipher_request);
         cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-        cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-        cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-                           ~(crypto_tfm_ctx_alignment() - 1);
+        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+        if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+                /* Allocate the padding exactly */
+                iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+                                & crypto_ablkcipher_alignmask(any_tfm(cc));
+        } else {
+                /*
+                 * If the cipher requires greater alignment than kmalloc
+                 * alignment, we don't know the exact position of the
+                 * initialization vector. We must assume worst case.
+                 */
+                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+        }
 
         cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-                        sizeof(struct dm_crypt_request) + cc->iv_size);
+                        sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
         if (!cc->req_pool) {
                 ti->error = "Cannot allocate crypt request mempool";
                 goto bad;
         }
 
         cc->per_bio_data_size = ti->per_bio_data_size =
-                sizeof(struct dm_crypt_io) + cc->dmreq_start +
-                sizeof(struct dm_crypt_request) + cc->iv_size;
+                ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
+                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
+                      ARCH_KMALLOC_MINALIGN);
 
         cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
         if (!cc->page_pool) {
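
The arithmetic the fix introduces is easier to see with concrete numbers. The sketch below is illustrative only: standalone userspace C, not kernel code, and the sizes and the pad_to_mask() helper are invented for the example. It demonstrates the -(offset) & alignmask idiom from the patch, which adds exactly enough padding after struct dm_crypt_request so that the per-request IV lands on a boundary the cipher accepts while staying inside the allocation; as the patch's comment notes, when the cipher's alignmask is not smaller than the kmalloc alignment, the exact IV offset cannot be known up front and the worst case of alignmask extra bytes is reserved instead.

#include <stdio.h>
#include <stddef.h>

/*
 * Round-up idiom from the patch: (-offset) & alignmask is the number of
 * bytes needed to bring `offset` up to the next multiple of
 * (alignmask + 1), assuming alignmask + 1 is a power of two.
 * pad_to_mask() is a hypothetical helper used only for this sketch.
 */
static size_t pad_to_mask(size_t offset, size_t alignmask)
{
    return (0 - offset) & alignmask;
}

int main(void)
{
    /* Example numbers only; real values come from the crypto API. */
    size_t dmreq_start = 204;   /* offset of struct dm_crypt_request */
    size_t dmreq_size  = 24;    /* stand-in for sizeof(struct dm_crypt_request) */
    size_t alignmask   = 15;    /* cipher wants a 16-byte-aligned IV */

    size_t iv_size_padding = pad_to_mask(dmreq_start + dmreq_size, alignmask);
    size_t iv_offset = dmreq_start + dmreq_size + iv_size_padding;

    /* 204 + 24 = 228, so 12 bytes of padding place the IV at offset 240. */
    printf("iv_size_padding = %zu, IV at offset %zu\n",
           iv_size_padding, iv_offset);
    return 0;
}

With these example values the 228-byte request header needs 12 bytes of padding, so the IV starts at offset 240 (a multiple of 16); the mempool element size and per-bio data size in the hunk above now include that padding, which is what prevents the access beyond the end of the allocation.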