From 48fc267ee6f34f67db42a49f386fdd367cac6f8a Mon Sep 17 00:00:00 2001
From: Steffen Klassert
Date: Fri, 13 Aug 2010 10:10:46 -0400
Subject: [PATCH 01/14] MAINTAINERS: Add maintainer entries for padata/pcrypt

Signed-off-by: Steffen Klassert
Signed-off-by: Herbert Xu
---
 MAINTAINERS | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 11e34d5272b8..2dfc90c0243c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4315,6 +4315,15 @@ L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/busses/i2c-pasemi.c
 
+PADATA PARALLEL EXECUTION MECHANISM
+M:	Steffen Klassert
+L:	linux-kernel@vger.kernel.org
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	kernel/padata.c
+F:	include/linux/padata.h
+F:	Documentation/padata.txt
+
 PANASONIC LAPTOP ACPI EXTRAS DRIVER
 M:	Harald Welte
 L:	platform-driver-x86@vger.kernel.org
@@ -4435,6 +4444,13 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/pcnet32.c
 
+PCRYPT PARALLEL CRYPTO ENGINE
+M:	Steffen Klassert
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	crypto/pcrypt.c
+F:	include/crypto/pcrypt.h
+
 PER-TASK DELAY ACCOUNTING
 M:	Balbir Singh
 S:	Maintained

From 57a2ce5f54f3120467be760662c6ef3bea3f9579 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Fri, 3 Sep 2010 19:09:46 +0800
Subject: [PATCH 02/14] padata: add missing __percpu markup in
 include/linux/padata.h

parallel_data->queue is a percpu pointer but was missing
__percpu markup. Add it.

Signed-off-by: Namhyung Kim
Acked-by: Steffen Klassert
Signed-off-by: Herbert Xu
---
 include/linux/padata.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/linux/padata.h b/include/linux/padata.h
index bdcd1e9eacea..4633b2f726b6 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -127,8 +127,8 @@ struct padata_cpumask {
  */
 struct parallel_data {
 	struct padata_instance		*pinst;
-	struct padata_parallel_queue	*pqueue;
-	struct padata_serial_queue	*squeue;
+	struct padata_parallel_queue	__percpu *pqueue;
+	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			seq_nr;
 	atomic_t			reorder_objects;
 	atomic_t			refcnt;

From b744c679f62b368cb94c21c1dcd4618e42d88d63 Mon Sep 17 00:00:00 2001
From: Dmitry Kasatkin
Date: Fri, 3 Sep 2010 19:13:55 +0800
Subject: [PATCH 03/14] crypto: updates to enable omap aes

Signed-off-by: Dmitry Kasatkin
Signed-off-by: Herbert Xu
---
 arch/arm/mach-omap2/clock2420_data.c |  2 +-
 arch/arm/mach-omap2/clock2430_data.c |  2 +-
 arch/arm/mach-omap2/clock3xxx_data.c |  2 +-
 arch/arm/mach-omap2/devices.c        | 71 ++++++++++++++++++++++++++++
 4 files changed, 74 insertions(+), 3 deletions(-)

diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 37d65d62ed8f..5f2066a6ba74 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1838,7 +1838,7 @@ static struct omap_clk omap2420_clks[] = {
 	CLK(NULL,	"des_ick",	&des_ick,	CK_242X),
 	CLK("omap-sham",	"ick",	&sha_ick,	CK_242X),
 	CLK("omap_rng",	"ick",	&rng_ick,	CK_242X),
-	CLK(NULL,	"aes_ick",	&aes_ick,	CK_242X),
+	CLK("omap-aes",	"ick",	&aes_ick,	CK_242X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_242X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_242X),
 	CLK("musb_hdrc",	"fck",	&osc_ck,	CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index b33118fb6a87..701a1716019e 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1926,7 +1926,7 @@ static struct omap_clk omap2430_clks[] = {
 	CLK(NULL,	"des_ick",	&des_ick,	CK_243X),
CLK("omap-sham", "ick", &sha_ick, CK_243X), CLK("omap_rng", "ick", &rng_ick, CK_243X), - CLK(NULL, "aes_ick", &aes_ick, CK_243X), + CLK("omap-aes", "ick", &aes_ick, CK_243X), CLK(NULL, "pka_ick", &pka_ick, CK_243X), CLK(NULL, "usb_fck", &usb_fck, CK_243X), CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index d33744117ce2..e3e65d1c86b2 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c @@ -3288,7 +3288,7 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2 | CK_AM35XX), CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX), CLK(NULL, "icr_ick", &icr_ick, CK_343X), - CLK(NULL, "aes2_ick", &aes2_ick, CK_343X), + CLK("omap-aes", "ick", &aes2_ick, CK_343X), CLK("omap-sham", "ick", &sha12_ick, CK_343X), CLK(NULL, "des2_ick", &des2_ick, CK_343X), CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX), diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 03e6c9ed82a4..072893a7d136 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -552,6 +552,76 @@ static void omap_init_sham(void) static inline void omap_init_sham(void) { } #endif +#if defined(CONFIG_CRYPTO_DEV_OMAP_AES) || defined(CONFIG_CRYPTO_DEV_OMAP_AES_MODULE) + +#ifdef CONFIG_ARCH_OMAP24XX +static struct resource omap2_aes_resources[] = { + { + .start = OMAP24XX_SEC_AES_BASE, + .end = OMAP24XX_SEC_AES_BASE + 0x4C, + .flags = IORESOURCE_MEM, + }, + { + .start = OMAP24XX_DMA_AES_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = OMAP24XX_DMA_AES_RX, + .flags = IORESOURCE_DMA, + } +}; +static int omap2_aes_resources_sz = ARRAY_SIZE(omap2_aes_resources); +#else +#define omap2_aes_resources NULL +#define omap2_aes_resources_sz 0 +#endif + +#ifdef CONFIG_ARCH_OMAP34XX +static struct resource omap3_aes_resources[] = { + { + .start = OMAP34XX_SEC_AES_BASE, + .end = OMAP34XX_SEC_AES_BASE + 0x4C, + .flags = IORESOURCE_MEM, + }, + { + .start = OMAP34XX_DMA_AES2_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = OMAP34XX_DMA_AES2_RX, + .flags = IORESOURCE_DMA, + } +}; +static int omap3_aes_resources_sz = ARRAY_SIZE(omap3_aes_resources); +#else +#define omap3_aes_resources NULL +#define omap3_aes_resources_sz 0 +#endif + +static struct platform_device aes_device = { + .name = "omap-aes", + .id = -1, +}; + +static void omap_init_aes(void) +{ + if (cpu_is_omap24xx()) { + aes_device.resource = omap2_aes_resources; + aes_device.num_resources = omap2_aes_resources_sz; + } else if (cpu_is_omap34xx()) { + aes_device.resource = omap3_aes_resources; + aes_device.num_resources = omap3_aes_resources_sz; + } else { + pr_err("%s: platform not supported\n", __func__); + return; + } + platform_device_register(&aes_device); +} + +#else +static inline void omap_init_aes(void) { } +#endif + /*-------------------------------------------------------------------------*/ #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) @@ -908,6 +978,7 @@ static int __init omap2_init_devices(void) omap_hdq_init(); omap_init_sti(); omap_init_sham(); + omap_init_aes(); omap_init_vout(); return 0; From 537559a5b3ef854772bd89fbb43aa77d0bbfb721 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 3 Sep 2010 19:16:02 +0800 Subject: [PATCH 04/14] crypto: omap-aes - OMAP2/3 AES hw accelerator driver Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 8 + drivers/crypto/Makefile | 1 + drivers/crypto/omap-aes.c | 948 
++++++++++++++++++++++++++++++++++++++ 3 files changed, 957 insertions(+) create mode 100644 drivers/crypto/omap-aes.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index fbf94cf496f0..1ce702170e79 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -243,4 +243,12 @@ config CRYPTO_DEV_OMAP_SHAM OMAP processors have SHA1/MD5 hw accelerator. Select this if you want to use the OMAP module for SHA1/MD5 algorithms. +config CRYPTO_DEV_OMAP_AES + tristate "Support for OMAP AES hw engine" + depends on ARCH_OMAP2 || ARCH_OMAP3 + select CRYPTO_AES + help + OMAP processors have AES module accelerator. Select this if you + want to use the OMAP module for AES algorithms. + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 6dbbe00c4524..64289c678e98 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o +obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c new file mode 100644 index 000000000000..799ca517c121 --- /dev/null +++ b/drivers/crypto/omap-aes.c @@ -0,0 +1,948 @@ +/* + * Cryptographic API. + * + * Support for OMAP AES HW acceleration. + * + * Copyright (c) 2010 Nokia Corporation + * Author: Dmitry Kasatkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* OMAP TRM gives bitfields as start:end, where start is the higher bit + number. 
For example 7:0 */ +#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) +#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) + +#define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04)) +#define AES_REG_IV(x) (0x20 + ((x) * 0x04)) + +#define AES_REG_CTRL 0x30 +#define AES_REG_CTRL_CTR_WIDTH (1 << 7) +#define AES_REG_CTRL_CTR (1 << 6) +#define AES_REG_CTRL_CBC (1 << 5) +#define AES_REG_CTRL_KEY_SIZE (3 << 3) +#define AES_REG_CTRL_DIRECTION (1 << 2) +#define AES_REG_CTRL_INPUT_READY (1 << 1) +#define AES_REG_CTRL_OUTPUT_READY (1 << 0) + +#define AES_REG_DATA 0x34 +#define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04)) + +#define AES_REG_REV 0x44 +#define AES_REG_REV_MAJOR 0xF0 +#define AES_REG_REV_MINOR 0x0F + +#define AES_REG_MASK 0x48 +#define AES_REG_MASK_SIDLE (1 << 6) +#define AES_REG_MASK_START (1 << 5) +#define AES_REG_MASK_DMA_OUT_EN (1 << 3) +#define AES_REG_MASK_DMA_IN_EN (1 << 2) +#define AES_REG_MASK_SOFTRESET (1 << 1) +#define AES_REG_AUTOIDLE (1 << 0) + +#define AES_REG_SYSSTATUS 0x4C +#define AES_REG_SYSSTATUS_RESETDONE (1 << 0) + +#define DEFAULT_TIMEOUT (5*HZ) + +#define FLAGS_MODE_MASK 0x000f +#define FLAGS_ENCRYPT BIT(0) +#define FLAGS_CBC BIT(1) +#define FLAGS_GIV BIT(2) + +#define FLAGS_NEW_KEY BIT(4) +#define FLAGS_NEW_IV BIT(5) +#define FLAGS_INIT BIT(6) +#define FLAGS_FAST BIT(7) +#define FLAGS_BUSY 8 + +struct omap_aes_ctx { + struct omap_aes_dev *dd; + + int keylen; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + unsigned long flags; +}; + +struct omap_aes_reqctx { + unsigned long mode; +}; + +#define OMAP_AES_QUEUE_LENGTH 1 +#define OMAP_AES_CACHE_SIZE 0 + +struct omap_aes_dev { + struct list_head list; + unsigned long phys_base; + void __iomem *io_base; + struct clk *iclk; + struct omap_aes_ctx *ctx; + struct device *dev; + unsigned long flags; + + u32 *iv; + u32 ctrl; + + spinlock_t lock; + struct crypto_queue queue; + + struct tasklet_struct task; + + struct ablkcipher_request *req; + size_t total; + struct scatterlist *in_sg; + size_t in_offset; + struct scatterlist *out_sg; + size_t out_offset; + + size_t buflen; + void *buf_in; + size_t dma_size; + int dma_in; + int dma_lch_in; + dma_addr_t dma_addr_in; + void *buf_out; + int dma_out; + int dma_lch_out; + dma_addr_t dma_addr_out; +}; + +/* keep registered devices data here */ +static LIST_HEAD(dev_list); +static DEFINE_SPINLOCK(list_lock); + +static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) +{ + return __raw_readl(dd->io_base + offset); +} + +static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, + u32 value) +{ + __raw_writel(value, dd->io_base + offset); +} + +static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, + u32 value, u32 mask) +{ + u32 val; + + val = omap_aes_read(dd, offset); + val &= ~mask; + val |= value; + omap_aes_write(dd, offset, val); +} + +static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, + u32 *value, int count) +{ + for (; count--; value++, offset += 4) + omap_aes_write(dd, offset, *value); +} + +static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) +{ + unsigned long timeout = jiffies + DEFAULT_TIMEOUT; + + while (!(omap_aes_read(dd, offset) & bit)) { + if (time_is_before_jiffies(timeout)) { + dev_err(dd->dev, "omap-aes timeout\n"); + return -ETIMEDOUT; + } + } + return 0; +} + +static int omap_aes_hw_init(struct omap_aes_dev *dd) +{ + int err = 0; + + clk_enable(dd->iclk); + if (!(dd->flags & FLAGS_INIT)) { + /* is it necessary to reset before every operation? 
*/ + omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, + AES_REG_MASK_SOFTRESET); + /* + * prevent OCP bus error (SRESP) in case an access to the module + * is performed while the module is coming out of soft reset + */ + __asm__ __volatile__("nop"); + __asm__ __volatile__("nop"); + + err = omap_aes_wait(dd, AES_REG_SYSSTATUS, + AES_REG_SYSSTATUS_RESETDONE); + if (!err) + dd->flags |= FLAGS_INIT; + } + + return err; +} + +static void omap_aes_hw_cleanup(struct omap_aes_dev *dd) +{ + clk_disable(dd->iclk); +} + +static void omap_aes_write_ctrl(struct omap_aes_dev *dd) +{ + unsigned int key32; + int i; + u32 val, mask; + + val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); + if (dd->flags & FLAGS_CBC) + val |= AES_REG_CTRL_CBC; + if (dd->flags & FLAGS_ENCRYPT) + val |= AES_REG_CTRL_DIRECTION; + + if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) && + !(dd->ctx->flags & FLAGS_NEW_KEY)) + goto out; + + /* only need to write control registers for new settings */ + + dd->ctrl = val; + + val = 0; + if (dd->dma_lch_out >= 0) + val |= AES_REG_MASK_DMA_OUT_EN; + if (dd->dma_lch_in >= 0) + val |= AES_REG_MASK_DMA_IN_EN; + + mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN; + + omap_aes_write_mask(dd, AES_REG_MASK, val, mask); + + pr_debug("Set key\n"); + key32 = dd->ctx->keylen / sizeof(u32); + /* set a key */ + for (i = 0; i < key32; i++) { + omap_aes_write(dd, AES_REG_KEY(i), + __le32_to_cpu(dd->ctx->key[i])); + } + dd->ctx->flags &= ~FLAGS_NEW_KEY; + + if (dd->flags & FLAGS_NEW_IV) { + pr_debug("Set IV\n"); + omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4); + dd->flags &= ~FLAGS_NEW_IV; + } + + mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | + AES_REG_CTRL_KEY_SIZE; + + omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask); + +out: + /* start DMA or disable idle mode */ + omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, + AES_REG_MASK_START); +} + +static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) +{ + struct omap_aes_dev *dd = NULL, *tmp; + + spin_lock_bh(&list_lock); + if (!ctx->dd) { + list_for_each_entry(tmp, &dev_list, list) { + /* FIXME: take fist available aes core */ + dd = tmp; + break; + } + ctx->dd = dd; + } else { + /* already found before */ + dd = ctx->dd; + } + spin_unlock_bh(&list_lock); + + return dd; +} + +static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) +{ + struct omap_aes_dev *dd = data; + + if (lch == dd->dma_lch_out) + tasklet_schedule(&dd->task); +} + +static int omap_aes_dma_init(struct omap_aes_dev *dd) +{ + int err = -ENOMEM; + + dd->dma_lch_out = -1; + dd->dma_lch_in = -1; + + dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); + dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); + dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE; + dd->buflen &= ~(AES_BLOCK_SIZE - 1); + + if (!dd->buf_in || !dd->buf_out) { + dev_err(dd->dev, "unable to alloc pages.\n"); + goto err_alloc; + } + + /* MAP here */ + dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { + dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + err = -EINVAL; + goto err_map_in; + } + + dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen, + DMA_FROM_DEVICE); + if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { + dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + err = -EINVAL; + goto err_map_out; + } + + err = omap_request_dma(dd->dma_in, "omap-aes-rx", + omap_aes_dma_callback, dd, 
&dd->dma_lch_in); + if (err) { + dev_err(dd->dev, "Unable to request DMA channel\n"); + goto err_dma_in; + } + err = omap_request_dma(dd->dma_out, "omap-aes-tx", + omap_aes_dma_callback, dd, &dd->dma_lch_out); + if (err) { + dev_err(dd->dev, "Unable to request DMA channel\n"); + goto err_dma_out; + } + + omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + + omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + + return 0; + +err_dma_out: + omap_free_dma(dd->dma_lch_in); +err_dma_in: + dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, + DMA_FROM_DEVICE); +err_map_out: + dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); +err_map_in: + free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); + free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); +err_alloc: + if (err) + pr_err("error: %d\n", err); + return err; +} + +static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) +{ + omap_free_dma(dd->dma_lch_out); + omap_free_dma(dd->dma_lch_in); + dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, + DMA_FROM_DEVICE); + dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); + free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); + free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); +} + +static void sg_copy_buf(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out) +{ + struct scatter_walk walk; + + if (!nbytes) + return; + + scatterwalk_start(&walk, sg); + scatterwalk_advance(&walk, start); + scatterwalk_copychunks(buf, &walk, nbytes, out); + scatterwalk_done(&walk, out, 0); +} + +static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, + size_t buflen, size_t total, int out) +{ + unsigned int count, off = 0; + + while (buflen && total) { + count = min((*sg)->length - *offset, total); + count = min(count, buflen); + + if (!count) + return off; + + sg_copy_buf(buf + off, *sg, *offset, count, out); + + off += count; + buflen -= count; + *offset += count; + total -= count; + + if (*offset == (*sg)->length) { + *sg = sg_next(*sg); + if (*sg) + *offset = 0; + else + total = 0; + } + } + + return off; +} + +static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, + dma_addr_t dma_addr_out, int length) +{ + struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct omap_aes_dev *dd = ctx->dd; + int len32; + + pr_debug("len: %d\n", length); + + dd->dma_size = length; + + if (!(dd->flags & FLAGS_FAST)) + dma_sync_single_for_device(dd->dev, dma_addr_in, length, + DMA_TO_DEVICE); + + len32 = DIV_ROUND_UP(length, sizeof(u32)); + + /* IN */ + omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32, + len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in, + OMAP_DMA_DST_SYNC); + + omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC, + dma_addr_in, 0, 0); + + /* OUT */ + omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32, + len32, 1, OMAP_DMA_SYNC_PACKET, + dd->dma_out, OMAP_DMA_SRC_SYNC); + + omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC, + dma_addr_out, 0, 0); + + omap_start_dma(dd->dma_lch_in); + 
omap_start_dma(dd->dma_lch_out); + + omap_aes_write_ctrl(dd); + + return 0; +} + +static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm( + crypto_ablkcipher_reqtfm(dd->req)); + int err, fast = 0, in, out; + size_t count; + dma_addr_t addr_in, addr_out; + + pr_debug("total: %d\n", dd->total); + + if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { + /* check for alignment */ + in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); + out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); + + fast = in && out; + } + + if (fast) { + count = min(dd->total, sg_dma_len(dd->in_sg)); + count = min(count, sg_dma_len(dd->out_sg)); + + if (count != dd->total) + return -EINVAL; + + pr_debug("fast\n"); + + err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } + + err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + return -EINVAL; + } + + addr_in = sg_dma_address(dd->in_sg); + addr_out = sg_dma_address(dd->out_sg); + + dd->flags |= FLAGS_FAST; + + } else { + /* use cache buffers */ + count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, + dd->buflen, dd->total, 0); + + addr_in = dd->dma_addr_in; + addr_out = dd->dma_addr_out; + + dd->flags &= ~FLAGS_FAST; + + } + + dd->total -= count; + + err = omap_aes_hw_init(dd); + + err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); + + return err; +} + +static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) +{ + struct omap_aes_ctx *ctx; + + pr_debug("err: %d\n", err); + + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req)); + + if (!dd->total) + dd->req->base.complete(&dd->req->base, err); +} + +static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) +{ + int err = 0; + size_t count; + + pr_debug("total: %d\n", dd->total); + + omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); + + omap_aes_hw_cleanup(dd); + + omap_stop_dma(dd->dma_lch_in); + omap_stop_dma(dd->dma_lch_out); + + if (dd->flags & FLAGS_FAST) { + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { + dma_sync_single_for_device(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out, + dd->buflen, dd->dma_size, 1); + if (count != dd->dma_size) { + err = -EINVAL; + pr_err("not all data converted: %u\n", count); + } + } + + if (err || !dd->total) + omap_aes_finish_req(dd, err); + + return err; +} + +static int omap_aes_handle_req(struct omap_aes_dev *dd) +{ + struct crypto_async_request *async_req, *backlog; + struct omap_aes_ctx *ctx; + struct omap_aes_reqctx *rctx; + struct ablkcipher_request *req; + unsigned long flags; + + if (dd->total) + goto start; + + spin_lock_irqsave(&dd->lock, flags); + backlog = crypto_get_backlog(&dd->queue); + async_req = crypto_dequeue_request(&dd->queue); + if (!async_req) + clear_bit(FLAGS_BUSY, &dd->flags); + spin_unlock_irqrestore(&dd->lock, flags); + + if (!async_req) + return 0; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + req = ablkcipher_request_cast(async_req); + + pr_debug("get new req\n"); + + /* assign new request to device */ + dd->req = req; + dd->total = req->nbytes; + dd->in_offset = 0; + dd->in_sg = req->src; + dd->out_offset = 0; + dd->out_sg = req->dst; + + rctx = 
ablkcipher_request_ctx(req); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); + rctx->mode &= FLAGS_MODE_MASK; + dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; + + dd->iv = req->info; + if ((dd->flags & FLAGS_CBC) && dd->iv) + dd->flags |= FLAGS_NEW_IV; + else + dd->flags &= ~FLAGS_NEW_IV; + + ctx->dd = dd; + if (dd->ctx != ctx) { + /* assign new context to device */ + dd->ctx = ctx; + ctx->flags |= FLAGS_NEW_KEY; + } + + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) + pr_err("request size is not exact amount of AES blocks\n"); + +start: + return omap_aes_crypt_dma_start(dd); +} + +static void omap_aes_task(unsigned long data) +{ + struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + int err; + + pr_debug("enter\n"); + + err = omap_aes_crypt_dma_stop(dd); + + err = omap_aes_handle_req(dd); + + pr_debug("exit\n"); +} + +static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) +{ + struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( + crypto_ablkcipher_reqtfm(req)); + struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct omap_aes_dev *dd; + unsigned long flags; + int err; + + pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, + !!(mode & FLAGS_ENCRYPT), + !!(mode & FLAGS_CBC)); + + dd = omap_aes_find_dev(ctx); + if (!dd) + return -ENODEV; + + rctx->mode = mode; + + spin_lock_irqsave(&dd->lock, flags); + err = ablkcipher_enqueue_request(&dd->queue, req); + spin_unlock_irqrestore(&dd->lock, flags); + + if (!test_and_set_bit(FLAGS_BUSY, &dd->flags)) + omap_aes_handle_req(dd); + + pr_debug("exit\n"); + + return err; +} + +/* ********************** ALG API ************************************ */ + +static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + pr_debug("enter, keylen: %d\n", keylen); + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + ctx->flags |= FLAGS_NEW_KEY; + + return 0; +} + +static int omap_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_ENCRYPT); +} + +static int omap_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, 0); +} + +static int omap_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); +} + +static int omap_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + return omap_aes_crypt(req, FLAGS_CBC); +} + +static int omap_aes_cra_init(struct crypto_tfm *tfm) +{ + pr_debug("enter\n"); + + tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); + + return 0; +} + +static void omap_aes_cra_exit(struct crypto_tfm *tfm) +{ + pr_debug("enter\n"); +} + +/* ********************** ALGS ************************************ */ + +static struct crypto_alg algs[] = { +{ + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_aes_cra_init, + .cra_exit = omap_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_ecb_encrypt, + .decrypt = omap_aes_ecb_decrypt, + } +}, +{ + .cra_name = 
"cbc(aes)", + .cra_driver_name = "cbc-aes-omap", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = omap_aes_cra_init, + .cra_exit = omap_aes_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_cbc_encrypt, + .decrypt = omap_aes_cbc_decrypt, + } +} +}; + +static int omap_aes_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct omap_aes_dev *dd; + struct resource *res; + int err = -ENOMEM, i, j; + u32 reg; + + dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL); + if (dd == NULL) { + dev_err(dev, "unable to alloc data struct.\n"); + goto err_data; + } + dd->dev = dev; + platform_set_drvdata(pdev, dd); + + spin_lock_init(&dd->lock); + crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); + + /* Get the base address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "invalid resource type\n"); + err = -ENODEV; + goto err_res; + } + dd->phys_base = res->start; + + /* Get the DMA */ + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (!res) + dev_info(dev, "no DMA info\n"); + else + dd->dma_out = res->start; + + /* Get the DMA */ + res = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (!res) + dev_info(dev, "no DMA info\n"); + else + dd->dma_in = res->start; + + /* Initializing the clock */ + dd->iclk = clk_get(dev, "ick"); + if (!dd->iclk) { + dev_err(dev, "clock intialization failed.\n"); + err = -ENODEV; + goto err_res; + } + + dd->io_base = ioremap(dd->phys_base, SZ_4K); + if (!dd->io_base) { + dev_err(dev, "can't ioremap\n"); + err = -ENOMEM; + goto err_io; + } + + clk_enable(dd->iclk); + reg = omap_aes_read(dd, AES_REG_REV); + dev_info(dev, "OMAP AES hw accel rev: %u.%u\n", + (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); + clk_disable(dd->iclk); + + tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd); + + err = omap_aes_dma_init(dd); + if (err) + goto err_dma; + + INIT_LIST_HEAD(&dd->list); + spin_lock(&list_lock); + list_add_tail(&dd->list, &dev_list); + spin_unlock(&list_lock); + + for (i = 0; i < ARRAY_SIZE(algs); i++) { + pr_debug("i: %d\n", i); + INIT_LIST_HEAD(&algs[i].cra_list); + err = crypto_register_alg(&algs[i]); + if (err) + goto err_algs; + } + + pr_info("probe() done\n"); + + return 0; +err_algs: + for (j = 0; j < i; j++) + crypto_unregister_alg(&algs[j]); + omap_aes_dma_cleanup(dd); +err_dma: + tasklet_kill(&dd->task); + iounmap(dd->io_base); +err_io: + clk_put(dd->iclk); +err_res: + kfree(dd); + dd = NULL; +err_data: + dev_err(dev, "initialization failed.\n"); + return err; +} + +static int omap_aes_remove(struct platform_device *pdev) +{ + struct omap_aes_dev *dd = platform_get_drvdata(pdev); + int i; + + if (!dd) + return -ENODEV; + + spin_lock(&list_lock); + list_del(&dd->list); + spin_unlock(&list_lock); + + for (i = 0; i < ARRAY_SIZE(algs); i++) + crypto_unregister_alg(&algs[i]); + + tasklet_kill(&dd->task); + omap_aes_dma_cleanup(dd); + iounmap(dd->io_base); + clk_put(dd->iclk); + kfree(dd); + dd = NULL; + + return 0; +} + +static struct platform_driver omap_aes_driver = { + .probe = omap_aes_probe, + .remove = omap_aes_remove, + .driver = { + .name = "omap-aes", + .owner = THIS_MODULE, + }, +}; + +static int __init 
omap_aes_mod_init(void) +{ + pr_info("loading %s driver\n", "omap-aes"); + + if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) { + pr_err("Unsupported cpu\n"); + return -ENODEV; + } + + return platform_driver_register(&omap_aes_driver); +} + +static void __exit omap_aes_mod_exit(void) +{ + platform_driver_unregister(&omap_aes_driver); +} + +module_init(omap_aes_mod_init); +module_exit(omap_aes_mod_exit); + +MODULE_DESCRIPTION("OMAP AES hw acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Dmitry Kasatkin"); + From e84c5480b782c4009ef65b0248be7f0864573d7e Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: Fri, 3 Sep 2010 19:17:49 +0800 Subject: [PATCH 05/14] crypto: fips - FIPS requires algorithm self-tests Signed-off-by: Chuck Ebbert Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index e573077f1672..866a1d751aa4 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -23,13 +23,12 @@ comment "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" - depends on CRYPTO_ANSI_CPRNG + depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS help This options enables the fips boot option which is required if you want to system to operate in a FIPS 200 certification. You should say no unless you know what - this is. Note that CRYPTO_ANSI_CPRNG is required if this - option is selected + this is. config CRYPTO_ALGAPI tristate From 584db6a1b5b80513b272b788e5bda43da982817a Mon Sep 17 00:00:00 2001 From: Samu Onkalo Date: Fri, 3 Sep 2010 19:20:19 +0800 Subject: [PATCH 06/14] crypto: omap-sham - Adjust DMA parameters DMA is set to use burst mode also for source channel. It should descrease memory bandwidth needs. DMA synchronization is set to use prefetch mechanism. SHAM block is behind L4 bus and it doesn't have fifo. SHAM block is stalling as long as the new data is available. It takes time to fetch data from memory and transfer it via L4 bus. With prefetch enabled, data is waiting in DMA fifo and SHAM block receives new data block faster. This increases SHA processing speed up to 30 percent depending on the bus / memory load. Signed-off-by: Samu Onkalo Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 7d1485676886..a081c7c7d03f 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -311,7 +311,8 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, len32 = DIV_ROUND_UP(length, sizeof(u32)); omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, - 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC); + 1, OMAP_DMA_SYNC_PACKET, dd->dma, + OMAP_DMA_DST_SYNC_PREFETCH); omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0); @@ -1072,6 +1073,9 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd) omap_set_dma_dest_burst_mode(dd->dma_lch, OMAP_DMA_DATA_BURST_16); + omap_set_dma_src_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_4); + return 0; } From 6d8de74c5caa3e2ce7c9f19c1004dbc76d7c7edb Mon Sep 17 00:00:00 2001 From: "Justin P. Mattock" Date: Sun, 12 Sep 2010 10:42:47 +0800 Subject: [PATCH 07/14] crypto: Kconfig - update broken web addresses Below is a patch to update the broken web addresses, in crypto/* that I could locate. 
Some are just simple typos that needed to be fixed, and some had a
change in location altogether. Let me know if any of them need to be
changed and such.

Signed-off-by: Justin P. Mattock
Signed-off-by: Herbert Xu
---
 crypto/Kconfig | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 866a1d751aa4..e4bac29a32e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -364,7 +364,7 @@ config CRYPTO_RMD128
 	  RIPEMD-160 should be used.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://www.esat.kuleuven.ac.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD160
 	tristate "RIPEMD-160 digest algorithm"
@@ -381,7 +381,7 @@ config CRYPTO_RMD160
 	  against RIPEMD-160.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://www.esat.kuleuven.ac.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD256
 	tristate "RIPEMD-256 digest algorithm"
@@ -393,7 +393,7 @@ config CRYPTO_RMD256
 	  (than RIPEMD-128).
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://www.esat.kuleuven.ac.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD320
 	tristate "RIPEMD-320 digest algorithm"
@@ -405,7 +405,7 @@ config CRYPTO_RMD320
 	  (than RIPEMD-160).
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://www.esat.kuleuven.ac.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
@@ -460,7 +460,7 @@ config CRYPTO_WP512
 	  Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
 
 	  See also:
-	  <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+	  <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
 
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
@@ -578,8 +578,8 @@ config CRYPTO_ANUBIS
 	  in the NESSIE competition.
 
 	  See also:
-	  <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/>
-	  <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html>
+	  <https://www.cosic.esat.kuleuven.be/nessie/reports/>
+	  <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
 
 config CRYPTO_ARC4
 	tristate "ARC4 cipher algorithm"
@@ -658,7 +658,7 @@ config CRYPTO_KHAZAD
 	  on 32-bit processors.  Khazad uses an 128 bit key size.
 
 	  See also:
-	  <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html>
+	  <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
 
 config CRYPTO_SALSA20
 	tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"

From 50e781613c68f426edbbc8e5c27fcba74494eb8c Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Sun, 12 Sep 2010 10:44:21 +0800
Subject: [PATCH 08/14] crypto: n2_crypto - Niagara2 driver needs to depend
 upon CRYPTO_DES

Reported-by: Dennis Gilmore
Signed-off-by: David S. Miller
Signed-off-by: Herbert Xu
---
 drivers/crypto/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1ce702170e79..70f4d47bf2cf 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -172,6 +172,7 @@ config CRYPTO_DEV_MV_CESA
 
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
+	select CRYPTO_DES
 	select CRYPTO_ALGAPI
 	depends on SPARC64
 	help

From 298c926c6d7f50d91d6acb76c33b83bab5b5bd5c Mon Sep 17 00:00:00 2001
From: Adrian Hoban
Date: Mon, 20 Sep 2010 16:05:12 +0800
Subject: [PATCH 09/14] crypto: cryptd - Adding the AEAD interface type support
 to cryptd

This patch adds AEAD support into the cryptd framework. Having AEAD
support in cryptd enables crypto drivers that use the AEAD interface
type (such as the patch for AEAD based RFC4106 AES-GCM implementation
using Intel New Instructions) to leverage cryptd for asynchronous
processing.

Signed-off-by: Adrian Hoban
Signed-off-by: Tadeusz Struk
Signed-off-by: Gabriele Paoloni
Signed-off-by: Aidan O'Mahony
Signed-off-by: Herbert Xu
---
 crypto/cryptd.c         | 206 +++++++++++++++++++++++++++++++++++++++-
 include/crypto/cryptd.h |  24 +++++
 2 files changed, 227 insertions(+), 3 deletions(-)

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ef71318976c7..e46d21ae26bc 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -3,6 +3,13 @@
  *
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban
+ *             Gabriele Paoloni
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -12,6 +19,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
 #include <linux/err.h>
@@ -44,6 +52,11 @@ struct hashd_instance_ctx {
 	struct cryptd_queue *queue;
 };
 
+struct aead_instance_ctx {
+	struct crypto_aead_spawn aead_spawn;
+	struct cryptd_queue *queue;
+};
+
 struct cryptd_blkcipher_ctx {
 	struct crypto_blkcipher *child;
 };
@@ -61,6 +74,14 @@ struct cryptd_hash_request_ctx {
 	struct shash_desc desc;
 };
 
+struct cryptd_aead_ctx {
+	struct crypto_aead *child;
+};
+
+struct cryptd_aead_request_ctx {
+	crypto_completion_t complete;
+};
+
 static void cryptd_queue_worker(struct work_struct *work);
 
 static int cryptd_init_queue(struct cryptd_queue *queue,
@@ -601,6 +622,144 @@ out_put_alg:
 	return err;
 }
 
+static void cryptd_aead_crypt(struct aead_request *req,
+			struct crypto_aead *child,
+			int err,
+			int (*crypt)(struct aead_request *req))
+{
+	struct cryptd_aead_request_ctx *rctx;
+	rctx = aead_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+	aead_request_set_tfm(req, child);
+	err = crypt( req );
+	req->base.complete = rctx->complete;
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+	struct crypto_aead *child = ctx->child;
+	struct aead_request *req;
+
+	req = container_of(areq, struct aead_request, base);
+	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+}
+
+static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+	struct crypto_aead *child = ctx->child;
+	struct aead_request *req;
+
+	req = container_of(areq, struct aead_request, base);
+	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+}
+
+static int cryptd_aead_enqueue(struct aead_request *req,
+				    crypto_completion_t complete)
+{
+	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+
+	rctx->complete = req->base.complete;
+	req->base.complete = complete;
+	return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
+{
+	return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
+}
+
+static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
+{
+	return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
+}
+
+static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *cipher;
+
+	cipher = crypto_spawn_aead(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
+	ctx->child = cipher;
+	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+	return 0;
+}
+
+static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	crypto_free_aead(ctx->child);
+}
+
+static int cryptd_create_aead(struct crypto_template *tmpl,
+		          struct rtattr **tb,
+			  struct cryptd_queue *queue)
+{
+	struct aead_instance_ctx *ctx;
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+	int err;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
+				CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(alg))
+		return PTR_ERR(alg);
+
+	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+	err = PTR_ERR(inst);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	ctx = crypto_instance_ctx(inst);
+	ctx->queue = queue;
+
+	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
+			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+	if (err)
+		goto out_free_inst;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_type = alg->cra_type;
+	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+	inst->alg.cra_init = cryptd_aead_init_tfm;
+	inst->alg.cra_exit = cryptd_aead_exit_tfm;
+	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
+	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
+	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
+	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
+	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
+	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
+	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
+	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;
+
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
+		crypto_drop_spawn(&ctx->aead_spawn.base);
+out_free_inst:
+		kfree(inst);
+	}
+out_put_alg:
+	crypto_mod_put(alg);
+	return err;
+}
+
 static struct cryptd_queue queue;
 
 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -616,6 +775,8 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 		return cryptd_create_blkcipher(tmpl, tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
 		return cryptd_create_hash(tmpl, tb, &queue);
+	case CRYPTO_ALG_TYPE_AEAD:
+		return cryptd_create_aead(tmpl, tb, &queue);
 	}
 
 	return -EINVAL;
@@ -625,16 +786,21 @@ static void cryptd_free(struct crypto_instance *inst)
 {
 	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
 	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
 
 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AHASH:
 		crypto_drop_shash(&hctx->spawn);
 		kfree(ahash_instance(inst));
 		return;
+	case CRYPTO_ALG_TYPE_AEAD:
+		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
+		kfree(inst);
+		return;
+	default:
+		crypto_drop_spawn(&ctx->spawn);
+		kfree(inst);
 	}
-
-	crypto_drop_spawn(&ctx->spawn);
-	kfree(inst);
 }
 
 static struct crypto_template cryptd_tmpl = {
@@ -724,6 +890,40 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
 
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+				      u32 type, u32 mask)
+{
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_aead *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_aead(tfm);
+		return ERR_PTR(-EINVAL);
+	}
+	return __cryptd_aead_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
+{
+	struct cryptd_aead_ctx *ctx;
+	ctx = crypto_aead_ctx(&tfm->base);
+	return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_child);
+
+void cryptd_free_aead(struct cryptd_aead *tfm)
+{
+	crypto_free_aead(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_aead);
+
 static int __init cryptd_init(void)
 {
 	int err;
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1c96b255017c..ba98918bbd9b 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -1,5 +1,12 @@
 /*
  * Software async crypto daemon
+ *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban
+ *             Gabriele Paoloni
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
  */
 
 #ifndef _CRYPTO_CRYPT_H
@@ -42,4 +49,21 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
 void cryptd_free_ahash(struct cryptd_ahash *tfm);
 
+struct cryptd_aead {
+	struct crypto_aead base;
+};
+
+static inline struct cryptd_aead *__cryptd_aead_cast(
+	struct crypto_aead *tfm)
+{
+	return (struct cryptd_aead *)tfm;
+}
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+					  u32 type, u32 mask);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+
+void cryptd_free_aead(struct cryptd_aead *tfm);
+
 #endif

From 1d11911a8c572041880c8d86229f6ed971f6f7e2 Mon Sep 17 00:00:00 2001
From: Kim Phillips
Date: Thu, 23 Sep 2010 15:55:27 +0800
Subject: [PATCH 10/14] crypto: talitos - fix warning: 'alg' may be used
 uninitialized in this function

drivers/crypto/talitos.c: In function 'talitos_probe':
drivers/crypto/talitos.c:2363: warning: 'alg' may be used uninitialized in this function
drivers/crypto/talitos.c:2363: note: 'alg' was declared here

Signed-off-by: Kim Phillips
Signed-off-by: Herbert Xu
---
 drivers/crypto/talitos.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 97f4af1d8a64..bbf39991c0d0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2389,6 +2389,9 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 				     DESC_HDR_MODE0_MDEU_SHA256;
 		}
 		break;
+	default:
+		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+		return ERR_PTR(-EINVAL);
 	}
 
 	alg->cra_module = THIS_MODULE;

From 0b798247453299c895e3fa1629101dd5e94901b2 Mon Sep 17 00:00:00 2001
From: Kim Phillips
Date: Thu, 23 Sep 2010 15:56:08 +0800
Subject: [PATCH 11/14] crypto: talitos - fix checkpatch warning

WARNING: kfree(NULL) is safe this check is probably not required
+		if (priv->chan[i].fifo)
+			kfree(priv->chan[i].fifo);

Signed-off-by: Kim Phillips
Signed-off-by: Herbert Xu
---
 drivers/crypto/talitos.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index bbf39991c0d0..1444f8c4dbdd 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2333,8 +2333,7 @@ static int talitos_remove(struct of_device *ofdev)
 	talitos_unregister_rng(dev);
 
 	for (i = 0; i < priv->num_channels; i++)
-		if (priv->chan[i].fifo)
-			kfree(priv->chan[i].fifo);
+		kfree(priv->chan[i].fifo);
 
 	kfree(priv->chan);
 

From a752447af5b61f19e9c50322d9b07cea9a086084 Mon Sep 17 00:00:00 2001
From: Kim Phillips
Date: Thu, 23 Sep 2010 15:56:38 +0800
Subject: [PATCH 12/14] crypto: talitos - sparse check endian fixes

Signed-off-by: Kim Phillips
Signed-off-by: Herbert Xu
---
 drivers/crypto/talitos.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 1444f8c4dbdd..71e146562d82 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -161,7 +161,7 @@ struct talitos_private {
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
 	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
-	talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+	talitos_ptr->eptr = upper_32_bits(dma_addr);
 }
 
 /*
@@ -332,10 +332,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
 
 	/* GO! */
 	wmb();
-	out_be32(priv->reg + TALITOS_FF(ch),
-		 cpu_to_be32(upper_32_bits(request->dma_desc)));
+	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
 	out_be32(priv->reg + TALITOS_FF_LO(ch),
-		 cpu_to_be32(lower_32_bits(request->dma_desc)));
+		 lower_32_bits(request->dma_desc));
 
 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 
@@ -1751,14 +1750,14 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
 	ahash_init(areq);
 	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
 
-	req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
-	req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
-	req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
-	req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
-	req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
-	req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
-	req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
-	req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+	req_ctx->hw_context[0] = SHA224_H0;
+	req_ctx->hw_context[1] = SHA224_H1;
+	req_ctx->hw_context[2] = SHA224_H2;
+	req_ctx->hw_context[3] = SHA224_H3;
+	req_ctx->hw_context[4] = SHA224_H4;
+	req_ctx->hw_context[5] = SHA224_H5;
+	req_ctx->hw_context[6] = SHA224_H6;
+	req_ctx->hw_context[7] = SHA224_H7;
 
 	/* init 64-bit count */
 	req_ctx->hw_context[8] = 0;

From f4e523f2ad179f6bf5691ddc3cd2893856fafc66 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 19 Oct 2010 20:50:23 +0800
Subject: [PATCH 13/14] crypto: hifn_795x - use cancel_delayed_work_sync()

Make hifn_795x::hifn_remove() call cancel_delayed_work_sync() instead
of calling cancel_delayed_work() followed by flush_scheduled_work().
This is to prepare for the deprecation and removal of
flush_scheduled_work().

Signed-off-by: Tejun Heo
Signed-off-by: Herbert Xu
---
 drivers/crypto/hifn_795x.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e449ac5627a5..0eac3da566ba 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2700,8 +2700,7 @@ static void __devexit hifn_remove(struct pci_dev *pdev)
 	dev = pci_get_drvdata(pdev);
 
 	if (dev) {
-		cancel_delayed_work(&dev->work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&dev->work);
 
 		hifn_unregister_rng(dev);
 		hifn_unregister_alg(dev);

From 6d388b43d2c4ef6f0806c9bb9a5edebf00a23c6a Mon Sep 17 00:00:00 2001
From: Tracey Dent
Date: Tue, 19 Oct 2010 20:52:26 +0800
Subject: [PATCH 14/14] crypto: Makefile - replace the use of -objs with -y

Changed -objs to -y in Makefile.
Signed-off-by: Tracey Dent
Signed-off-by: Herbert Xu
---
 drivers/crypto/Makefile      | 2 +-
 drivers/crypto/amcc/Makefile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 64289c678e98..256697330a41 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-objs := n2_core.o n2_asm.o
+n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index aa376e8d5ed5..5c0c62b65d69 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
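
A note on the kbuild idiom touched by the final patch, for readers less
familiar with kernel Makefiles: `<module>-objs` and `<module>-y` both list
the object files that are linked together to form a composite module, but
the `-y` form additionally composes with conditional `obj-$(CONFIG_...)`
style lists, which is why the kbuild documentation prefers it. A minimal
sketch, using hypothetical module and file names (not from this series):

	# foo.ko is a composite module linked from two source objects.
	obj-$(CONFIG_FOO) += foo.o
	foo-y := foo_core.o foo_hw.o

	# Conditional composition is the practical advantage of -y:
	# foo_debug.o is only linked in when CONFIG_FOO_DEBUG is set.
	foo-$(CONFIG_FOO_DEBUG) += foo_debug.o

	# Legacy spelling, equivalent for the unconditional case above:
	# foo-objs := foo_core.o foo_hw.o

For the simple two-object lists in n2_crypto and crypto4xx the two
spellings behave identically, so the patch is a pure style conversion.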