crypto: brcm - Add Broadcom SPU driver

Add the Broadcom Secure Processing Unit (SPU) crypto driver, which offloads
symmetric crypto operations to SPU hardware. The driver supports ablkcipher,
ahash, and aead operations.

Signed-off-by: Steve Lin <steven.lin1@broadcom.com>
Signed-off-by: Rob Rice <rob.rice@broadcom.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author: Rob Rice, 2017-02-03 12:55:33 -05:00 (committed by Herbert Xu)
parent 206dc4fc27
commit 9d12ba86f8
12 changed files with 9516 additions and 0 deletions


drivers/crypto/Kconfig
@@ -587,4 +587,19 @@ source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"
config CRYPTO_DEV_BCM_SPU
tristate "Broadcom symmetric crypto/hash acceleration support"
depends on ARCH_BCM_IPROC
depends on BCM_PDC_MBOX
default m
select CRYPTO_DES
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
help
This driver provides support for Broadcom crypto acceleration using the
Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
ahash, and aead algorithms with the kernel cryptographic API.
endif # CRYPTO_HW
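
The Kconfig entry above only gates the build. As a hedged illustration (not part of this patch; the function name, key, IV, and scatterlists are placeholders, and asynchronous completion handling is omitted), an in-kernel consumer would reach an SPU-backed algorithm such as "cbc(aes)" through the generic ablkcipher API of this kernel era:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes_encrypt(struct scatterlist *src,
                                   struct scatterlist *dst,
                                   unsigned int nbytes, const u8 *key,
                                   unsigned int keylen, u8 *iv)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        int err;

        /* The crypto core picks the highest-priority "cbc(aes)" provider,
         * which may be this SPU driver when it is loaded.
         */
        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_ablkcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
        /* An async driver may return -EINPROGRESS; a real caller would set a
         * completion callback and wait. That plumbing is omitted here.
         */
        err = crypto_ablkcipher_encrypt(req);

        ablkcipher_request_free(req);
out_free_tfm:
        crypto_free_ablkcipher(tfm);
        return err;
}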


drivers/crypto/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/

drivers/crypto/bcm/Makefile (new file)
@@ -0,0 +1,15 @@
# File: drivers/crypto/bcm/Makefile
#
# Makefile for crypto acceleration files for Broadcom SPU driver
#
# Uncomment to enable debug tracing in the SPU driver.
# CFLAGS_util.o := -DDEBUG
# CFLAGS_cipher.o := -DDEBUG
# CFLAGS_spu.o := -DDEBUG
# CFLAGS_spu2.o := -DDEBUG
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o
bcm_crypto_spu-objs := util.o spu.o spu2.o cipher.o
ccflags-y += -I. -DBCMDRIVER

drivers/crypto/bcm/cipher.c (new file, 4964 lines; diff suppressed because it is too large)

drivers/crypto/bcm/cipher.h (new file, 483 lines)
@@ -0,0 +1,483 @@
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
#ifndef _CIPHER_H
#define _CIPHER_H
#include <linux/atomic.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
#include "spu.h"
#include "spum.h"
#include "spu2.h"
/* Driver supports up to MAX_SPUS SPU blocks */
#define MAX_SPUS 16
#define ARC4_MIN_KEY_SIZE 1
#define ARC4_MAX_KEY_SIZE 256
#define ARC4_BLOCK_SIZE 1
#define ARC4_STATE_SIZE 4
#define CCM_AES_IV_SIZE 16
#define GCM_AES_IV_SIZE 12
#define GCM_ESP_IV_SIZE 8
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16
#define MAX_KEY_SIZE ARC4_MAX_KEY_SIZE
#define MAX_IV_SIZE AES_BLOCK_SIZE
#define MAX_DIGEST_SIZE SHA3_512_DIGEST_SIZE
#define MAX_ASSOC_SIZE 512
/* size of salt value for AES-GCM-ESP and AES-CCM-ESP */
#define GCM_ESP_SALT_SIZE 4
#define CCM_ESP_SALT_SIZE 3
#define MAX_SALT_SIZE GCM_ESP_SALT_SIZE
#define GCM_ESP_SALT_OFFSET 0
#define CCM_ESP_SALT_OFFSET 1
#define GCM_ESP_DIGESTSIZE 16
#define MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
/*
* Maximum number of bytes from a non-final hash request that can be deferred
* until more data is available. With new crypto API framework, this
* can be no more than one block of data.
*/
#define HASH_CARRY_MAX MAX_HASH_BLOCK_SIZE
/* Force at least 4-byte alignment of all SPU message fields */
#define SPU_MSG_ALIGN 4
/* Number of times to resend mailbox message if mb queue is full */
#define SPU_MB_RETRY_MAX 1000
/* op_counts[] indexes */
enum op_type {
SPU_OP_CIPHER,
SPU_OP_HASH,
SPU_OP_HMAC,
SPU_OP_AEAD,
SPU_OP_NUM
};
enum spu_spu_type {
SPU_TYPE_SPUM,
SPU_TYPE_SPU2,
};
/*
* SPUM_NS2 and SPUM_NSP are the SPU-M block on Northstar 2 and Northstar Plus,
* respectively.
*/
enum spu_spu_subtype {
SPU_SUBTYPE_SPUM_NS2,
SPU_SUBTYPE_SPUM_NSP,
SPU_SUBTYPE_SPU2_V1,
SPU_SUBTYPE_SPU2_V2
};
struct spu_type_subtype {
enum spu_spu_type type;
enum spu_spu_subtype subtype;
};
struct cipher_op {
enum spu_cipher_alg alg;
enum spu_cipher_mode mode;
};
struct auth_op {
enum hash_alg alg;
enum hash_mode mode;
};
struct iproc_alg_s {
u32 type;
union {
struct crypto_alg crypto;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
struct cipher_op cipher_info;
struct auth_op auth_info;
bool auth_first;
bool registered;
};
/*
* Buffers for a SPU request/reply message pair. All part of one structure to
* allow a single alloc per request.
*/
struct spu_msg_buf {
/* Request message fragments */
/*
* SPU request message header. For SPU-M, holds MH, EMH, SCTX, BDESC,
* and BD header. For SPU2, holds FMD, OMD.
*/
u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];
/* IV or counter. Sized to include salt. Also used for the XTS tweak. */
u8 iv_ctr[ALIGN(2 * AES_BLOCK_SIZE, SPU_MSG_ALIGN)];
/* Hash digest. request and response. */
u8 digest[ALIGN(MAX_DIGEST_SIZE, SPU_MSG_ALIGN)];
/* SPU request message padding */
u8 spu_req_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];
/* SPU-M request message STATUS field */
u8 tx_stat[ALIGN(SPU_TX_STATUS_LEN, SPU_MSG_ALIGN)];
/* Response message fragments */
/* SPU response message header */
u8 spu_resp_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];
/* SPU response message STATUS field padding */
u8 rx_stat_pad[ALIGN(SPU_STAT_PAD_MAX, SPU_MSG_ALIGN)];
/* SPU response message STATUS field */
u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];
union {
/* Buffers only used for ablkcipher */
struct {
/*
* Field used for either SUPDT when RC4 is used
* -OR- tweak value when XTS/AES is used
*/
u8 supdt_tweak[ALIGN(SPU_SUPDT_LEN, SPU_MSG_ALIGN)];
} c;
/* Buffers only used for aead */
struct {
/* SPU response pad for GCM data */
u8 gcmpad[ALIGN(AES_BLOCK_SIZE, SPU_MSG_ALIGN)];
/* SPU request msg padding for GCM AAD */
u8 req_aad_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];
/* SPU response data to be discarded */
u8 resp_aad[ALIGN(MAX_ASSOC_SIZE + MAX_IV_SIZE,
SPU_MSG_ALIGN)];
} a;
};
};
struct iproc_ctx_s {
u8 enckey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
unsigned int enckeylen;
u8 authkey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
unsigned int authkeylen;
u8 salt[MAX_SALT_SIZE];
unsigned int salt_len;
unsigned int salt_offset;
u8 iv[MAX_IV_SIZE];
unsigned int digestsize;
struct iproc_alg_s *alg;
bool is_esp;
struct cipher_op cipher;
enum spu_cipher_type cipher_type;
struct auth_op auth;
bool auth_first;
/*
* The maximum length in bytes of the payload in a SPU message for this
* context. For SPU-M, the payload is the combination of AAD and data.
* For SPU2, the payload is just data. A value of SPU_MAX_PAYLOAD_INF
* indicates that there is no limit to the length of the SPU message
* payload.
*/
unsigned int max_payload;
struct crypto_aead *fallback_cipher;
/* auth_type is determined during processing of request */
u8 ipad[MAX_HASH_BLOCK_SIZE];
u8 opad[MAX_HASH_BLOCK_SIZE];
/*
* Buffer to hold SPU message header template. Template is created at
* setkey time for ablkcipher requests, since most of the fields in the
* header are known at that time. At request time, just fill in a few
* missing pieces related to length of data in the request and IVs, etc.
*/
u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];
/* Length of SPU request header */
u16 spu_req_hdr_len;
/* Expected length of SPU response header */
u16 spu_resp_hdr_len;
/*
* shash descriptor - needed to perform incremental hashing in
* software, when hw doesn't support it.
*/
struct shash_desc *shash;
bool is_rfc4543; /* RFC 4543 style of GMAC */
};
/* state from iproc_reqctx_s necessary for hash state export/import */
struct spu_hash_export_s {
unsigned int total_todo;
unsigned int total_sent;
u8 hash_carry[HASH_CARRY_MAX];
unsigned int hash_carry_len;
u8 incr_hash[MAX_DIGEST_SIZE];
bool is_sw_hmac;
};
struct iproc_reqctx_s {
/* general context */
struct crypto_async_request *parent;
/* only valid after enqueue() */
struct iproc_ctx_s *ctx;
u8 chan_idx; /* Mailbox channel to be used to submit this request */
/* total todo, rx'd, and sent for this request */
unsigned int total_todo;
unsigned int total_received; /* only valid for ablkcipher */
unsigned int total_sent;
/*
* num bytes sent to hw from the src sg in this request. This can differ
* from total_sent for incremental hashing. total_sent includes previous
* init() and update() data. src_sent does not.
*/
unsigned int src_sent;
/*
* For AEAD requests, start of associated data. This will typically
* point to the beginning of the src scatterlist from the request,
* since assoc data is at the beginning of the src scatterlist rather
* than in its own sg.
*/
struct scatterlist *assoc;
/*
* scatterlist entry and offset to start of data for next chunk. Crypto
* API src scatterlist for AEAD starts with AAD, if present. For first
* chunk, src_sg is sg entry at beginning of input data (after AAD).
* src_skip begins at the offset in that sg entry where data begins.
*/
struct scatterlist *src_sg;
int src_nents; /* Number of src entries with data */
u32 src_skip; /* bytes of current sg entry already used */
/*
* Same for destination. For AEAD, if there is AAD, output data must
* be written at offset following AAD.
*/
struct scatterlist *dst_sg;
int dst_nents; /* Number of dst entries with data */
u32 dst_skip; /* bytes of current sg entry already written */
/* Mailbox message used to send this request to PDC driver */
struct brcm_message mb_mssg;
bool bd_suppress; /* suppress BD field in SPU response? */
/* cipher context */
bool is_encrypt;
/*
* CBC mode: IV. CTR mode: counter. Else empty. Used as a DMA
* buffer for AEAD requests, so allocate as DMA-able memory. If the IV is
* concatenated with a salt, the salt is included here.
*/
u8 *iv_ctr;
/* Length of IV or counter, in bytes */
unsigned int iv_ctr_len;
/*
* Hash requests can be of any size, whether initial, update, or final.
* A non-final request must be submitted to the SPU as an integral
* number of blocks. This may leave data at the end of the request
* that is not a full block. Since the request is non-final, it cannot
* be padded. So, we write the remainder to this hash_carry buffer and
* hold it until the next request arrives. The carry data is then
* submitted at the beginning of the data in the next SPU msg.
* hash_carry_len is the number of bytes currently in hash_carry. These
* fields are only used for ahash requests.
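* For example, with a 64-byte block size, an update() of 100 bytes would
* send one 64-byte block to the SPU and carry the remaining 36 bytes here.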
*/
u8 hash_carry[HASH_CARRY_MAX];
unsigned int hash_carry_len;
unsigned int is_final; /* is this the final for the hash op? */
/*
* Digest from incremental hash is saved here to include in next hash
* operation. Cannot be stored in req->result for truncated hashes,
* since result may be sized for final digest. Cannot be saved in
* msg_buf because that gets deleted between incremental hash ops
* and is not saved as part of export().
*/
u8 incr_hash[MAX_DIGEST_SIZE];
/* hmac context */
bool is_sw_hmac;
/* aead context */
struct crypto_tfm *old_tfm;
crypto_completion_t old_complete;
void *old_data;
gfp_t gfp;
/* Buffers used to build SPU request and response messages */
struct spu_msg_buf msg_buf;
};
/*
* Structure encapsulates a set of function pointers specific to the type of
* SPU hardware running. These functions handle creation and parsing of
* SPU request and response messages. The structure also includes
* hardware-specific values read from the device tree.
*/
struct spu_hw {
void (*spu_dump_msg_hdr)(u8 *buf, unsigned int buf_len);
u32 (*spu_ctx_max_payload)(enum spu_cipher_alg cipher_alg,
enum spu_cipher_mode cipher_mode,
unsigned int blocksize);
u32 (*spu_payload_length)(u8 *spu_hdr);
u16 (*spu_response_hdr_len)(u16 auth_key_len, u16 enc_key_len,
bool is_hash);
u16 (*spu_hash_pad_len)(enum hash_alg hash_alg,
enum hash_mode hash_mode, u32 chunksize,
u16 hash_block_size);
u32 (*spu_gcm_ccm_pad_len)(enum spu_cipher_mode cipher_mode,
unsigned int data_size);
u32 (*spu_assoc_resp_len)(enum spu_cipher_mode cipher_mode,
unsigned int assoc_len,
unsigned int iv_len, bool is_encrypt);
u8 (*spu_aead_ivlen)(enum spu_cipher_mode cipher_mode,
u16 iv_len);
enum hash_type (*spu_hash_type)(u32 src_sent);
u32 (*spu_digest_size)(u32 digest_size, enum hash_alg alg,
enum hash_type);
u32 (*spu_create_request)(u8 *spu_hdr,
struct spu_request_opts *req_opts,
struct spu_cipher_parms *cipher_parms,
struct spu_hash_parms *hash_parms,
struct spu_aead_parms *aead_parms,
unsigned int data_size);
u16 (*spu_cipher_req_init)(u8 *spu_hdr,
struct spu_cipher_parms *cipher_parms);
void (*spu_cipher_req_finish)(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
bool update_key,
unsigned int data_size);
void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
u32 hash_pad_len, enum hash_alg auth_alg,
enum hash_mode auth_mode,
unsigned int total_sent, u32 status_padding);
u8 (*spu_xts_tweak_in_payload)(void);
u8 (*spu_tx_status_len)(void);
u8 (*spu_rx_status_len)(void);
int (*spu_status_process)(u8 *statp);
void (*spu_ccm_update_iv)(unsigned int digestsize,
struct spu_cipher_parms *cipher_parms,
unsigned int assoclen, unsigned int chunksize,
bool is_encrypt, bool is_esp);
u32 (*spu_wordalign_padlen)(u32 data_size);
/* The base virtual address of the SPU hw registers */
void __iomem *reg_vbase[MAX_SPUS];
/* Version of the SPU hardware */
enum spu_spu_type spu_type;
/* Sub-version of the SPU hardware */
enum spu_spu_subtype spu_subtype;
/* The number of SPUs on this platform */
u32 num_spu;
};
struct device_private {
struct platform_device *pdev[MAX_SPUS];
struct spu_hw spu;
atomic_t session_count; /* number of streams active */
atomic_t stream_count; /* monotonic counter for stream IDs */
/* Length of BCM header. Set to 0 when hw does not expect BCM HEADER. */
u8 bcm_hdr_len;
/* The index of the channel to use for the next crypto request */
atomic_t next_chan;
struct dentry *debugfs_dir;
struct dentry *debugfs_stats;
/* Number of request bytes processed and result bytes returned */
atomic64_t bytes_in;
atomic64_t bytes_out;
/* Number of operations of each type */
atomic_t op_counts[SPU_OP_NUM];
atomic_t cipher_cnt[CIPHER_ALG_LAST][CIPHER_MODE_LAST];
atomic_t hash_cnt[HASH_ALG_LAST];
atomic_t hmac_cnt[HASH_ALG_LAST];
atomic_t aead_cnt[AEAD_TYPE_LAST];
/* Number of calls to setkey() for each operation type */
atomic_t setkey_cnt[SPU_OP_NUM];
/* Number of times request was resubmitted because mb was full */
atomic_t mb_no_spc;
/* Number of mailbox send failures */
atomic_t mb_send_fail;
/* Number of ICV check failures for AEAD messages */
atomic_t bad_icv;
struct mbox_client mcl[MAX_SPUS];
/* Array of mailbox channel pointers, one for each channel */
struct mbox_chan *mbox[MAX_SPUS];
/* Driver initialized */
bool inited;
};
extern struct device_private iproc_priv;
#endif
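
The spu_hw ops table above is how the driver hides SPU-M vs SPU2 differences from the rest of the code. A minimal sketch of the dispatch, assuming call sites like those in cipher.c (whose diff is suppressed below), so the exact usage is an assumption:

        struct spu_hw *spu = &iproc_priv.spu;
        struct spu_cipher_parms cipher_parms;  /* assumed filled from the context */

        /* Build the cipher request header template at setkey time using
         * whichever implementation (spum_* or spu2_*) was installed for
         * this SoC.
         */
        ctx->spu_req_hdr_len = spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr,
                                                        &cipher_parms);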

drivers/crypto/bcm/spu.c (new file, 1251 lines; diff suppressed because it is too large)

drivers/crypto/bcm/spu.h (new file, 287 lines)
@@ -0,0 +1,287 @@
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
/*
* This file contains the definition of SPU messages. There are currently two
* SPU message formats: SPU-M and SPU2. The hardware uses different values to
* identify the same things in SPU-M vs SPU2. So this file defines values that
* are hardware independent. Software can use these values for any version of
* SPU hardware. These values are used in APIs in spu.c. Functions internal to
* spu.c and spu2.c convert these to hardware-specific values.
*/
#ifndef _SPU_H
#define _SPU_H
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <crypto/sha.h>
enum spu_cipher_alg {
CIPHER_ALG_NONE = 0x0,
CIPHER_ALG_RC4 = 0x1,
CIPHER_ALG_DES = 0x2,
CIPHER_ALG_3DES = 0x3,
CIPHER_ALG_AES = 0x4,
CIPHER_ALG_LAST = 0x5
};
enum spu_cipher_mode {
CIPHER_MODE_NONE = 0x0,
CIPHER_MODE_ECB = 0x0,
CIPHER_MODE_CBC = 0x1,
CIPHER_MODE_OFB = 0x2,
CIPHER_MODE_CFB = 0x3,
CIPHER_MODE_CTR = 0x4,
CIPHER_MODE_CCM = 0x5,
CIPHER_MODE_GCM = 0x6,
CIPHER_MODE_XTS = 0x7,
CIPHER_MODE_LAST = 0x8
};
enum spu_cipher_type {
CIPHER_TYPE_NONE = 0x0,
CIPHER_TYPE_DES = 0x0,
CIPHER_TYPE_3DES = 0x0,
CIPHER_TYPE_INIT = 0x0, /* used for ARC4 */
CIPHER_TYPE_AES128 = 0x0,
CIPHER_TYPE_AES192 = 0x1,
CIPHER_TYPE_UPDT = 0x1, /* used for ARC4 */
CIPHER_TYPE_AES256 = 0x2,
};
enum hash_alg {
HASH_ALG_NONE = 0x0,
HASH_ALG_MD5 = 0x1,
HASH_ALG_SHA1 = 0x2,
HASH_ALG_SHA224 = 0x3,
HASH_ALG_SHA256 = 0x4,
HASH_ALG_AES = 0x5,
HASH_ALG_SHA384 = 0x6,
HASH_ALG_SHA512 = 0x7,
/* Keep SHA3 algorithms at the end always */
HASH_ALG_SHA3_224 = 0x8,
HASH_ALG_SHA3_256 = 0x9,
HASH_ALG_SHA3_384 = 0xa,
HASH_ALG_SHA3_512 = 0xb,
HASH_ALG_LAST
};
enum hash_mode {
HASH_MODE_NONE = 0x0,
HASH_MODE_HASH = 0x0,
HASH_MODE_XCBC = 0x0,
HASH_MODE_CMAC = 0x1,
HASH_MODE_CTXT = 0x1,
HASH_MODE_HMAC = 0x2,
HASH_MODE_RABIN = 0x4,
HASH_MODE_FHMAC = 0x6,
HASH_MODE_CCM = 0x5,
HASH_MODE_GCM = 0x6,
};
enum hash_type {
HASH_TYPE_NONE = 0x0,
HASH_TYPE_FULL = 0x0,
HASH_TYPE_INIT = 0x1,
HASH_TYPE_UPDT = 0x2,
HASH_TYPE_FIN = 0x3,
HASH_TYPE_AES128 = 0x0,
HASH_TYPE_AES192 = 0x1,
HASH_TYPE_AES256 = 0x2
};
enum aead_type {
AES_CCM,
AES_GCM,
AUTHENC,
AEAD_TYPE_LAST
};
extern char *hash_alg_name[HASH_ALG_LAST];
extern char *aead_alg_name[AEAD_TYPE_LAST];
struct spu_request_opts {
bool is_inbound;
bool auth_first;
bool is_aead;
bool is_esp;
bool bd_suppress;
bool is_rfc4543;
};
struct spu_cipher_parms {
enum spu_cipher_alg alg;
enum spu_cipher_mode mode;
enum spu_cipher_type type;
u8 *key_buf;
u16 key_len;
/* iv_buf and iv_len include salt, if applicable */
u8 *iv_buf;
u16 iv_len;
};
struct spu_hash_parms {
enum hash_alg alg;
enum hash_mode mode;
enum hash_type type;
u8 digestsize;
u8 *key_buf;
u16 key_len;
u16 prebuf_len;
/* length of hash pad. signed, needs to handle roll-overs */
int pad_len;
};
struct spu_aead_parms {
u32 assoc_size;
u16 iv_len; /* length of IV field between assoc data and data */
u8 aad_pad_len; /* For AES GCM/CCM, length of padding after AAD */
u8 data_pad_len;/* For AES GCM/CCM, length of padding after data */
bool return_iv; /* True if SPU should return an IV */
u32 ret_iv_len; /* Length in bytes of returned IV */
u32 ret_iv_off; /* Offset into full IV if partial IV returned */
};
/************** SPU sizes ***************/
#define SPU_RX_STATUS_LEN 4
/* Max length of padding for 4-byte alignment of STATUS field */
#define SPU_STAT_PAD_MAX 4
/* Max length of pad fragment. The SPU_STAT_PAD_MAX term is for 4-byte alignment of the STATUS field */
#define SPU_PAD_LEN_MAX (SPU_GCM_CCM_ALIGN + MAX_HASH_BLOCK_SIZE + \
SPU_STAT_PAD_MAX)
/* GCM and CCM require 16-byte alignment */
#define SPU_GCM_CCM_ALIGN 16
/* Length of SUPDT field in SPU response message for RC4 */
#define SPU_SUPDT_LEN 260
/* SPU status error codes. These are used as common error codes across all
* SPU variants.
*/
#define SPU_INVALID_ICV 1
/* Indicates no limit to the length of the payload in a SPU message */
#define SPU_MAX_PAYLOAD_INF 0xFFFFFFFF
/* Size of XTS tweak ("i" parameter), in bytes */
#define SPU_XTS_TWEAK_SIZE 16
/* CCM B_0 field definitions, common for SPU-M and SPU2 */
#define CCM_B0_ADATA 0x40
#define CCM_B0_ADATA_SHIFT 6
#define CCM_B0_M_PRIME 0x38
#define CCM_B0_M_PRIME_SHIFT 3
#define CCM_B0_L_PRIME 0x07
#define CCM_B0_L_PRIME_SHIFT 0
#define CCM_ESP_L_VALUE 4
/**
* spu_req_incl_icv() - Return true if SPU request message should include the
* ICV as a separate buffer.
* @cipher_mode: the cipher mode being requested
* @is_encrypt: true if encrypting. false if decrypting.
*
* Return: true if ICV to be included as separate buffer
*/
static __always_inline bool spu_req_incl_icv(enum spu_cipher_mode cipher_mode,
bool is_encrypt)
{
if ((cipher_mode == CIPHER_MODE_GCM) && !is_encrypt)
return true;
if ((cipher_mode == CIPHER_MODE_CCM) && !is_encrypt)
return true;
return false;
}
static __always_inline u32 spu_real_db_size(u32 assoc_size,
u32 aead_iv_buf_len,
u32 prebuf_len,
u32 data_size,
u32 aad_pad_len,
u32 gcm_pad_len,
u32 hash_pad_len)
{
return assoc_size + aead_iv_buf_len + prebuf_len + data_size +
aad_pad_len + gcm_pad_len + hash_pad_len;
}
/************** SPU Functions Prototypes **************/
void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len);
u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
enum spu_cipher_mode cipher_mode,
unsigned int blocksize);
u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
enum spu_cipher_mode cipher_mode,
unsigned int blocksize);
u32 spum_payload_length(u8 *spu_hdr);
u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash);
u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
u32 chunksize, u16 hash_block_size);
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
unsigned int data_size);
u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
unsigned int assoc_len, unsigned int iv_len,
bool is_encrypt);
u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len);
bool spu_req_incl_icv(enum spu_cipher_mode cipher_mode, bool is_encrypt);
enum hash_type spum_hash_type(u32 src_sent);
u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
enum hash_type htype);
u32 spum_create_request(u8 *spu_hdr,
struct spu_request_opts *req_opts,
struct spu_cipher_parms *cipher_parms,
struct spu_hash_parms *hash_parms,
struct spu_aead_parms *aead_parms,
unsigned int data_size);
u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms);
void spum_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
bool update_key,
unsigned int data_size);
void spum_request_pad(u8 *pad_start,
u32 gcm_padding,
u32 hash_pad_len,
enum hash_alg auth_alg,
enum hash_mode auth_mode,
unsigned int total_sent, u32 status_padding);
u8 spum_xts_tweak_in_payload(void);
u8 spum_tx_status_len(void);
u8 spum_rx_status_len(void);
int spum_status_process(u8 *statp);
void spum_ccm_update_iv(unsigned int digestsize,
struct spu_cipher_parms *cipher_parms,
unsigned int assoclen,
unsigned int chunksize,
bool is_encrypt,
bool is_esp);
u32 spum_wordalign_padlen(u32 data_size);
#endif

drivers/crypto/bcm/spu2.c (new file, 1401 lines; diff suppressed because it is too large)

drivers/crypto/bcm/spu2.h (new file, 228 lines)
@@ -0,0 +1,228 @@
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
/*
* This file contains SPU message definitions specific to SPU2.
*/
#ifndef _SPU2_H
#define _SPU2_H
enum spu2_cipher_type {
SPU2_CIPHER_TYPE_NONE = 0x0,
SPU2_CIPHER_TYPE_AES128 = 0x1,
SPU2_CIPHER_TYPE_AES192 = 0x2,
SPU2_CIPHER_TYPE_AES256 = 0x3,
SPU2_CIPHER_TYPE_DES = 0x4,
SPU2_CIPHER_TYPE_3DES = 0x5,
SPU2_CIPHER_TYPE_LAST
};
enum spu2_cipher_mode {
SPU2_CIPHER_MODE_ECB = 0x0,
SPU2_CIPHER_MODE_CBC = 0x1,
SPU2_CIPHER_MODE_CTR = 0x2,
SPU2_CIPHER_MODE_CFB = 0x3,
SPU2_CIPHER_MODE_OFB = 0x4,
SPU2_CIPHER_MODE_XTS = 0x5,
SPU2_CIPHER_MODE_CCM = 0x6,
SPU2_CIPHER_MODE_GCM = 0x7,
SPU2_CIPHER_MODE_LAST
};
enum spu2_hash_type {
SPU2_HASH_TYPE_NONE = 0x0,
SPU2_HASH_TYPE_AES128 = 0x1,
SPU2_HASH_TYPE_AES192 = 0x2,
SPU2_HASH_TYPE_AES256 = 0x3,
SPU2_HASH_TYPE_MD5 = 0x6,
SPU2_HASH_TYPE_SHA1 = 0x7,
SPU2_HASH_TYPE_SHA224 = 0x8,
SPU2_HASH_TYPE_SHA256 = 0x9,
SPU2_HASH_TYPE_SHA384 = 0xa,
SPU2_HASH_TYPE_SHA512 = 0xb,
SPU2_HASH_TYPE_SHA512_224 = 0xc,
SPU2_HASH_TYPE_SHA512_256 = 0xd,
SPU2_HASH_TYPE_SHA3_224 = 0xe,
SPU2_HASH_TYPE_SHA3_256 = 0xf,
SPU2_HASH_TYPE_SHA3_384 = 0x10,
SPU2_HASH_TYPE_SHA3_512 = 0x11,
SPU2_HASH_TYPE_LAST
};
enum spu2_hash_mode {
SPU2_HASH_MODE_CMAC = 0x0,
SPU2_HASH_MODE_CBC_MAC = 0x1,
SPU2_HASH_MODE_XCBC_MAC = 0x2,
SPU2_HASH_MODE_HMAC = 0x3,
SPU2_HASH_MODE_RABIN = 0x4,
SPU2_HASH_MODE_CCM = 0x5,
SPU2_HASH_MODE_GCM = 0x6,
SPU2_HASH_MODE_RESERVED = 0x7,
SPU2_HASH_MODE_LAST
};
enum spu2_ret_md_opts {
SPU2_RET_NO_MD = 0, /* return no metadata */
SPU2_RET_FMD_OMD = 1, /* return both FMD and OMD */
SPU2_RET_FMD_ONLY = 2, /* return only FMD */
SPU2_RET_FMD_OMD_IV = 3, /* return FMD and OMD with just IVs */
};
/* Fixed Metadata format */
struct SPU2_FMD {
u64 ctrl0;
u64 ctrl1;
u64 ctrl2;
u64 ctrl3;
};
#define FMD_SIZE sizeof(struct SPU2_FMD)
/* Fixed part of request message header length in bytes. Just FMD. */
#define SPU2_REQ_FIXED_LEN FMD_SIZE
#define SPU2_HEADER_ALLOC_LEN (SPU_REQ_FIXED_LEN + \
2 * MAX_KEY_SIZE + 2 * MAX_IV_SIZE)
/* FMD ctrl0 field masks */
#define SPU2_CIPH_ENCRYPT_EN 0x1 /* 0: decrypt, 1: encrypt */
#define SPU2_CIPH_TYPE 0xF0 /* one of spu2_cipher_type */
#define SPU2_CIPH_TYPE_SHIFT 4
#define SPU2_CIPH_MODE 0xF00 /* one of spu2_cipher_mode */
#define SPU2_CIPH_MODE_SHIFT 8
#define SPU2_CFB_MASK 0x7000 /* cipher feedback mask */
#define SPU2_CFB_MASK_SHIFT 12
#define SPU2_PROTO_SEL 0xF00000 /* MACsec, IPsec, TLS... */
#define SPU2_PROTO_SEL_SHIFT 20
#define SPU2_HASH_FIRST 0x1000000 /* 1: hash input is input pkt
* data
*/
#define SPU2_CHK_TAG 0x2000000 /* 1: check digest provided */
#define SPU2_HASH_TYPE 0x1F0000000 /* one of spu2_hash_type */
#define SPU2_HASH_TYPE_SHIFT 28
#define SPU2_HASH_MODE 0xF000000000 /* one of spu2_hash_mode */
#define SPU2_HASH_MODE_SHIFT 36
#define SPU2_CIPH_PAD_EN 0x100000000000 /* 1: Add pad to end of payload for
* enc
*/
#define SPU2_CIPH_PAD 0xFF000000000000 /* cipher pad value */
#define SPU2_CIPH_PAD_SHIFT 48
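/*
 * Illustration only (not from the patch): the mask/shift pairs above compose
 * the 64-bit ctrl0 word, e.g. for AES-128-CBC encryption with HMAC-SHA-256:
 *
 *	u64 ctrl0 = SPU2_CIPH_ENCRYPT_EN |
 *		((u64)SPU2_CIPHER_TYPE_AES128 << SPU2_CIPH_TYPE_SHIFT) |
 *		((u64)SPU2_CIPHER_MODE_CBC << SPU2_CIPH_MODE_SHIFT) |
 *		((u64)SPU2_HASH_TYPE_SHA256 << SPU2_HASH_TYPE_SHIFT) |
 *		((u64)SPU2_HASH_MODE_HMAC << SPU2_HASH_MODE_SHIFT);
 */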
/* FMD ctrl1 field masks */
#define SPU2_TAG_LOC 0x1 /* 1: end of payload, 0: undef */
#define SPU2_HAS_FR_DATA 0x2 /* 1: msg has frame data */
#define SPU2_HAS_AAD1 0x4 /* 1: msg has AAD1 field */
#define SPU2_HAS_NAAD 0x8 /* 1: msg has NAAD field */
#define SPU2_HAS_AAD2 0x10 /* 1: msg has AAD2 field */
#define SPU2_HAS_ESN 0x20 /* 1: msg has ESN field */
#define SPU2_HASH_KEY_LEN 0xFF00 /* len of hash key in bytes.
* HMAC only.
*/
#define SPU2_HASH_KEY_LEN_SHIFT 8
#define SPU2_CIPH_KEY_LEN 0xFF00000 /* len of cipher key in bytes */
#define SPU2_CIPH_KEY_LEN_SHIFT 20
#define SPU2_GENIV 0x10000000 /* 1: hw generates IV */
#define SPU2_HASH_IV 0x20000000 /* 1: IV incl in hash */
#define SPU2_RET_IV 0x40000000 /* 1: return IV in output msg
* b4 payload
*/
#define SPU2_RET_IV_LEN 0xF00000000 /* length in bytes of IV returned.
* 0 = 16 bytes
*/
#define SPU2_RET_IV_LEN_SHIFT 32
#define SPU2_IV_OFFSET 0xF000000000 /* gen IV offset */
#define SPU2_IV_OFFSET_SHIFT 36
#define SPU2_IV_LEN 0x1F0000000000 /* length of input IV in bytes */
#define SPU2_IV_LEN_SHIFT 40
#define SPU2_HASH_TAG_LEN 0x7F000000000000 /* hash tag length in bytes */
#define SPU2_HASH_TAG_LEN_SHIFT 48
#define SPU2_RETURN_MD 0x300000000000000 /* return metadata */
#define SPU2_RETURN_MD_SHIFT 56
#define SPU2_RETURN_FD 0x400000000000000
#define SPU2_RETURN_AAD1 0x800000000000000
#define SPU2_RETURN_NAAD 0x1000000000000000
#define SPU2_RETURN_AAD2 0x2000000000000000
#define SPU2_RETURN_PAY 0x4000000000000000 /* return payload */
/* FMD ctrl2 field masks */
#define SPU2_AAD1_OFFSET 0xFFF /* byte offset of AAD1 field */
#define SPU2_AAD1_LEN 0xFF000 /* length of AAD1 in bytes */
#define SPU2_AAD1_LEN_SHIFT 12
#define SPU2_AAD2_OFFSET 0xFFF00000 /* byte offset of AAD2 field */
#define SPU2_AAD2_OFFSET_SHIFT 20
#define SPU2_PL_OFFSET 0xFFFFFFFF00000000 /* payload offset from AAD2 */
#define SPU2_PL_OFFSET_SHIFT 32
/* FMD ctrl3 field masks */
#define SPU2_PL_LEN 0xFFFFFFFF /* payload length in bytes */
#define SPU2_TLS_LEN 0xFFFF00000000 /* TLS encrypt: cipher len
* TLS decrypt: compressed len
*/
#define SPU2_TLS_LEN_SHIFT 32
/*
* Max value that can be represented in the Payload Length field of the
* ctrl3 word of FMD.
*/
#define SPU2_MAX_PAYLOAD SPU2_PL_LEN
/* Error values returned in STATUS field of response messages */
#define SPU2_INVALID_ICV 1
void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len);
u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
enum spu_cipher_mode cipher_mode,
unsigned int blocksize);
u32 spu2_payload_length(u8 *spu_hdr);
u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash);
u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
u32 chunksize, u16 hash_block_size);
u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
unsigned int data_size);
u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
unsigned int assoc_len, unsigned int iv_len,
bool is_encrypt);
u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode,
u16 iv_len);
enum hash_type spu2_hash_type(u32 src_sent);
u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
enum hash_type htype);
u32 spu2_create_request(u8 *spu_hdr,
struct spu_request_opts *req_opts,
struct spu_cipher_parms *cipher_parms,
struct spu_hash_parms *hash_parms,
struct spu_aead_parms *aead_parms,
unsigned int data_size);
u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms);
void spu2_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
bool update_key,
unsigned int data_size);
void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
enum hash_alg auth_alg, enum hash_mode auth_mode,
unsigned int total_sent, u32 status_padding);
u8 spu2_xts_tweak_in_payload(void);
u8 spu2_tx_status_len(void);
u8 spu2_rx_status_len(void);
int spu2_status_process(u8 *statp);
void spu2_ccm_update_iv(unsigned int digestsize,
struct spu_cipher_parms *cipher_parms,
unsigned int assoclen, unsigned int chunksize,
bool is_encrypt, bool is_esp);
u32 spu2_wordalign_padlen(u32 data_size);
#endif
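
Because these SPU2 encodings differ from the hardware-independent enums in spu.h (for example, CIPHER_MODE_CTR is 0x4 generically but SPU2_CIPHER_MODE_CTR is 0x2), the suppressed spu2.c has to translate between the two. A minimal sketch of such a mapping, with the function name assumed for illustration:

static enum spu2_cipher_mode spu2_cipher_mode_xlate(enum spu_cipher_mode mode)
{
        /* Map a generic spu.h cipher mode onto the SPU2 hardware encoding */
        switch (mode) {
        case CIPHER_MODE_ECB: return SPU2_CIPHER_MODE_ECB;
        case CIPHER_MODE_CBC: return SPU2_CIPHER_MODE_CBC;
        case CIPHER_MODE_OFB: return SPU2_CIPHER_MODE_OFB;
        case CIPHER_MODE_CFB: return SPU2_CIPHER_MODE_CFB;
        case CIPHER_MODE_CTR: return SPU2_CIPHER_MODE_CTR;
        case CIPHER_MODE_XTS: return SPU2_CIPHER_MODE_XTS;
        case CIPHER_MODE_CCM: return SPU2_CIPHER_MODE_CCM;
        case CIPHER_MODE_GCM: return SPU2_CIPHER_MODE_GCM;
        default:              return SPU2_CIPHER_MODE_LAST;
        }
}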

drivers/crypto/bcm/spum.h (new file, 174 lines)
@@ -0,0 +1,174 @@
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
/*
* This file contains SPU message definitions specific to SPU-M.
*/
#ifndef _SPUM_H_
#define _SPUM_H_
#define SPU_CRYPTO_OPERATION_GENERIC 0x1
/* Length of STATUS field in tx and rx packets */
#define SPU_TX_STATUS_LEN 4
/* SPU-M error codes */
#define SPU_STATUS_MASK 0x0000FF00
#define SPU_STATUS_SUCCESS 0x00000000
#define SPU_STATUS_INVALID_ICV 0x00000100
#define SPU_STATUS_ERROR_FLAG 0x00020000
/* Request message fixed fields: MH (4) + EMH (4) + BDESC (12) + BD header (4) = 24 bytes */
#define SPU_REQ_FIXED_LEN 24
/*
* Max length of a SPU message header. Used to allocate a buffer where
* the SPU message header is constructed. Can be used for either a SPU-M
* header or a SPU2 header.
* For SPU-M, sum of the following:
* MH - 4 bytes
* EMH - 4
* SCTX - 3 +
* max auth key len - 64
* max cipher key len - 264 (RC4)
* max IV len - 16
* BDESC - 12
* BD header - 4
* Total: 371
*
* For SPU2, FMD_SIZE (32) plus lengths of hash and cipher keys,
* hash and cipher IVs. If SPU2 does not support RC4, then
*/
#define SPU_HEADER_ALLOC_LEN (SPU_REQ_FIXED_LEN + MAX_KEY_SIZE + \
MAX_KEY_SIZE + MAX_IV_SIZE)
/*
* Response message header length. Normally MH, EMH, BD header, but when
* BD_SUPPRESS is used for hash requests, there is no BD header.
*/
#define SPU_RESP_HDR_LEN 12
#define SPU_HASH_RESP_HDR_LEN 8
/*
* Max value that can be represented in the Payload Length field of the BD
* header. This is a 16-bit field.
*/
#define SPUM_NS2_MAX_PAYLOAD (BIT(16) - 1)
/*
* NSP SPU is limited to ~9KB because of FA2 FIFO size limitations;
* set MAX_PAYLOAD to 8k to allow for the addition of header, digest, etc.
* and stay within limitation.
*/
#define SPUM_NSP_MAX_PAYLOAD 8192
/* Buffer Descriptor Header [BDESC]. SPU in big-endian mode. */
struct BDESC_HEADER {
u16 offset_mac; /* word 0 [31-16] */
u16 length_mac; /* word 0 [15-0] */
u16 offset_crypto; /* word 1 [31-16] */
u16 length_crypto; /* word 1 [15-0] */
u16 offset_icv; /* word 2 [31-16] */
u16 offset_iv; /* word 2 [15-0] */
};
/* Buffer Data Header [BD]. SPU in big-endian mode. */
struct BD_HEADER {
u16 size;
u16 prev_length;
};
/* Command Context Header. SPU-M in big endian mode. */
struct MHEADER {
u8 flags; /* [31:24] */
u8 op_code; /* [23:16] */
u16 reserved; /* [15:0] */
};
/* MH header flags bits */
#define MH_SUPDT_PRES BIT(0)
#define MH_HASH_PRES BIT(2)
#define MH_BD_PRES BIT(3)
#define MH_MFM_PRES BIT(4)
#define MH_BDESC_PRES BIT(5)
#define MH_SCTX_PRES BIT(7)
/* SCTX word 0 bit offsets and fields masks */
#define SCTX_SIZE 0x000000FF
/* SCTX word 1 bit shifts and field masks */
#define UPDT_OFST 0x000000FF /* offset of SCTX updateable fld */
#define HASH_TYPE 0x00000300 /* hash alg operation type */
#define HASH_TYPE_SHIFT 8
#define HASH_MODE 0x00001C00 /* one of hash_mode */
#define HASH_MODE_SHIFT 10
#define HASH_ALG 0x0000E000 /* hash algorithm */
#define HASH_ALG_SHIFT 13
#define CIPHER_TYPE 0x00030000 /* encryption operation type */
#define CIPHER_TYPE_SHIFT 16
#define CIPHER_MODE 0x001C0000 /* encryption mode */
#define CIPHER_MODE_SHIFT 18
#define CIPHER_ALG 0x00E00000 /* encryption algo */
#define CIPHER_ALG_SHIFT 21
#define ICV_IS_512 BIT(27)
#define ICV_IS_512_SHIFT 27
#define CIPHER_ORDER BIT(30)
#define CIPHER_ORDER_SHIFT 30
#define CIPHER_INBOUND BIT(31)
#define CIPHER_INBOUND_SHIFT 31
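/*
 * Illustration only: the field masks and *_SHIFT values above compose the
 * 32-bit SCTX cipher_flags word, roughly (the actual hardware encodings are
 * produced in spu.c, whose diff is suppressed above):
 *
 *	cipher_flags |= (alg << CIPHER_ALG_SHIFT) & CIPHER_ALG;
 *	cipher_flags |= (mode << CIPHER_MODE_SHIFT) & CIPHER_MODE;
 *	if (is_inbound)
 *		cipher_flags |= CIPHER_INBOUND;
 */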
/* SCTX word 2 bit shifts and field masks */
#define EXP_IV_SIZE 0x7
#define IV_OFFSET BIT(3)
#define IV_OFFSET_SHIFT 3
#define GEN_IV BIT(5)
#define GEN_IV_SHIFT 5
#define EXPLICIT_IV BIT(6)
#define EXPLICIT_IV_SHIFT 6
#define SCTX_IV BIT(7)
#define SCTX_IV_SHIFT 7
#define ICV_SIZE 0x0F00
#define ICV_SIZE_SHIFT 8
#define CHECK_ICV BIT(12)
#define CHECK_ICV_SHIFT 12
#define INSERT_ICV BIT(13)
#define INSERT_ICV_SHIFT 13
#define BD_SUPPRESS BIT(19)
#define BD_SUPPRESS_SHIFT 19
/* Generic Mode Security Context Structure [SCTX] */
struct SCTX {
/* word 0: protocol flags */
u32 proto_flags;
/* word 1: cipher flags */
u32 cipher_flags;
/* word 2: Extended cipher flags */
u32 ecf;
};
struct SPUHEADER {
struct MHEADER mh;
u32 emh;
struct SCTX sa;
};
#endif /* _SPUM_H_ */

drivers/crypto/bcm/util.c (new file, 581 lines)
@@ -0,0 +1,581 @@
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
#include <linux/debugfs.h>
#include "cipher.h"
#include "util.h"
/* offset of SPU_OFIFO_CTRL register */
#define SPU_OFIFO_CTRL 0x40
#define SPU_FIFO_WATERMARK 0x1FF
/**
* spu_sg_at_offset() - Find the scatterlist entry at a given distance from the
* start of a scatterlist.
* @sg: [in] Start of a scatterlist
* @skip: [in] Distance from the start of the scatterlist, in bytes
* @sge: [out] Scatterlist entry at skip bytes from start
* @sge_offset: [out] Number of bytes from start of sge buffer to get to
* requested distance.
*
* Return: 0 if entry found at requested distance
* < 0 otherwise
*/
int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
struct scatterlist **sge, unsigned int *sge_offset)
{
/* byte index from start of sg to the end of the previous entry */
unsigned int index = 0;
/* byte index from start of sg to the end of the current entry */
unsigned int next_index;
next_index = sg->length;
while (next_index <= skip) {
sg = sg_next(sg);
index = next_index;
if (!sg)
return -EINVAL;
next_index += sg->length;
}
*sge_offset = skip - index;
*sge = sg;
return 0;
}
/* Copy len bytes of sg data, starting at offset skip, to a dest buffer */
void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
unsigned int len, unsigned int skip)
{
size_t copied;
unsigned int nents = sg_nents(src);
copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);
if (copied != len) {
flow_log("%s copied %u bytes of %u requested. ",
__func__, (u32)copied, len);
flow_log("sg with %u entries and skip %u\n", nents, skip);
}
}
/*
* Copy data into a scatterlist starting at a specified offset in the
* scatterlist. Specifically, copy len bytes of data in the buffer src
* into the scatterlist dest, starting skip bytes into the scatterlist.
*/
void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
unsigned int len, unsigned int skip)
{
size_t copied;
unsigned int nents = sg_nents(dest);
copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);
if (copied != len) {
flow_log("%s copied %u bytes of %u requested. ",
__func__, (u32)copied, len);
flow_log("sg with %u entries and skip %u\n", nents, skip);
}
}
/**
* spu_sg_count() - Determine number of elements in scatterlist to provide a
* specified number of bytes.
* @sg_list: scatterlist to examine
* @skip: index of starting point
* @nbytes: consider elements of scatterlist until reaching this number of
* bytes
*
* Return: the number of sg entries contributing to nbytes of data
*/
int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
{
struct scatterlist *sg;
int sg_nents = 0;
unsigned int offset;
if (!sg_list)
return 0;
if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)
return 0;
while (sg && (nbytes > 0)) {
sg_nents++;
nbytes -= (sg->length - offset);
offset = 0;
sg = sg_next(sg);
}
return sg_nents;
}
/**
* spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a
* given length.
* @to_sg: scatterlist to copy to
* @from_sg: scatterlist to copy from
* @from_skip: number of bytes to skip in from_sg. Non-zero when previous
* request included part of the buffer in entry in from_sg.
* Assumes from_skip < from_sg->length.
* @from_nents: number of entries in from_sg
* @length: number of bytes to copy. May reach this limit before exhausting
* from_sg.
*
* Copies the entries themselves, not the data in the entries. Assumes to_sg has
* enough entries. Does not limit the size of an individual buffer in to_sg.
*
* to_sg, from_sg, skip are all updated to end of copy
*
* Return: Number of bytes copied
*/
u32 spu_msg_sg_add(struct scatterlist **to_sg,
struct scatterlist **from_sg, u32 *from_skip,
u8 from_nents, u32 length)
{
struct scatterlist *sg; /* an entry in from_sg */
struct scatterlist *to = *to_sg;
struct scatterlist *from = *from_sg;
u32 skip = *from_skip;
u32 offset;
int i;
u32 entry_len = 0;
u32 frag_len = 0; /* length of entry added to to_sg */
u32 copied = 0; /* number of bytes copied so far */
if (length == 0)
return 0;
for_each_sg(from, sg, from_nents, i) {
/* number of bytes in this from entry not yet used */
entry_len = sg->length - skip;
frag_len = min(entry_len, length - copied);
offset = sg->offset + skip;
if (frag_len)
sg_set_page(to++, sg_page(sg), frag_len, offset);
copied += frag_len;
if (copied == entry_len) {
/* used up all of from entry */
skip = 0; /* start at beginning of next entry */
}
if (copied == length)
break;
}
*to_sg = to;
*from_sg = sg;
if (frag_len < entry_len)
*from_skip = skip + frag_len;
else
*from_skip = 0;
return copied;
}
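/*
 * add_to_ctr() - Add increment to a 16-byte, big-endian counter block (e.g.
 * an AES counter), propagating any carry out of the low 8 bytes into the
 * high 8 bytes.
 */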
void add_to_ctr(u8 *ctr_pos, unsigned int increment)
{
__be64 *high_be = (__be64 *)ctr_pos;
__be64 *low_be = high_be + 1;
u64 orig_low = __be64_to_cpu(*low_be);
u64 new_low = orig_low + (u64)increment;
*low_be = __cpu_to_be64(new_low);
if (new_low < orig_low)
/* there was a carry from the low 8 bytes */
*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
}
struct sdesc {
struct shash_desc shash;
char ctx[];
};
/* do a synchronous decrypt operation */
int do_decrypt(char *alg_name,
void *key_ptr, unsigned int key_len,
void *iv_ptr, void *src_ptr, void *dst_ptr,
unsigned int block_len)
{
struct scatterlist sg_in[1], sg_out[1];
struct crypto_blkcipher *tfm =
crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
int ret = 0;
void *iv;
int ivsize;
flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);
sg_init_table(sg_in, 1);
sg_set_buf(sg_in, src_ptr, block_len);
sg_init_table(sg_out, 1);
sg_set_buf(sg_out, dst_ptr, block_len);
iv = crypto_blkcipher_crt(tfm)->iv;
ivsize = crypto_blkcipher_ivsize(tfm);
memcpy(iv, iv_ptr, ivsize);
ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
crypto_free_blkcipher(tfm);
if (ret < 0)
pr_err("aes_decrypt failed %d\n", ret);
return ret;
}
/**
* do_shash() - Do a synchronous hash operation in software
* @name: The name of the hash algorithm
* @result: Buffer where digest is to be written
* @data1: First part of data to hash. May be NULL.
* @data1_len: Length of data1, in bytes
* @data2: Second part of data to hash. May be NULL.
* @data2_len: Length of data2, in bytes
* @key: Key (if keyed hash)
* @key_len: Length of key, in bytes (or 0 if non-keyed hash)
*
* Note that the crypto API will not select this driver's own transform because
* this driver only registers asynchronous algos.
*
* Return: 0 if hash successfully stored in result
* < 0 otherwise
*/
int do_shash(unsigned char *name, unsigned char *result,
const u8 *data1, unsigned int data1_len,
const u8 *data2, unsigned int data2_len,
const u8 *key, unsigned int key_len)
{
int rc;
unsigned int size;
struct crypto_shash *hash;
struct sdesc *sdesc;
hash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(hash)) {
rc = PTR_ERR(hash);
pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
pr_err("%s: Memory allocation failure", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
sdesc->shash.flags = 0x0;
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
if (rc) {
pr_err("%s: Could not setkey %s shash", __func__, name);
goto do_shash_err;
}
}
rc = crypto_shash_init(&sdesc->shash);
if (rc) {
pr_err("%s: Could not init %s shash", __func__, name);
goto do_shash_err;
}
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
if (rc) {
pr_err("%s: Could not update1", __func__);
goto do_shash_err;
}
if (data2 && data2_len) {
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
if (rc) {
pr_err("%s: Could not update2", __func__);
goto do_shash_err;
}
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
pr_err("%s: Could not genereate %s hash", __func__, name);
do_shash_err:
crypto_free_shash(hash);
kfree(sdesc);
return rc;
}
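/*
 * Hedged usage sketch (not from the patch): for example, a software SHA-256
 * digest of a single buffer could be computed as
 *
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int rc = do_shash("sha256", digest, data, data_len, NULL, 0, NULL, 0);
 */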
/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
u8 dbuf[16];
unsigned int idx = skip;
unsigned int num_out = 0; /* number of bytes dumped so far */
unsigned int count;
if (packet_debug_logging) {
while (num_out < len) {
count = (len - num_out > 16) ? 16 : len - num_out;
sg_copy_part_to_buf(sg, dbuf, count, idx);
num_out += count;
print_hex_dump(KERN_ALERT, " sg: ", DUMP_PREFIX_NONE,
4, 1, dbuf, count, false);
idx += 16;
}
}
if (debug_logging_sleep)
msleep(debug_logging_sleep);
}
/* Returns the name for a given cipher alg/mode */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
{
switch (alg) {
case CIPHER_ALG_RC4:
return "rc4";
case CIPHER_ALG_AES:
switch (mode) {
case CIPHER_MODE_CBC:
return "cbc(aes)";
case CIPHER_MODE_ECB:
return "ecb(aes)";
case CIPHER_MODE_OFB:
return "ofb(aes)";
case CIPHER_MODE_CFB:
return "cfb(aes)";
case CIPHER_MODE_CTR:
return "ctr(aes)";
case CIPHER_MODE_XTS:
return "xts(aes)";
case CIPHER_MODE_GCM:
return "gcm(aes)";
default:
return "aes";
}
break;
case CIPHER_ALG_DES:
switch (mode) {
case CIPHER_MODE_CBC:
return "cbc(des)";
case CIPHER_MODE_ECB:
return "ecb(des)";
case CIPHER_MODE_CTR:
return "ctr(des)";
default:
return "des";
}
break;
case CIPHER_ALG_3DES:
switch (mode) {
case CIPHER_MODE_CBC:
return "cbc(des3_ede)";
case CIPHER_MODE_ECB:
return "ecb(des3_ede)";
case CIPHER_MODE_CTR:
return "ctr(des3_ede)";
default:
return "3des";
}
break;
default:
return "other";
}
}
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct device_private *ipriv;
char *buf;
ssize_t ret, out_offset, out_count;
int i;
u32 fifo_len;
u32 spu_ofifo_ctrl;
u32 alg;
u32 mode;
u32 op_cnt;
out_count = 2048;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ipriv = filp->private_data;
out_offset = 0;
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Number of SPUs.........%u\n",
ipriv->spu.num_spu);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Current sessions.......%u\n",
atomic_read(&ipriv->session_count));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Session count..........%u\n",
atomic_read(&ipriv->stream_count));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Cipher setkey..........%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Cipher Ops.............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
spu_alg_name(alg, mode), op_cnt);
}
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Hash Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"HMAC setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"HMAC Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"AEAD setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"AEAD Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
if (op_cnt) {
out_offset += snprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
aead_alg_name[alg], op_cnt);
}
}
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Bytes of req data......%llu\n",
(u64)atomic64_read(&ipriv->bytes_out));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Bytes of resp data.....%llu\n",
(u64)atomic64_read(&ipriv->bytes_in));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Mailbox full...........%u\n",
atomic_read(&ipriv->mb_no_spc));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Mailbox send failures..%u\n",
atomic_read(&ipriv->mb_send_fail));
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"Check ICV errors.......%u\n",
atomic_read(&ipriv->bad_icv));
if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
for (i = 0; i < ipriv->spu.num_spu; i++) {
spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
SPU_OFIFO_CTRL);
fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
out_offset += snprintf(buf + out_offset,
out_count - out_offset,
"SPU %d output FIFO high water.....%u\n",
i, fifo_len);
}
if (out_offset > out_count)
out_offset = out_count;
ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
kfree(buf);
return ret;
}
static const struct file_operations spu_debugfs_stats = {
.owner = THIS_MODULE,
.open = simple_open,
.read = spu_debugfs_read,
};
/*
* Create the debugfs directories. If the top-level directory has not yet
* been created, create it now. Create a stats file in this directory for
* the SPU driver.
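* With the module named bcm_crypto_spu (see the bcm/Makefile above), the
* stats file is expected to appear as
* /sys/kernel/debug/bcm_crypto_spu/stats when debugfs is mounted.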
*/
void spu_setup_debugfs(void)
{
if (!debugfs_initialized())
return;
if (!iproc_priv.debugfs_dir)
iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
NULL);
if (!iproc_priv.debugfs_stats)
/* Create file with permissions S_IRUSR */
debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir,
&iproc_priv, &spu_debugfs_stats);
}
void spu_free_debugfs(void)
{
debugfs_remove_recursive(iproc_priv.debugfs_dir);
iproc_priv.debugfs_dir = NULL;
}
/**
* format_value_ccm() - Format a value into a buffer, using a specified number
* of bytes (i.e. maybe writing value X into a 4 byte
* buffer, or maybe into a 12 byte buffer), as per the
* SPU CCM spec.
*
* @val: value to write (up to max of unsigned int)
* @buf: (pointer to) buffer to write the value
* @len: number of bytes to use (0 to 255)
*
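* Example: format_value_ccm(0x0203, buf, 4) writes { 0x00, 0x00, 0x02, 0x03 }.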
*/
void format_value_ccm(unsigned int val, u8 *buf, u8 len)
{
int i;
/* First clear full output buffer */
memset(buf, 0, len);
/* Then, starting from right side, fill in with data */
for (i = 0; i < len; i++) {
buf[len - i - 1] = (val >> (8 * i)) & 0xff;
if (i >= 3)
break; /* Only handle up to 32 bits of 'val' */
}
}

drivers/crypto/bcm/util.h (new file, 116 lines)
@@ -0,0 +1,116 @@
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
#ifndef _UTIL_H
#define _UTIL_H
#include <linux/kernel.h>
#include <linux/delay.h>
#include "spu.h"
extern int flow_debug_logging;
extern int packet_debug_logging;
extern int debug_logging_sleep;
#ifdef DEBUG
#define flow_log(...) \
do { \
if (flow_debug_logging) { \
printk(__VA_ARGS__); \
if (debug_logging_sleep) \
msleep(debug_logging_sleep); \
} \
} while (0)
#define flow_dump(msg, var, var_len) \
do { \
if (flow_debug_logging) { \
print_hex_dump(KERN_ALERT, msg, DUMP_PREFIX_NONE, \
16, 1, var, var_len, false); \
if (debug_logging_sleep) \
msleep(debug_logging_sleep); \
} \
} while (0)
#define packet_log(...) \
do { \
if (packet_debug_logging) { \
printk(__VA_ARGS__); \
if (debug_logging_sleep) \
msleep(debug_logging_sleep); \
} \
} while (0)
#define packet_dump(msg, var, var_len) \
do { \
if (packet_debug_logging) { \
print_hex_dump(KERN_ALERT, msg, DUMP_PREFIX_NONE, \
16, 1, var, var_len, false); \
if (debug_logging_sleep) \
msleep(debug_logging_sleep); \
} \
} while (0)
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len);
#define dump_sg(sg, skip, len) __dump_sg(sg, skip, len)
#else /* !DEBUG */
#define flow_log(...) do {} while (0)
#define flow_dump(msg, var, var_len) do {} while (0)
#define packet_log(...) do {} while (0)
#define packet_dump(msg, var, var_len) do {} while (0)
#define dump_sg(sg, skip, len) do {} while (0)
#endif /* DEBUG */
int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
struct scatterlist **sge, unsigned int *sge_offset);
/* Copy sg data, from skip, length len, to dest */
void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
unsigned int len, unsigned int skip);
/* Copy src into scatterlist from offset, length len */
void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
unsigned int len, unsigned int skip);
int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes);
u32 spu_msg_sg_add(struct scatterlist **to_sg,
struct scatterlist **from_sg, u32 *skip,
u8 from_nents, u32 tot_len);
void add_to_ctr(u8 *ctr_pos, unsigned int increment);
/* do a synchronous decrypt operation */
int do_decrypt(char *alg_name,
void *key_ptr, unsigned int key_len,
void *iv_ptr, void *src_ptr, void *dst_ptr,
unsigned int block_len);
/* produce a message digest from data of length n bytes */
int do_shash(unsigned char *name, unsigned char *result,
const u8 *data1, unsigned int data1_len,
const u8 *data2, unsigned int data2_len,
const u8 *key, unsigned int key_len);
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode);
void spu_setup_debugfs(void);
void spu_free_debugfs(void);
void format_value_ccm(unsigned int val, u8 *buf, u8 len);
#endif