staging: ccree: Strip trailing whitespace

Fix the 994 trailing-whitespace checkpatch errors (out of 1571 total
checkpatch issues) in the ccree driver.

Signed-off-by: Timothée Isnard <timotheecisnard@gmail.com>
Acked-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Timothée Isnard 2017-05-03 22:03:13 +02:00 committed by Greg Kroah-Hartman
parent 2ea659a9ef
commit c8f1786531
47 changed files with 993 additions and 993 deletions


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -163,7 +163,7 @@ enum drv_hash_mode {
DRV_HASH_SHA512 = 3,
DRV_HASH_SHA384 = 4,
DRV_HASH_MD5 = 5,
DRV_HASH_CBC_MAC = 6,
DRV_HASH_CBC_MAC = 6,
DRV_HASH_XCBC_MAC = 7,
DRV_HASH_CMAC = 8,
DRV_HASH_MODE_NUM = 9,


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -124,12 +124,12 @@ typedef enum SetupOp {
SETUP_LOAD_STATE2 = 3,
SETUP_LOAD_KEY0 = 4,
SETUP_LOAD_XEX_KEY = 5,
SETUP_WRITE_STATE0 = 8,
SETUP_WRITE_STATE0 = 8,
SETUP_WRITE_STATE1 = 9,
SETUP_WRITE_STATE2 = 10,
SETUP_WRITE_STATE3 = 11,
setupOp_OPTIONTS,
setupOp_END = INT32_MAX,
setupOp_END = INT32_MAX,
}SetupOp_t;
enum AesMacSelector {
@ -196,7 +196,7 @@ void descriptor_log(HwDesc_s *desc);
#if defined(HW_DESCRIPTOR_LOG) || defined(HW_DESC_DUMP_HOST_BUF)
#define LOG_HW_DESC(pDesc) descriptor_log(pDesc)
#else
#define LOG_HW_DESC(pDesc)
#define LOG_HW_DESC(pDesc)
#endif
#if (CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE) || defined(OEMFW_LOG)
@ -204,8 +204,8 @@ void descriptor_log(HwDesc_s *desc);
#ifdef UART_PRINTF
#define CREATE_DETAILED_DUMP(pDesc) createDetailedDump(pDesc)
#else
#define CREATE_DETAILED_DUMP(pDesc)
#endif
#define CREATE_DETAILED_DUMP(pDesc)
#endif
#define HW_DESC_DUMP(pDesc) do { \
CC_PAL_LOG_TRACE("\n---------------------------------------------------\n"); \
@ -226,7 +226,7 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro indicates the end of current HW descriptors flow and release the HW engines.
*
*
* \param pDesc pointer HW descriptor struct
*/
#define HW_DESC_SET_QUEUE_LAST_IND(pDesc) \
@ -236,8 +236,8 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro signs the end of HW descriptors flow by asking for completion ack, and release the HW engines
*
* \param pDesc pointer HW descriptor struct
*
* \param pDesc pointer HW descriptor struct
*/
#define HW_DESC_SET_ACK_LAST(pDesc) \
do { \
@ -250,11 +250,11 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro sets the DIN field of a HW descriptors
*
* \param pDesc pointer HW descriptor struct
*
* \param pDesc pointer HW descriptor struct
* \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
* \param dinAdr DIN address
* \param dinSize Data size in bytes
* \param dinSize Data size in bytes
* \param axiNs AXI secure bit
*/
#define HW_DESC_SET_DIN_TYPE(pDesc, dmaMode, dinAdr, dinSize, axiNs) \
@ -268,12 +268,12 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro sets the DIN field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and
* other special modes
*
* This macro sets the DIN field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and
* other special modes
*
* \param pDesc pointer HW descriptor struct
* \param dinAdr DIN address
* \param dinSize Data size in bytes
* \param dinSize Data size in bytes
*/
#define HW_DESC_SET_DIN_NO_DMA(pDesc, dinAdr, dinSize) \
do { \
@ -282,13 +282,13 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the DIN field of a HW descriptors to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
*
* This macro sets the DIN field of a HW descriptors to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
*
* \param pDesc pointer HW descriptor struct
* \param dinAdr DIN address
* \param dinSize Data size in bytes
* \param dinSize Data size in bytes
*/
#define HW_DESC_SET_DIN_SRAM(pDesc, dinAdr, dinSize) \
do { \
@ -297,11 +297,11 @@ void descriptor_log(HwDesc_s *desc);
CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \
} while (0)
/*! This macro sets the DIN field of a HW descriptors to CONST mode
*
/*! This macro sets the DIN field of a HW descriptors to CONST mode
*
* \param pDesc pointer HW descriptor struct
* \param val DIN const value
* \param dinSize Data size in bytes
* \param dinSize Data size in bytes
*/
#define HW_DESC_SET_DIN_CONST(pDesc, val, dinSize) \
do { \
@ -313,7 +313,7 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro sets the DIN not last input data indicator
*
*
* \param pDesc pointer HW descriptor struct
*/
#define HW_DESC_SET_DIN_NOT_LAST_INDICATION(pDesc) \
@ -322,12 +322,12 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the DOUT field of a HW descriptors
*
* \param pDesc pointer HW descriptor struct
* This macro sets the DOUT field of a HW descriptors
*
* \param pDesc pointer HW descriptor struct
* \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
* \param doutAdr DOUT address
* \param doutSize Data size in bytes
* \param doutSize Data size in bytes
* \param axiNs AXI secure bit
*/
#define HW_DESC_SET_DOUT_TYPE(pDesc, dmaMode, doutAdr, doutSize, axiNs) \
@ -340,14 +340,14 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the DOUT field of a HW descriptors to DLLI type
* The LAST INDICATION is provided by the user
*
* \param pDesc pointer HW descriptor struct
* This macro sets the DOUT field of a HW descriptors to DLLI type
* The LAST INDICATION is provided by the user
*
* \param pDesc pointer HW descriptor struct
* \param doutAdr DOUT address
* \param doutSize Data size in bytes
* \param doutSize Data size in bytes
* \param lastInd The last indication bit
* \param axiNs AXI secure bit
* \param axiNs AXI secure bit
*/
#define HW_DESC_SET_DOUT_DLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd) \
do { \
@ -360,14 +360,14 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the DOUT field of a HW descriptors to DLLI type
* The LAST INDICATION is provided by the user
*
* \param pDesc pointer HW descriptor struct
* This macro sets the DOUT field of a HW descriptors to DLLI type
* The LAST INDICATION is provided by the user
*
* \param pDesc pointer HW descriptor struct
* \param doutAdr DOUT address
* \param doutSize Data size in bytes
* \param doutSize Data size in bytes
* \param lastInd The last indication bit
* \param axiNs AXI secure bit
* \param axiNs AXI secure bit
*/
#define HW_DESC_SET_DOUT_MLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd) \
do { \
@ -380,12 +380,12 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the DOUT field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and
* other special modes
*
* This macro sets the DOUT field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and
* other special modes
*
* \param pDesc pointer HW descriptor struct
* \param doutAdr DOUT address
* \param doutSize Data size in bytes
* \param doutSize Data size in bytes
* \param registerWriteEnable Enables a write operation to a register
*/
#define HW_DESC_SET_DOUT_NO_DMA(pDesc, doutAdr, doutSize, registerWriteEnable) \
@ -396,8 +396,8 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the word for the XOR operation.
*
* This macro sets the word for the XOR operation.
*
* \param pDesc pointer HW descriptor struct
* \param xorVal xor data value
*/
@ -408,7 +408,7 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro sets the XOR indicator bit in the descriptor
*
*
* \param pDesc pointer HW descriptor struct
*/
#define HW_DESC_SET_XOR_ACTIVE(pDesc) \
@ -418,7 +418,7 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro selects the AES engine instead of HASH engine when setting up combined mode with AES XCBC MAC
*
*
* \param pDesc pointer HW descriptor struct
*/
#define HW_DESC_SET_AES_NOT_HASH_MODE(pDesc) \
@ -428,12 +428,12 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro sets the DOUT field of a HW descriptors to SRAM mode
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
*
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
*
* \param pDesc pointer HW descriptor struct
* \param doutAdr DOUT address
* \param doutSize Data size in bytes
* \param doutSize Data size in bytes
*/
#define HW_DESC_SET_DOUT_SRAM(pDesc, doutAdr, doutSize) \
do { \
@ -445,7 +445,7 @@ void descriptor_log(HwDesc_s *desc);
/*!
* This macro sets the data unit size for XEX mode in data_out_addr[15:0]
*
*
* \param pDesc pointer HW descriptor struct
* \param dataUnitSize data unit size for XEX mode
*/
@ -588,9 +588,9 @@ void descriptor_log(HwDesc_s *desc);
} while (0)
/*!
* This macro sets the DIN field of a HW descriptors to star/stop monitor descriptor.
* This macro sets the DIN field of a HW descriptors to star/stop monitor descriptor.
* Used for performance measurements and debug purposes.
*
*
* \param pDesc pointer HW descriptor struct
*/
#define HW_DESC_SET_DIN_MONITOR_CNTR(pDesc) \

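For context only — this is not part of the commit — here is a minimal sketch of how the HW_DESC_* macros above are chained to build a single descriptor. It mirrors the BYPASS copy sequence that appears later in this patch in ssi_get_plain_hmac_key(); the helper name and its parameters are purely illustrative:

/* Illustrative helper (not in the driver): queue one BYPASS descriptor that
 * copies len bytes from src_dma to dst_dma, following the macro usage shown
 * in ssi_get_plain_hmac_key() further down in this commit.
 */
static void build_bypass_copy_desc(HwDesc_s desc[], unsigned int *seq_size,
				   dma_addr_t src_dma, dma_addr_t dst_dma,
				   unsigned int len)
{
	unsigned int idx = *seq_size;

	HW_DESC_INIT(&desc[idx]);
	/* DIN: read len bytes from src_dma via DLLI DMA, non-secure AXI */
	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, src_dma, len, NS_BIT);
	/* DOUT: write the same len bytes to dst_dma, no last indication */
	HW_DESC_SET_DOUT_DLLI(&desc[idx], dst_dma, len, NS_BIT, 0);
	/* Route the data straight through without engaging a crypto engine */
	HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
	*seq_size = idx + 1;
}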

@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -21,8 +21,8 @@
#include "cc_pal_log_plat.h"
/*!
@file
@brief This file contains the PAL layer log definitions, by default the log is disabled.
@file
@brief This file contains the PAL layer log definitions, by default the log is disabled.
@defgroup cc_pal_log CryptoCell PAL logging APIs and definitions
@{
@ingroup cc_pal
@ -181,7 +181,7 @@ static inline void CC_PalLogMaskSet(uint32_t setMask) {CC_UNUSED_PARAM(setMask);
/*! Log debug data.*/
#define CC_PAL_LOG_DATA( ...) do {} while (0)
#endif
/**
/**
@}
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -17,15 +17,15 @@
#ifndef CC_PAL_TYPES_H
#define CC_PAL_TYPES_H
/*!
@file
@brief This file contains platform-dependent definitions and types.
/*!
@file
@brief This file contains platform-dependent definitions and types.
@defgroup cc_pal_types CryptoCell PAL platform dependant types
@{
@ingroup cc_pal
*/
#include "cc_pal_types_plat.h"
/*! Boolean definition.*/
@ -69,29 +69,29 @@ typedef enum {
#define CC_MIN( a , b ) ( ( (a) < (b) ) ? (a) : (b) )
#endif
#ifdef max
/*! Definition for maximum. */
#ifdef max
/*! Definition for maximum. */
#define CC_MAX(a,b) max( a , b )
#else
/*! Definition for maximum. */
/*! Definition for maximum. */
#define CC_MAX( a , b ) ( ( (a) > (b) ) ? (a) : (b) )
#endif
/*! Macro that calculates number of full bytes from bits (i.e. 7 bits are 1 byte). */
#define CALC_FULL_BYTES(numBits) ((numBits)/CC_BITS_IN_BYTE + (((numBits) & (CC_BITS_IN_BYTE-1)) > 0))
/*! Macro that calculates number of full 32bits words from bits (i.e. 31 bits are 1 word). */
#define CALC_FULL_32BIT_WORDS(numBits) ((numBits)/CC_BITS_IN_32BIT_WORD + (((numBits) & (CC_BITS_IN_32BIT_WORD-1)) > 0))
/*! Macro that calculates number of full 32bits words from bytes (i.e. 3 bytes are 1 word). */
#define CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) ((sizeBytes)/CC_32BIT_WORD_SIZE + (((sizeBytes) & (CC_32BIT_WORD_SIZE-1)) > 0))
/*! Macro that round up bits to 32bits words. */
/*! Macro that calculates number of full bytes from bits (i.e. 7 bits are 1 byte). */
#define CALC_FULL_BYTES(numBits) ((numBits)/CC_BITS_IN_BYTE + (((numBits) & (CC_BITS_IN_BYTE-1)) > 0))
/*! Macro that calculates number of full 32bits words from bits (i.e. 31 bits are 1 word). */
#define CALC_FULL_32BIT_WORDS(numBits) ((numBits)/CC_BITS_IN_32BIT_WORD + (((numBits) & (CC_BITS_IN_32BIT_WORD-1)) > 0))
/*! Macro that calculates number of full 32bits words from bytes (i.e. 3 bytes are 1 word). */
#define CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) ((sizeBytes)/CC_32BIT_WORD_SIZE + (((sizeBytes) & (CC_32BIT_WORD_SIZE-1)) > 0))
/*! Macro that round up bits to 32bits words. */
#define ROUNDUP_BITS_TO_32BIT_WORD(numBits) (CALC_FULL_32BIT_WORDS(numBits) * CC_BITS_IN_32BIT_WORD)
/*! Macro that round up bits to bytes. */
/*! Macro that round up bits to bytes. */
#define ROUNDUP_BITS_TO_BYTES(numBits) (CALC_FULL_BYTES(numBits) * CC_BITS_IN_BYTE)
/*! Macro that round up bytes to 32bits words. */
#define ROUNDUP_BYTES_TO_32BIT_WORD(sizeBytes) (CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) * CC_32BIT_WORD_SIZE)
/*! Macro that round up bytes to 32bits words. */
#define ROUNDUP_BYTES_TO_32BIT_WORD(sizeBytes) (CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) * CC_32BIT_WORD_SIZE)
/**
/**
@}
*/
#endif
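As a quick illustration (again, not part of the patch): the CALC_* and ROUNDUP_* helpers above are plain ceiling division. Assuming CC_BITS_IN_BYTE is 8, CC_BITS_IN_32BIT_WORD is 32 and CC_32BIT_WORD_SIZE is 4 (their conventional values), the following compile-time checks would hold:

	/* 13 bits occupy two full bytes: 13/8 == 1, plus 1 for the remainder */
	BUILD_BUG_ON(CALC_FULL_BYTES(13) != 2);
	/* 7 bytes need two 32-bit words: 7/4 == 1, plus 1 for the remainder */
	BUILD_BUG_ON(CALC_32BIT_WORDS_FROM_BYTES(7) != 2);
	/* The ROUNDUP_* forms scale the word/byte count back to bits or bytes */
	BUILD_BUG_ON(ROUNDUP_BITS_TO_32BIT_WORD(13) != 32);
	BUILD_BUG_ON(ROUNDUP_BYTES_TO_32BIT_WORD(7) != 8);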


@ -1,20 +1,20 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SSI_PAL_TYPES_PLAT_H
#define SSI_PAL_TYPES_PLAT_H
/* Linux kernel types */


@ -1,22 +1,22 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*!
* @file
* @file
* @brief This file contains macro definitions for accessing ARM TrustZone CryptoCell register space.
*/
@ -66,7 +66,7 @@ do { \
BITFIELD_GET(reg_val, CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
/* yael TBD !!! - *
/* yael TBD !!! - *
* all HW includes should start with CC_ and not DX_ !! */
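Purely for illustration (not part of the commit): the *_BIT_SHIFT/*_BIT_SIZE pairs defined in the register headers below are consumed by these CC_REG_FLD_* accessors, exactly as the descriptor macros earlier in this patch do. A hypothetical field update would look roughly like:

	u32 word1 = 0;
	/* Pack a DIN size of 16 into DSCRPTR_QUEUE_WORD1 via its shift/size pair,
	 * matching the usage inside HW_DESC_SET_DIN_SRAM() above.
	 */
	CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, word1, 16);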


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -20,31 +20,31 @@
// --------------------------------------
// BLOCK: DSCRPTR
// --------------------------------------
#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
#define DX_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
#define DX_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
@ -59,10 +59,10 @@
#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
@ -77,7 +77,7 @@
#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
@ -110,30 +110,30 @@
#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
// --------------------------------------
// BLOCK: AXI_P
// --------------------------------------
#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
#define DX_AXIM_MON_COMP_REG_OFFSET 0xB80UL
#define DX_AXIM_MON_COMP_REG_OFFSET 0xB80UL
#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
#define DX_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
#define DX_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
#define DX_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
@ -142,7 +142,7 @@
#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
#define DX_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
#define DX_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
#define DX_AXIM_CFG_REG_OFFSET 0xBE8UL
#define DX_AXIM_CFG_REG_OFFSET 0xBE8UL
#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
@ -151,7 +151,7 @@
#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
#define DX_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
#define DX_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
#define DX_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
@ -170,7 +170,7 @@
#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -20,49 +20,49 @@
// --------------------------------------
// BLOCK: FPGA_ENV_REGS
// --------------------------------------
#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET 0x024UL
#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET 0x024UL
#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_SCAN_MODE_REG_OFFSET 0x030UL
#define DX_ENV_SCAN_MODE_REG_OFFSET 0x030UL
#define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET 0x034UL
#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET 0x034UL
#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_HOST_INT_REG_OFFSET 0x0A0UL
#define DX_ENV_CC_HOST_INT_REG_OFFSET 0x0A0UL
#define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_PUB_HOST_INT_REG_OFFSET 0x0A4UL
#define DX_ENV_CC_PUB_HOST_INT_REG_OFFSET 0x0A4UL
#define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_RST_N_REG_OFFSET 0x0A8UL
#define DX_ENV_CC_RST_N_REG_OFFSET 0x0A8UL
#define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_RST_N_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_RST_OVERRIDE_REG_OFFSET 0x0ACUL
#define DX_ENV_RST_OVERRIDE_REG_OFFSET 0x0ACUL
#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET 0x0E0UL
#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET 0x0E0UL
#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_COLD_RST_REG_OFFSET 0x0FCUL
#define DX_ENV_CC_COLD_RST_REG_OFFSET 0x0FCUL
#define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_DUMMY_ADDR_REG_OFFSET 0x108UL
#define DX_ENV_DUMMY_ADDR_REG_OFFSET 0x108UL
#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_COUNTER_CLR_REG_OFFSET 0x118UL
#define DX_ENV_COUNTER_CLR_REG_OFFSET 0x118UL
#define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_COUNTER_RD_REG_OFFSET 0x11CUL
#define DX_ENV_COUNTER_RD_REG_OFFSET 0x11CUL
#define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET 0x430UL
#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET 0x430UL
#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_LCS_REG_OFFSET 0x43CUL
#define DX_ENV_CC_LCS_REG_OFFSET 0x43CUL
#define DX_ENV_CC_LCS_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_LCS_VALUE_BIT_SIZE 0x8UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET 0x440UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET 0x440UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT 0x0UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE 0x1UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT 0x1UL
@ -71,54 +71,54 @@
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE 0x1UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT 0x3UL
#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE 0x1UL
#define DX_ENV_DCU_EN_REG_OFFSET 0x444UL
#define DX_ENV_DCU_EN_REG_OFFSET 0x444UL
#define DX_ENV_DCU_EN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_DCU_EN_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET 0x448UL
#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET 0x448UL
#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_POWER_DOWN_REG_OFFSET 0x478UL
#define DX_ENV_POWER_DOWN_REG_OFFSET 0x478UL
#define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_DCU_H_EN_REG_OFFSET 0x484UL
#define DX_ENV_DCU_H_EN_REG_OFFSET 0x484UL
#define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_VERSION_REG_OFFSET 0x488UL
#define DX_ENV_VERSION_REG_OFFSET 0x488UL
#define DX_ENV_VERSION_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_VERSION_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_ROSC_WRITE_REG_OFFSET 0x48CUL
#define DX_ENV_ROSC_WRITE_REG_OFFSET 0x48CUL
#define DX_ENV_ROSC_WRITE_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_ROSC_WRITE_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_ROSC_ADDR_REG_OFFSET 0x490UL
#define DX_ENV_ROSC_ADDR_REG_OFFSET 0x490UL
#define DX_ENV_ROSC_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_ROSC_ADDR_VALUE_BIT_SIZE 0x8UL
#define DX_ENV_RESET_SESSION_KEY_REG_OFFSET 0x494UL
#define DX_ENV_RESET_SESSION_KEY_REG_OFFSET 0x494UL
#define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_SESSION_KEY_0_REG_OFFSET 0x4A0UL
#define DX_ENV_SESSION_KEY_0_REG_OFFSET 0x4A0UL
#define DX_ENV_SESSION_KEY_0_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SESSION_KEY_0_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_SESSION_KEY_1_REG_OFFSET 0x4A4UL
#define DX_ENV_SESSION_KEY_1_REG_OFFSET 0x4A4UL
#define DX_ENV_SESSION_KEY_1_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SESSION_KEY_1_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_SESSION_KEY_2_REG_OFFSET 0x4A8UL
#define DX_ENV_SESSION_KEY_2_REG_OFFSET 0x4A8UL
#define DX_ENV_SESSION_KEY_2_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SESSION_KEY_2_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_SESSION_KEY_3_REG_OFFSET 0x4ACUL
#define DX_ENV_SESSION_KEY_3_REG_OFFSET 0x4ACUL
#define DX_ENV_SESSION_KEY_3_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SESSION_KEY_3_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_SESSION_KEY_VALID_REG_OFFSET 0x4B0UL
#define DX_ENV_SESSION_KEY_VALID_REG_OFFSET 0x4B0UL
#define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_SPIDEN_REG_OFFSET 0x4D0UL
#define DX_ENV_SPIDEN_REG_OFFSET 0x4D0UL
#define DX_ENV_SPIDEN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_SPIDEN_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_AXIM_USER_PARAMS_REG_OFFSET 0x600UL
#define DX_ENV_AXIM_USER_PARAMS_REG_OFFSET 0x600UL
#define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SHIFT 0x0UL
#define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SIZE 0x5UL
#define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SHIFT 0x5UL
#define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SIZE 0x5UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_REG_OFFSET 0x604UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_REG_OFFSET 0x604UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SHIFT 0x0UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SIZE 0x1UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SHIFT 0x1UL
@ -127,97 +127,97 @@
#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SIZE 0x1UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SHIFT 0x3UL
#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SIZE 0x1UL
#define DX_ENV_AO_CC_KPLT_0_REG_OFFSET 0x620UL
#define DX_ENV_AO_CC_KPLT_0_REG_OFFSET 0x620UL
#define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KPLT_1_REG_OFFSET 0x624UL
#define DX_ENV_AO_CC_KPLT_1_REG_OFFSET 0x624UL
#define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KPLT_2_REG_OFFSET 0x628UL
#define DX_ENV_AO_CC_KPLT_2_REG_OFFSET 0x628UL
#define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KPLT_3_REG_OFFSET 0x62CUL
#define DX_ENV_AO_CC_KPLT_3_REG_OFFSET 0x62CUL
#define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KCST_0_REG_OFFSET 0x630UL
#define DX_ENV_AO_CC_KCST_0_REG_OFFSET 0x630UL
#define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KCST_1_REG_OFFSET 0x634UL
#define DX_ENV_AO_CC_KCST_1_REG_OFFSET 0x634UL
#define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KCST_2_REG_OFFSET 0x638UL
#define DX_ENV_AO_CC_KCST_2_REG_OFFSET 0x638UL
#define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_AO_CC_KCST_3_REG_OFFSET 0x63CUL
#define DX_ENV_AO_CC_KCST_3_REG_OFFSET 0x63CUL
#define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APB_FIPS_ADDR_REG_OFFSET 0x650UL
#define DX_ENV_APB_FIPS_ADDR_REG_OFFSET 0x650UL
#define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SIZE 0xCUL
#define DX_ENV_APB_FIPS_VAL_REG_OFFSET 0x654UL
#define DX_ENV_APB_FIPS_VAL_REG_OFFSET 0x654UL
#define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APB_FIPS_MASK_REG_OFFSET 0x658UL
#define DX_ENV_APB_FIPS_MASK_REG_OFFSET 0x658UL
#define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APB_FIPS_CNT_REG_OFFSET 0x65CUL
#define DX_ENV_APB_FIPS_CNT_REG_OFFSET 0x65CUL
#define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APB_FIPS_NEW_ADDR_REG_OFFSET 0x660UL
#define DX_ENV_APB_FIPS_NEW_ADDR_REG_OFFSET 0x660UL
#define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SIZE 0xCUL
#define DX_ENV_APB_FIPS_NEW_VAL_REG_OFFSET 0x664UL
#define DX_ENV_APB_FIPS_NEW_VAL_REG_OFFSET 0x664UL
#define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APBP_FIPS_ADDR_REG_OFFSET 0x670UL
#define DX_ENV_APBP_FIPS_ADDR_REG_OFFSET 0x670UL
#define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SIZE 0xCUL
#define DX_ENV_APBP_FIPS_VAL_REG_OFFSET 0x674UL
#define DX_ENV_APBP_FIPS_VAL_REG_OFFSET 0x674UL
#define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APBP_FIPS_MASK_REG_OFFSET 0x678UL
#define DX_ENV_APBP_FIPS_MASK_REG_OFFSET 0x678UL
#define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APBP_FIPS_CNT_REG_OFFSET 0x67CUL
#define DX_ENV_APBP_FIPS_CNT_REG_OFFSET 0x67CUL
#define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_APBP_FIPS_NEW_ADDR_REG_OFFSET 0x680UL
#define DX_ENV_APBP_FIPS_NEW_ADDR_REG_OFFSET 0x680UL
#define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SIZE 0xCUL
#define DX_ENV_APBP_FIPS_NEW_VAL_REG_OFFSET 0x684UL
#define DX_ENV_APBP_FIPS_NEW_VAL_REG_OFFSET 0x684UL
#define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_CC_POWERDOWN_EN_REG_OFFSET 0x690UL
#define DX_ENV_CC_POWERDOWN_EN_REG_OFFSET 0x690UL
#define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_CC_POWERDOWN_RST_EN_REG_OFFSET 0x694UL
#define DX_ENV_CC_POWERDOWN_RST_EN_REG_OFFSET 0x694UL
#define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_POWERDOWN_RST_CNTR_REG_OFFSET 0x698UL
#define DX_ENV_POWERDOWN_RST_CNTR_REG_OFFSET 0x698UL
#define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SIZE 0x20UL
#define DX_ENV_POWERDOWN_EN_DEBUG_REG_OFFSET 0x69CUL
#define DX_ENV_POWERDOWN_EN_DEBUG_REG_OFFSET 0x69CUL
#define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
// BLOCK: ENV_CC_MEMORIES
// --------------------------------------
#define DX_ENV_FUSE_READY_REG_OFFSET 0x000UL
#define DX_ENV_FUSE_READY_REG_OFFSET 0x000UL
#define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_FUSE_READY_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET 0x0ECUL
#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET 0x0ECUL
#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE 0x1UL
#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET 0x0F0UL
#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET 0x0F0UL
#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE 0x2UL
#define DX_ENV_FUSES_RAM_REG_OFFSET 0x3ECUL
#define DX_ENV_FUSES_RAM_REG_OFFSET 0x3ECUL
#define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE 0x20UL
// --------------------------------------
// BLOCK: ENV_PERF_RAM_BASE
// --------------------------------------
#define DX_ENV_PERF_RAM_BASE_REG_OFFSET 0x000UL
#define DX_ENV_PERF_RAM_BASE_REG_OFFSET 0x000UL
#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT 0x0UL
#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE 0x20UL


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -20,7 +20,7 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------
#define DX_HOST_IRR_REG_OFFSET 0xA00UL
#define DX_HOST_IRR_REG_OFFSET 0xA00UL
#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
@ -31,7 +31,7 @@
#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define DX_HOST_IMR_REG_OFFSET 0xA04UL
#define DX_HOST_IMR_REG_OFFSET 0xA04UL
#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
@ -44,7 +44,7 @@
#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
#define DX_HOST_ICR_REG_OFFSET 0xA08UL
#define DX_HOST_ICR_REG_OFFSET 0xA08UL
#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
@ -55,10 +55,10 @@
#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
#define DX_HOST_SIGNATURE_REG_OFFSET 0xA24UL
#define DX_HOST_SIGNATURE_REG_OFFSET 0xA24UL
#define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
#define DX_HOST_BOOT_REG_OFFSET 0xA28UL
#define DX_HOST_BOOT_REG_OFFSET 0xA28UL
#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
@ -115,40 +115,40 @@
#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define DX_HOST_VERSION_REG_OFFSET 0xA40UL
#define DX_HOST_VERSION_REG_OFFSET 0xA40UL
#define DX_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
#define DX_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
#define DX_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
#define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
#define DX_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
#define DX_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
#define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
#define DX_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
#define DX_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
#define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
#define DX_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
#define DX_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
#define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
#define DX_HOST_GPR0_REG_OFFSET 0xA70UL
#define DX_HOST_GPR0_REG_OFFSET 0xA70UL
#define DX_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
#define DX_GPR_HOST_REG_OFFSET 0xA74UL
#define DX_GPR_HOST_REG_OFFSET 0xA74UL
#define DX_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
#define DX_GPR_HOST_VALUE_BIT_SIZE 0x20UL
#define DX_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
#define DX_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define DX_SRAM_DATA_REG_OFFSET 0xF00UL
#define DX_SRAM_DATA_REG_OFFSET 0xF00UL
#define DX_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
#define DX_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
#define DX_SRAM_ADDR_REG_OFFSET 0xF04UL
#define DX_SRAM_ADDR_REG_OFFSET 0xF04UL
#define DX_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
#define DX_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
#define DX_SRAM_DATA_READY_REG_OFFSET 0xF08UL
#define DX_SRAM_DATA_READY_REG_OFFSET 0xF08UL
#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -19,7 +19,7 @@
#define DX_DEV_SIGNATURE 0xDCC71200UL
#define CC_HW_VERSION 0xef840015UL
#define CC_HW_VERSION 0xef840015UL
#define DX_DEV_SHA_MAX 512


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -58,8 +58,8 @@ enum HashCipherDoPadding {
};
typedef struct SepHashPrivateContext {
/* The current length is placed at the end of the context buffer because the hash
context is used for all HMAC operations as well. HMAC context includes a 64 bytes
/* The current length is placed at the end of the context buffer because the hash
context is used for all HMAC operations as well. HMAC context includes a 64 bytes
K0 field. The size of struct drv_ctx_hash reserved field is 88/184 bytes depend if t
he SHA512 is supported ( in this case teh context size is 256 bytes).
The size of struct drv_ctx_hash reseved field is 20 or 52 depend if the SHA512 is supported.


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -51,7 +51,7 @@
/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01
#define ICV_VERIF_OK 0x01
struct ssi_aead_handle {
ssi_sram_addr_t sram_workspace_addr;
@ -106,13 +106,13 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
ctx->enckey_dma_addr = 0;
ctx->enckey = NULL;
}
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
if (ctx->auth_state.xcbc.xcbc_keys != NULL) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(
ctx->auth_state.xcbc.xcbc_keys_dma_addr);
dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
ctx->auth_state.xcbc.xcbc_keys,
ctx->auth_state.xcbc.xcbc_keys,
ctx->auth_state.xcbc.xcbc_keys_dma_addr);
}
SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
@ -203,14 +203,14 @@ static int ssi_aead_init(struct crypto_aead *tfm)
2 * MAX_HMAC_DIGEST_SIZE);
SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
ctx->auth_state.hmac.ipad_opad);
ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
MAX_HMAC_BLOCK_SIZE,
&ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
if (ctx->auth_state.hmac.padded_authkey == NULL) {
SSI_LOG_ERR("failed to allocate padded_authkey\n");
goto init_failed;
}
}
SSI_UPDATE_DMA_ADDR_TO_48BIT(
ctx->auth_state.hmac.padded_authkey_dma_addr,
MAX_HMAC_BLOCK_SIZE);
@ -225,7 +225,7 @@ init_failed:
ssi_aead_exit(tfm);
return -ENOMEM;
}
static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
@ -313,9 +313,9 @@ static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
{
unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
int idx = 0;
@ -363,7 +363,7 @@ static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
/* Get the digset */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(ctx->auth_state.hmac.ipad_opad_dma_addr +
digest_ofs),
digest_size, NS_BIT, 0);
@ -420,7 +420,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return 0; /* All tests of keys sizes passed */
}
/*This function prepers the user key so it can pass to the hmac processing
/*This function prepers the user key so it can pass to the hmac processing
(copy to intenral buffer or hash in case of key longer than block */
static int
ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
@ -437,7 +437,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
unsigned int idx = 0;
int rc = 0;
HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
switch (ctx->auth_mode) { /* auth_key required and >0 */
@ -469,7 +469,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Load the hash current length*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
@ -478,17 +478,17 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
idx++;
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
key_dma_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
key_dma_addr,
keylen, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
idx++;
/* Get hashed key */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
padded_authkey_dma_addr,
digestsize,
@ -500,32 +500,32 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
HASH_DIGEST_RESULT_LITTLE_ENDIAN);
idx++;
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(padded_authkey_dma_addr + digestsize),
(blocksize - digestsize),
NS_BIT, 0);
idx++;
} else {
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
key_dma_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
key_dma_addr,
keylen, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(padded_authkey_dma_addr),
keylen, NS_BIT, 0);
idx++;
if ((blocksize - keylen) != 0) {
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0,
(blocksize - keylen));
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(padded_authkey_dma_addr + keylen),
(blocksize - keylen),
NS_BIT, 0);
@ -537,7 +537,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
HW_DESC_SET_DIN_CONST(&desc[idx], 0,
(blocksize - keylen));
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
padded_authkey_dma_addr,
blocksize,
NS_BIT, 0);
@ -632,7 +632,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
/* STAT_PHASE_2: Create sequence */
START_CYCLE_COUNT();
@ -656,7 +656,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
/* STAT_PHASE_3: Submit sequence to HW */
START_CYCLE_COUNT();
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
#ifdef ENABLE_CYCLE_COUNT
ssi_req.op_type = STAT_OP_TYPE_SETKEY;
@ -684,7 +684,7 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
if (keylen < 3)
return -EINVAL;
@ -702,7 +702,7 @@ static int ssi_aead_setauthsize(
unsigned int authsize)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* Unsupported auth. sizes */
if ((authsize == 0) ||
@ -752,11 +752,11 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
}
#endif /*SSI_CC_HAS_AES_CCM*/
static inline void
static inline void
ssi_aead_create_assoc_desc(
struct aead_request *areq,
struct aead_request *areq,
unsigned int flow_mode,
HwDesc_s desc[],
HwDesc_s desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
@ -769,7 +769,7 @@ ssi_aead_create_assoc_desc(
case SSI_DMA_BUF_DLLI:
SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
sg_dma_address(areq->src),
areq->assoclen, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
@ -797,9 +797,9 @@ ssi_aead_create_assoc_desc(
static inline void
ssi_aead_process_authenc_data_desc(
struct aead_request *areq,
struct aead_request *areq,
unsigned int flow_mode,
HwDesc_s desc[],
HwDesc_s desc[],
unsigned int *seq_size,
int direct)
{
@ -814,7 +814,7 @@ ssi_aead_process_authenc_data_desc(
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
areq_ctx->dstSgl : areq_ctx->srcSgl;
unsigned int offset =
unsigned int offset =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
areq_ctx->dstOffset : areq_ctx->srcOffset;
SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
@ -860,9 +860,9 @@ ssi_aead_process_authenc_data_desc(
static inline void
ssi_aead_process_cipher_data_desc(
struct aead_request *areq,
struct aead_request *areq,
unsigned int flow_mode,
HwDesc_s desc[],
HwDesc_s desc[],
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
@ -926,7 +926,7 @@ static inline void ssi_aead_process_digest_result_desc(
HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
} else {
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
HASH_DIGEST_RESULT_LITTLE_ENDIAN);
@ -985,7 +985,7 @@ static inline void ssi_aead_setup_cipher_desc(
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
if (ctx->flow_mode == S_DIN_to_AES) {
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
((ctx->enc_keylen == 24) ?
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
@ -1035,7 +1035,7 @@ static inline void ssi_aead_hmac_setup_digest_desc(
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
unsigned int idx = *seq_size;
@ -1098,7 +1098,7 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
/* Setup XCBC MAC K2 */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
(ctx->auth_state.xcbc.xcbc_keys_dma_addr +
(ctx->auth_state.xcbc.xcbc_keys_dma_addr +
AES_KEYSIZE_128),
AES_KEYSIZE_128, NS_BIT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
@ -1150,7 +1150,7 @@ static inline void ssi_aead_process_digest_scheme_desc(
struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
unsigned int idx = *seq_size;
@ -1284,9 +1284,9 @@ static inline void ssi_aead_hmac_authenc(
return;
}
/**
/**
* Double-pass flow
* Fallback for unsupported single-pass modes,
* Fallback for unsupported single-pass modes,
* i.e. using assoc. data of non-word-multiple */
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
@ -1335,9 +1335,9 @@ ssi_aead_xcbc_authenc(
return;
}
/**
/**
* Double-pass flow
* Fallback for unsupported single-pass modes,
* Fallback for unsupported single-pass modes,
* i.e. using assoc. data of non-word-multiple */
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
@ -1382,7 +1382,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
if (ctx->cipher_mode == DRV_CIPHER_GCTR)
{
if (areq_ctx->plaintext_authenticate_only == true)
areq_ctx->is_single_pass = false;
areq_ctx->is_single_pass = false;
break;
}
@ -1417,7 +1417,7 @@ static unsigned int format_ccm_a0(uint8_t *pA0Buff, uint32_t headerSize)
unsigned int len = 0;
if ( headerSize == 0 ) {
return 0;
}
}
if ( headerSize < ((1UL << 16) - (1UL << 8) )) {
len = 2;
@ -1477,11 +1477,11 @@ static inline int ssi_aead_ccm(
}
/* load key */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
((ctx->enc_keylen == 24) ?
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
((ctx->enc_keylen == 24) ?
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
@ -1494,19 +1494,19 @@ static inline int ssi_aead_ccm(
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
req_ctx->gen_ctx.iv_dma_addr,
req_ctx->gen_ctx.iv_dma_addr,
AES_BLOCK_SIZE, NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
idx++;
/* load MAC key */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
((ctx->enc_keylen == 24) ?
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
((ctx->enc_keylen == 24) ?
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
@ -1520,9 +1520,9 @@ static inline int ssi_aead_ccm(
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
req_ctx->mac_buf_dma_addr,
req_ctx->mac_buf_dma_addr,
AES_BLOCK_SIZE, NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
@ -1534,7 +1534,7 @@ static inline int ssi_aead_ccm(
ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
sg_dma_address(&req_ctx->ccm_adata_sg),
AES_BLOCK_SIZE + req_ctx->ccm_hdr_size,
NS_BIT);
@ -1582,7 +1582,7 @@ static inline int ssi_aead_ccm(
HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result , ctx->authsize, NS_BIT, 1);
HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
idx++;
*seq_size = idx;
return 0;
@ -1600,9 +1600,9 @@ static int config_ccm_adata(struct aead_request *req) {
uint8_t *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
uint8_t *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
uint8_t *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
req->cryptlen :
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
req->cryptlen :
(req->cryptlen - ctx->authsize);
int rc;
memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
@ -1622,13 +1622,13 @@ static int config_ccm_adata(struct aead_request *req) {
*b0 |= (8 * ((m - 2) / 2));
if (req->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
if (rc != 0) {
return rc;
}
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
req_ctx->ccm_hdr_size = format_ccm_a0 (a0, req->assoclen);
@ -1654,7 +1654,7 @@ static void ssi_rfc4309_ccm_process(struct aead_request *req)
/* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->iv = areq_ctx->ctr_iv;
req->assoclen -= CCM_BLOCK_IV_SIZE;
}
#endif /*SSI_CC_HAS_AES_CCM*/
@ -1672,11 +1672,11 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
unsigned int idx = *seq_size;
/* load key to AES*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
ctx->enc_keylen, NS_BIT);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
ctx->enc_keylen, NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
@ -1688,7 +1688,7 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
HW_DESC_SET_DOUT_DLLI(&desc[idx],
req_ctx->hkey_dma_addr,
AES_BLOCK_SIZE,
NS_BIT, 0);
NS_BIT, 0);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
@ -1701,13 +1701,13 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
/* Load GHASH subkey */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
req_ctx->hkey_dma_addr,
req_ctx->hkey_dma_addr,
AES_BLOCK_SIZE, NS_BIT);
HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@ -1719,10 +1719,10 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@ -1733,7 +1733,7 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
idx++;
@ -1751,11 +1751,11 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
unsigned int idx = *seq_size;
/* load key to AES*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
ctx->enc_keylen, NS_BIT);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
ctx->enc_keylen, NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
@ -1767,9 +1767,9 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
req_ctx->gcm_iv_inc2_dma_addr,
req_ctx->gcm_iv_inc2_dma_addr,
AES_BLOCK_SIZE, NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
idx++;
@ -1786,7 +1786,7 @@ static inline void ssi_aead_process_gcm_result_desc(
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
dma_addr_t mac_result;
dma_addr_t mac_result;
unsigned int idx = *seq_size;
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
@ -1797,7 +1797,7 @@ static inline void ssi_aead_process_gcm_result_desc(
/* process(ghash) gcm_block_len */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
req_ctx->gcm_block_len_dma_addr,
AES_BLOCK_SIZE, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
@ -1813,16 +1813,16 @@ static inline void ssi_aead_process_gcm_result_desc(
HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
idx++;
idx++;
/* load AES/CTR initial CTR value inc by 1*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
req_ctx->gcm_iv_inc1_dma_addr,
req_ctx->gcm_iv_inc1_dma_addr,
AES_BLOCK_SIZE, NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
idx++;
@ -1842,7 +1842,7 @@ static inline void ssi_aead_process_gcm_result_desc(
HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
idx++;
*seq_size = idx;
}
@ -1864,7 +1864,7 @@ static inline int ssi_aead_gcm(
//in RFC4543 no data to encrypt. just copy data from src to dest.
if (req_ctx->plaintext_authenticate_only==true){
if (req_ctx->plaintext_authenticate_only==true){
ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
@ -1883,7 +1883,7 @@ static inline int ssi_aead_gcm(
ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
if (req_ctx->cryptlen != 0)
ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
ssi_aead_process_gcm_result_desc(req, desc, seq_size);
idx = *seq_size;
@ -1940,10 +1940,10 @@ static int config_gcm_context(struct aead_request *req) {
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
req->cryptlen :
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
req->cryptlen :
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
@ -1988,7 +1988,7 @@ static void ssi_rfc4_gcm_process(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->iv = areq_ctx->ctr_iv;
req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
@ -1999,7 +1999,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
{
int rc = 0;
int seq_len = 0;
HwDesc_s desc[MAX_AEAD_PROCESS_SEQ];
HwDesc_s desc[MAX_AEAD_PROCESS_SEQ];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@ -2015,7 +2015,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* STAT_PHASE_0: Init and sanity checks */
START_CYCLE_COUNT();
/* Check data length according to mode */
if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
@ -2041,7 +2041,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* STAT_PHASE_1: Map buffers */
START_CYCLE_COUNT();
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* Build CTR IV - Copy nonce from last 4 bytes in
* CTR key to first 4 bytes in CTR IV */
@ -2056,7 +2056,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
/* Replace with counter iv */
req->iv = areq_ctx->ctr_iv;
areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
(ctx->cipher_mode == DRV_CIPHER_GCTR) ) {
areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
if (areq_ctx->ctr_iv != req->iv) {
@ -2072,23 +2072,23 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
rc = config_ccm_adata(req);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
goto exit;
goto exit;
}
} else {
areq_ctx->ccm_hdr_size = ccm_header_size_null;
areq_ctx->ccm_hdr_size = ccm_header_size_null;
}
#else
areq_ctx->ccm_hdr_size = ccm_header_size_null;
areq_ctx->ccm_hdr_size = ccm_header_size_null;
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
goto exit;
goto exit;
}
}
}
#endif /*SSI_CC_HAS_AES_GCM*/
rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
@ -2153,7 +2153,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
#endif /*SSI_CC_HAS_AES_GCM*/
break;
#endif
default:
default:
SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
ssi_buffer_mgr_unmap_aead_request(dev, req);
rc = -ENOTSUPP;
@ -2172,7 +2172,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
ssi_buffer_mgr_unmap_aead_request(dev, req);
}
END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
exit:
return rc;
@ -2214,9 +2214,9 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
ssi_rfc4309_ccm_process(req);
rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
@ -2261,10 +2261,10 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
ssi_rfc4309_ccm_process(req);
rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
@ -2280,7 +2280,7 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p \n", keylen, key );
if (keylen < 4)
@ -2298,7 +2298,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p \n", keylen, key );
if (keylen < 4)
@ -2374,7 +2374,7 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
ssi_rfc4_gcm_process(req);
@ -2393,14 +2393,14 @@ static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
//plaintext is not encryped with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
ssi_rfc4_gcm_process(req);
areq_ctx->is_gcm4543 = true;
@ -2426,7 +2426,7 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
ssi_rfc4_gcm_process(req);
@ -2452,7 +2452,7 @@ static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
ssi_rfc4_gcm_process(req);
areq_ctx->is_gcm4543 = true;
@ -2715,7 +2715,7 @@ static struct ssi_alg_template aead_algs[] = {
.cipher_mode = DRV_CIPHER_GCTR,
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
},
},
#endif /*SSI_CC_HAS_AES_GCM*/
};


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -35,10 +35,10 @@
/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8
#define GCM_BLOCK_RFC4_IV_OFFSET 4
#define GCM_BLOCK_RFC4_IV_OFFSET 4
#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
#define GCM_BLOCK_RFC4_NONCE_SIZE 4
#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
#define GCM_BLOCK_RFC4_NONCE_SIZE 4
@ -62,12 +62,12 @@ enum aead_ccm_header_size {
struct aead_req_ctx {
/* Allocate cache line although only 4 bytes are needed to
* assure next field falls @ cache line
* assure next field falls @ cache line
* Used for both: digest HW compare and CCM/GCM MAC value */
uint8_t mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
uint8_t ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
//used in gcm
//used in gcm
uint8_t gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
uint8_t gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
uint8_t hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
@ -85,7 +85,7 @@ struct aead_req_ctx {
dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
dma_addr_t icv_dma_addr; /* Phys. address of ICV */
//used in gcm
//used in gcm
dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
dma_addr_t hkey_dma_addr; /* Phys. address of hkey */


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -102,18 +102,18 @@ dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
#ifdef CC_DMA_48BIT_SIM_FULL
/* With this code all addresses will be switched to 48 bits. */
/* The if condition protects from double expention */
if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
(data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#else
if((!(((orig_addr >> 16) & 0xFF) % 2)) &&
if((!(((orig_addr >> 16) & 0xFF) % 2)) &&
(data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#endif
tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 |
tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 |
(orig_addr & UINT16_MAX));
SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
"dma_address=0x%llX\n",
orig_addr, tmp_dma_addr);
return tmp_dma_addr;
return tmp_dma_addr;
}
return orig_addr;
}
@ -126,29 +126,29 @@ dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
/* The if condition protects from double restoring */
if((orig_addr >> 32) & 0xFFFF ) {
#else
if(((orig_addr >> 32) & 0xFFFF) &&
if(((orig_addr >> 32) & 0xFFFF) &&
!(((orig_addr >> 32) & 0xFF) % 2) ) {
#endif
/*return high 16 bits*/
tmp_dma_addr = ((orig_addr >> 16));
/*clean the 0xFFFF in the lower bits (set in the add expansion)*/
tmp_dma_addr &= 0xFFFF0000;
tmp_dma_addr &= 0xFFFF0000;
/* Set the original 16 bits */
tmp_dma_addr |= (orig_addr & UINT16_MAX);
tmp_dma_addr |= (orig_addr & UINT16_MAX);
SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
"dma_address=0x%llX\n",
orig_addr, tmp_dma_addr);
return tmp_dma_addr;
return tmp_dma_addr;
}
return orig_addr;
}
#endif
/**
* ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
*
*
* @sg_list: SG list
* @nbytes: [IN] Total SGL data bytes.
* @lbytes: [OUT] Returns the amount of bytes at the last entry
* @lbytes: [OUT] Returns the amount of bytes at the last entry
*/
static unsigned int ssi_buffer_mgr_get_sgl_nents(
struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
@ -179,7 +179,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
/**
* ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
*
*
* @sgl:
*/
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
@ -201,7 +201,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
/**
* ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
*
* @dest:
* @sg:
* @to_skip:
@ -306,7 +306,7 @@ static int ssi_buffer_mgr_generate_mlli(
rc =-ENOMEM;
goto build_mlli_exit;
}
SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
(MAX_NUM_OF_TOTAL_MLLI_ENTRIES*
LLI_ENTRY_BYTE_SIZE));
/* Point to start of MLLI */
@ -315,7 +315,7 @@ static int ssi_buffer_mgr_generate_mlli(
for (i = 0; i < sg_data->num_of_buffers; i++) {
if (sg_data->type[i] == DMA_SGL_TYPE)
rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
sg_data->entry[i].sgl,
sg_data->entry[i].sgl,
sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
&mlli_p);
else /*DMA_BUFF_TYPE*/
@ -329,9 +329,9 @@ static int ssi_buffer_mgr_generate_mlli(
/* set last bit in the current table */
if (sg_data->mlli_nents[i] != NULL) {
/*Calculate the current MLLI table length for the
/*Calculate the current MLLI table length for the
length field in the descriptor*/
*(sg_data->mlli_nents[i]) +=
*(sg_data->mlli_nents[i]) +=
(total_nents - prev_total_nents);
prev_total_nents = total_nents;
}
@ -440,20 +440,20 @@ static int ssi_buffer_mgr_map_scatterlist(
if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
}
SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
"page_link=0x%08lX addr=%pK offset=%u "
"length=%u\n",
(unsigned long long)sg_dma_address(sg),
sg->page_link,
sg_virt(sg),
(unsigned long long)sg_dma_address(sg),
sg->page_link,
sg_virt(sg),
sg->offset, sg->length);
*lbytes = nbytes;
*nents = 1;
*mapped_nents = 1;
SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
} else { /*sg_is_last*/
*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
&is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
@ -498,7 +498,7 @@ ssi_aead_handle_config_buf(struct device *dev,
SSI_LOG_DEBUG(" handle additional data config set to DLLI \n");
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
DMA_TO_DEVICE) != 1)) {
SSI_LOG_ERR("dma_map_sg() "
"config buffer failed\n");
@ -507,16 +507,16 @@ ssi_aead_handle_config_buf(struct device *dev,
SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
"page_link=0x%08lX addr=%pK "
"offset=%u length=%u\n",
(unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
areq_ctx->ccm_adata_sg.page_link,
(unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
areq_ctx->ccm_adata_sg.page_link,
sg_virt(&areq_ctx->ccm_adata_sg),
areq_ctx->ccm_adata_sg.offset,
areq_ctx->ccm_adata_sg.offset,
areq_ctx->ccm_adata_sg.length);
/* prepare for case of MLLI */
if (assoclen > 0) {
ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
&areq_ctx->ccm_adata_sg,
(AES_BLOCK_SIZE +
(AES_BLOCK_SIZE +
areq_ctx->ccm_hdr_size), 0,
false, NULL);
}
@ -542,10 +542,10 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
"page_link=0x%08lX addr=%pK "
"offset=%u length=%u\n",
(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
areq_ctx->buff_sg->page_link,
(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
areq_ctx->buff_sg->page_link,
sg_virt(areq_ctx->buff_sg),
areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
@ -566,12 +566,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
(unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
ivsize);
SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
ivsize,
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
}
@ -586,12 +586,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
dma_unmap_sg(dev, src, req_ctx->in_nents,
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
sg_virt(src));
if (src != dst) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
dma_unmap_sg(dev, dst, req_ctx->out_nents,
dma_unmap_sg(dev, dst, req_ctx->out_nents,
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
sg_virt(dst));
@ -608,7 +608,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
struct scatterlist *dst)
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
struct device *dev = &drvdata->plat_dev->dev;
struct buffer_array sg_data;
@ -623,12 +623,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
/* Map IV buffer */
if (likely(ivsize != 0) ) {
dump_byte_array("iv", (uint8_t *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL:
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
if (unlikely(dma_mapping_error(dev,
req_ctx->gen_ctx.iv_dma_addr))) {
SSI_LOG_ERR("Mapping iv %u B at va=%pK "
"for DMA failed\n", ivsize, info);
@ -641,7 +641,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
(unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
} else
req_ctx->gen_ctx.iv_dma_addr = 0;
/* Map the src SGL */
rc = ssi_buffer_mgr_map_scatterlist(dev, src,
nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
@ -681,11 +681,11 @@ int ssi_buffer_mgr_map_blkcipher_request(
&req_ctx->in_mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
req_ctx->out_nents, dst,
nbytes, 0, true,
nbytes, 0, true,
&req_ctx->out_mlli_nents);
}
}
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
@ -716,7 +716,7 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->mac_buf_dma_addr != 0) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr);
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
}
@ -727,22 +727,22 @@ void ssi_buffer_mgr_unmap_aead_request(
dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
}
if (areq_ctx->gcm_block_len_dma_addr != 0) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr);
dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr);
dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr);
dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
}
@ -751,7 +751,7 @@ void ssi_buffer_mgr_unmap_aead_request(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (areq_ctx->ccm_iv0_dma_addr != 0) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr);
dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
}
@ -763,10 +763,10 @@ void ssi_buffer_mgr_unmap_aead_request(
hw_iv_size, DMA_BIDIRECTIONAL);
}
/*In case a pool was set, a table was
/*In case a pool was set, a table was
allocated and should be released */
if (areq_ctx->mlli_params.curr_pool != NULL) {
SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
(unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
@ -786,7 +786,7 @@ void ssi_buffer_mgr_unmap_aead_request(
dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->dst));
dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
@ -821,12 +821,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
unsigned int nents;
unsigned int i;
if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
*is_icv_fragmented = false;
return 0;
}
for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
if (sgl == NULL) {
break;
@ -883,12 +883,12 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
rc = -ENOMEM;
goto chain_iv_exit;
goto chain_iv_exit;
}
SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size);
SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
hw_iv_size, req->iv,
hw_iv_size, req->iv,
(unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){ // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@ -943,7 +943,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
//it is assumed that if we reach here , the sgl is already mapped
sg_index = current_sg->length;
if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
mapped_nents++;
mapped_nents++;
}
else{
while (sg_index <= size_of_assoc) {
@ -1095,7 +1095,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
(*src_last_bytes - authsize);
}
@ -1214,8 +1214,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
}
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
sg_index = areq_ctx->srcSgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@ -1238,7 +1238,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->src.nents = src_mapped_nents;
areq_ctx->srcOffset = offset;
areq_ctx->srcOffset = offset;
if (req->src != req->dst) {
size_for_map = req->assoclen +req->cryptlen;
@ -1253,7 +1253,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
&dst_mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto chain_data_exit;
goto chain_data_exit;
}
}
@ -1303,10 +1303,10 @@ static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
uint32_t curr_mlli_size = 0;
if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
curr_mlli_size = areq_ctx->assoc.mlli_nents *
curr_mlli_size = areq_ctx->assoc.mlli_nents *
LLI_ENTRY_BYTE_SIZE;
}
@ -1318,31 +1318,31 @@ static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
curr_mlli_size;
areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
if (areq_ctx->is_single_pass == false)
areq_ctx->assoc.mlli_nents +=
areq_ctx->assoc.mlli_nents +=
areq_ctx->src.mlli_nents;
} else {
if (areq_ctx->gen_ctx.op_type ==
if (areq_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_DECRYPT) {
areq_ctx->src.sram_addr =
areq_ctx->src.sram_addr =
drvdata->mlli_sram_addr +
curr_mlli_size;
areq_ctx->dst.sram_addr =
areq_ctx->src.sram_addr +
areq_ctx->src.mlli_nents *
areq_ctx->dst.sram_addr =
areq_ctx->src.sram_addr +
areq_ctx->src.mlli_nents *
LLI_ENTRY_BYTE_SIZE;
if (areq_ctx->is_single_pass == false)
areq_ctx->assoc.mlli_nents +=
areq_ctx->assoc.mlli_nents +=
areq_ctx->src.mlli_nents;
} else {
areq_ctx->dst.sram_addr =
areq_ctx->dst.sram_addr =
drvdata->mlli_sram_addr +
curr_mlli_size;
areq_ctx->src.sram_addr =
areq_ctx->src.sram_addr =
areq_ctx->dst.sram_addr +
areq_ctx->dst.mlli_nents *
areq_ctx->dst.mlli_nents *
LLI_ENTRY_BYTE_SIZE;
if (areq_ctx->is_single_pass == false)
areq_ctx->assoc.mlli_nents +=
areq_ctx->assoc.mlli_nents +=
areq_ctx->dst.mlli_nents;
}
}
@ -1387,8 +1387,8 @@ int ssi_buffer_mgr_map_aead_request(
#endif
/* cacluate the size for cipher remove ICV in decrypt*/
areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
req->cryptlen :
(req->cryptlen - authsize);
@ -1489,15 +1489,15 @@ int ssi_buffer_mgr_map_aead_request(
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto aead_map_failure;
goto aead_map_failure;
}
if (likely(areq_ctx->is_single_pass == true)) {
/*
* Create MLLI table for:
* Create MLLI table for:
* (1) Assoc. data
* (2) Src/Dst SGLs
* Note: IV is contg. buffer (not an SGL)
* Note: IV is contg. buffer (not an SGL)
*/
rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
if (unlikely(rc != 0))
@ -1511,19 +1511,19 @@ int ssi_buffer_mgr_map_aead_request(
} else { /* DOUBLE-PASS flow */
/*
* Prepare MLLI table(s) in this order:
*
*
* If ENCRYPT/DECRYPT (inplace):
* (1) MLLI table for assoc
* (2) IV entry (chained right after end of assoc)
* (3) MLLI for src/dst (inplace operation)
*
* If ENCRYPT (non-inplace)
*
* If ENCRYPT (non-inplace)
* (1) MLLI table for assoc
* (2) IV entry (chained right after end of assoc)
* (3) MLLI for dst
* (4) MLLI for src
*
* If DECRYPT (non-inplace)
*
* If DECRYPT (non-inplace)
* (1) MLLI table for assoc
* (2) IV entry (chained right after end of assoc)
* (3) MLLI for src
@ -1572,7 +1572,7 @@ int ssi_buffer_mgr_map_hash_request_final(
areq_ctx->buff0;
uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
&areq_ctx->buff0_cnt;
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
uint32_t dummy = 0;
@ -1593,7 +1593,7 @@ int ssi_buffer_mgr_map_hash_request_final(
/* nothing to do */
return 0;
}
/*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
if (*curr_buff_cnt != 0 ) {
@ -1612,7 +1612,7 @@ int ssi_buffer_mgr_map_hash_request_final(
&dummy, &mapped_nents))){
goto unmap_curr_buff;
}
if ( src && (mapped_nents == 1)
if ( src && (mapped_nents == 1)
&& (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
memcpy(areq_ctx->buff_sg,src,
sizeof(struct scatterlist));
@ -1668,7 +1668,7 @@ int ssi_buffer_mgr_map_hash_request_update(
areq_ctx->buff1;
uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
&areq_ctx->buff1_cnt;
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
unsigned int update_data_len;
uint32_t total_in_len = nbytes + *curr_buff_cnt;
struct buffer_array sg_data;
@ -1676,7 +1676,7 @@ int ssi_buffer_mgr_map_hash_request_update(
unsigned int swap_index = 0;
uint32_t dummy = 0;
uint32_t mapped_nents = 0;
SSI_LOG_DEBUG(" update params : curr_buff=%pK "
"curr_buff_cnt=0x%X nbytes=0x%X "
"src=%pK curr_index=%u \n",
@ -1694,12 +1694,12 @@ int ssi_buffer_mgr_map_hash_request_update(
"*curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt,
&curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
areq_ctx->in_nents =
ssi_buffer_mgr_get_sgl_nents(src,
nbytes,
&dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
return 1;
}
@ -1734,7 +1734,7 @@ int ssi_buffer_mgr_map_hash_request_update(
/* change the buffer index for next operation */
swap_index = 1;
}
if ( update_data_len > *curr_buff_cnt ) {
if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
(update_data_len -*curr_buff_cnt),
@ -1744,7 +1744,7 @@ int ssi_buffer_mgr_map_hash_request_update(
&dummy, &mapped_nents))){
goto unmap_curr_buff;
}
if ( (mapped_nents == 1)
if ( (mapped_nents == 1)
&& (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
/* only one entry in the SG and no previous data */
memcpy(areq_ctx->buff_sg,src,
@ -1792,10 +1792,10 @@ void ssi_buffer_mgr_unmap_hash_request(
uint32_t *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
&areq_ctx->buff1_cnt;
/*In case a pool was set, a table was
/*In case a pool was set, a table was
allocated and should be released */
if (areq_ctx->mlli_params.curr_pool != NULL) {
SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
(unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
@ -1803,22 +1803,22 @@ void ssi_buffer_mgr_unmap_hash_request(
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
if ((src) && likely(areq_ctx->in_nents != 0)) {
SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
sg_virt(src),
(unsigned long long)sg_dma_address(src),
(unsigned long long)sg_dma_address(src),
sg_dma_len(src));
SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
dma_unmap_sg(dev, src,
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len != 0) {
SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
"dma=0x%llX len 0x%X\n",
"dma=0x%llX len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
@ -1844,7 +1844,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
"dx_single_mlli_tables", dev,
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -48,7 +48,7 @@ struct mlli_params {
struct dma_pool *curr_pool;
uint8_t *mlli_virt_addr;
dma_addr_t mlli_dma_addr;
uint32_t mlli_len;
uint32_t mlli_len;
};
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
@ -65,7 +65,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
struct scatterlist *dst);
void ssi_buffer_mgr_unmap_blkcipher_request(
struct device *dev,
struct device *dev,
void *ctx,
unsigned int ivsize,
struct scatterlist *src,


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -117,7 +117,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
switch (ctx_p->cipher_mode){
case DRV_CIPHER_XTS:
if ((size >= SSI_MIN_AES_XTS_SIZE) &&
(size <= SSI_MAX_AES_XTS_SIZE) &&
(size <= SSI_MAX_AES_XTS_SIZE) &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@ -189,7 +189,7 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
CHECK_AND_RETURN_UPON_FIPS_ERROR();
@ -251,7 +251,7 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
DMA_TO_DEVICE);
SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
(unsigned long long)ctx_p->user.key_dma_addr);
/* Free key buffer in context */
@ -266,9 +266,9 @@ typedef struct tdes_keys{
u8 key3[DES_KEY_SIZE];
}tdes_keys_t;
static const u8 zero_buff[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
static const u8 zero_buff[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/* The function verifies that tdes keys are not weak.*/
@ -278,7 +278,7 @@ static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
tdes_keys_t *tdes_key = (tdes_keys_t*)key;
/* verify key1 != key2 and key3 != key2*/
if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
(memcmp((u8*)tdes_key->key3, (u8*)tdes_key->key2, sizeof(tdes_key->key3)) == 0) )) {
return -ENOEXEC;
}
@ -317,8 +317,8 @@ static enum HwCryptoKey hw_key_to_cc_hw_key(int slot_num)
return END_OF_KEYS;
}
static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
const u8 *key,
static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
const u8 *key,
unsigned int keylen)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
@ -334,7 +334,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
CHECK_AND_RETURN_UPON_FIPS_ERROR();
SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
/* STAT_PHASE_0: Init and sanity checks */
START_CYCLE_COUNT();
@ -396,13 +396,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
return -EINVAL;
}
}
if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
ssi_fips_verify_xts_keys(key, keylen) != 0) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
return -EINVAL;
}
if ((ctx_p->flow_mode == S_DIN_to_DES) &&
(keylen == DES3_EDE_KEY_SIZE) &&
if ((ctx_p->flow_mode == S_DIN_to_DES) &&
(keylen == DES3_EDE_KEY_SIZE) &&
ssi_fips_verify_3des_keys(key, keylen) != 0) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
return -EINVAL;
@ -414,7 +414,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
/* STAT_PHASE_1: Copy key to ctx */
START_CYCLE_COUNT();
SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
#if SSI_CC_HAS_MULTI2
if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
@ -426,7 +426,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval");
return -EINVAL;
}
} else
} else
#endif /*SSI_CC_HAS_MULTI2*/
{
memcpy(ctx_p->user.key, key, keylen);
@ -447,11 +447,11 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
}
}
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr ,max_key_buf_size);
ctx_p->keylen = keylen;
END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
@ -496,7 +496,7 @@ ssi_blkcipher_create_setup_desc(
HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
if ((cipher_mode == DRV_CIPHER_CTR) ||
if ((cipher_mode == DRV_CIPHER_CTR) ||
(cipher_mode == DRV_CIPHER_OFB) ) {
HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
SETUP_LOAD_STATE1);
@ -517,7 +517,7 @@ ssi_blkcipher_create_setup_desc(
HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
} else {
HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
key_dma_addr,
key_dma_addr,
((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
NS_BIT);
}
@ -559,7 +559,7 @@ ssi_blkcipher_create_setup_desc(
if (ssi_is_hw_key(tfm)) {
HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key2_slot);
} else {
HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
(key_dma_addr+key_len/2), key_len/2,
NS_BIT);
}
@ -568,7 +568,7 @@ ssi_blkcipher_create_setup_desc(
HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
/* Set state */
HW_DESC_INIT(&desc[*seq_size]);
HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
@ -596,7 +596,7 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
int direction = req_ctx->gen_ctx.op_type;
/* Load system key */
HW_DESC_INIT(&desc[*seq_size]);
@ -611,8 +611,8 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
/* load data key */
HW_DESC_INIT(&desc[*seq_size]);
HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
(ctx_p->user.key_dma_addr +
HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
(ctx_p->user.key_dma_addr +
CC_MULTI2_SYSTEM_KEY_SIZE),
CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
HW_DESC_SET_MULTI2_NUM_ROUNDS(&desc[*seq_size],
@ -622,8 +622,8 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE0 );
(*seq_size)++;
/* Set state */
HW_DESC_INIT(&desc[*seq_size]);
HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
@ -632,9 +632,9 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
(*seq_size)++;
}
#endif /*SSI_CC_HAS_MULTI2*/
@ -715,7 +715,7 @@ ssi_blkcipher_create_data_desc(
"addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents,
NS_BIT,(areq == NULL)? 0:1);
@ -723,13 +723,13 @@ ssi_blkcipher_create_data_desc(
SSI_LOG_DEBUG(" din/dout params "
"addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
(unsigned int)ctx_p->drvdata->mlli_sram_addr +
(uint32_t)LLI_ENTRY_BYTE_SIZE *
(unsigned int)ctx_p->drvdata->mlli_sram_addr +
(uint32_t)LLI_ENTRY_BYTE_SIZE *
req_ctx->in_nents);
HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
(ctx_p->drvdata->mlli_sram_addr +
LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents),
LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents),
req_ctx->out_mlli_nents, NS_BIT,(areq == NULL)? 0:1);
}
if (areq != NULL) {
@ -741,7 +741,7 @@ ssi_blkcipher_create_data_desc(
}
static int ssi_blkcipher_complete(struct device *dev,
struct ssi_ablkcipher_ctx *ctx_p,
struct ssi_ablkcipher_ctx *ctx_p,
struct blkcipher_req_ctx *req_ctx,
struct scatterlist *dst, struct scatterlist *src,
void *info, //req info
@ -779,7 +779,7 @@ static int ssi_blkcipher_process(
unsigned int nbytes,
void *info, //req info
unsigned int ivsize,
void *areq,
void *areq,
enum drv_crypto_direction direction)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
@ -796,7 +796,7 @@ static int ssi_blkcipher_process(
CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
START_CYCLE_COUNT();
/* TODO: check data length according to mode */
if (unlikely(validate_data_size(ctx_p, nbytes))) {
SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
@ -826,12 +826,12 @@ static int ssi_blkcipher_process(
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
/* STAT_PHASE_1: Map buffers */
START_CYCLE_COUNT();
rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("map_request() failed\n");
@ -863,7 +863,7 @@ static int ssi_blkcipher_process(
}
/* Data processing */
ssi_blkcipher_create_data_desc(tfm,
req_ctx,
req_ctx,
dst, src,
nbytes,
areq,
@ -880,7 +880,7 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_3: Lock HW and push sequence */
START_CYCLE_COUNT();
rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (areq == NULL)? 0:1);
if(areq != NULL) {
if (unlikely(rc != -EINPROGRESS)) {
@ -892,17 +892,17 @@ static int ssi_blkcipher_process(
} else {
if (rc != 0) {
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
} else {
END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst, src, info, ivsize, NULL, ctx_p->drvdata->cc_base);
}
}
}
exit_process:
if (cts_restore_flag != 0)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
return rc;
}
@ -941,7 +941,7 @@ static int ssi_sblkcipher_init(struct crypto_tfm *tfm)
static void ssi_sblkcipher_exit(struct crypto_tfm *tfm)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
kfree(ctx_p->sync_ctx);
SSI_LOG_DEBUG("Free sync ctx buffer in context ctx_p->sync_ctx=@%p\n", ctx_p->sync_ctx);
@ -987,15 +987,15 @@ static int ssi_sblkcipher_decrypt(struct blkcipher_desc *desc,
static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
{
struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
return ssi_blkcipher_init(tfm);
}
static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const u8 *key,
static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const u8 *key,
unsigned int keylen)
{
return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
@ -1383,7 +1383,7 @@ static struct ssi_alg_template blkcipher_algs[] = {
#endif /*SSI_CC_HAS_MULTI2*/
};
static
static
struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
{
struct ssi_crypto_alg *t_alg;
@ -1405,7 +1405,7 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *templa
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
alg->cra_init = template->synchronous? ssi_sblkcipher_init:ssi_ablkcipher_init;
alg->cra_exit = template->synchronous? ssi_sblkcipher_exit:ssi_blkcipher_exit;
alg->cra_type = template->synchronous? &crypto_blkcipher_type:&crypto_ablkcipher_type;
@ -1428,7 +1428,7 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *templa
int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_alg *t_alg, *n;
struct ssi_blkcipher_handle *blkcipher_handle =
struct ssi_blkcipher_handle *blkcipher_handle =
drvdata->blkcipher_handle;
struct device *dev;
dev = &drvdata->plat_dev->dev;
@ -1489,9 +1489,9 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
kfree(t_alg);
goto fail0;
} else {
list_add_tail(&t_alg->entry,
list_add_tail(&t_alg->entry,
&ablkcipher_handle->blkcipher_alg_list);
SSI_LOG_DEBUG("Registered %s\n",
SSI_LOG_DEBUG("Registered %s\n",
t_alg->crypto_alg.cra_driver_name);
}
}


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -71,7 +71,7 @@ static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
}
#else
#else
struct arm_hw_key_info {
int hw_key1;


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -45,7 +45,7 @@
/* Define the CryptoCell DMA cache coherency signals configuration */
#if defined (DISABLE_COHERENT_DMA_OPS)
/* Software Controlled Cache Coherency (SCCC) */
/* Software Controlled Cache Coherency (SCCC) */
#define SSI_CACHE_PARAMS (0x000)
/* CC attached to NONE-ACP such as HPP/ACE/AMBA4.
* The customer is responsible to enable/disable this feature


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -155,11 +155,11 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* AXI error interrupt */
if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
uint32_t axi_err;
/* Read the AXI error ID */
axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);
irr &= ~SSI_AXI_ERR_IRQ_MASK;
}
@ -192,7 +192,7 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
/* Unmask relevant interrupt cause */
val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
#ifdef DX_IRQ_DELAY
/* Set CC IRQ delay */
@ -266,7 +266,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
new_drvdata->cc_base = cc_base;
/* Then IRQ */
new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
@ -396,7 +396,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
init_cc_res_err:
SSI_LOG_ERR("Freeing CC HW resources!\n");
if (new_drvdata != NULL) {
ssi_aead_free(new_drvdata);
ssi_hash_free(new_drvdata);
@ -410,7 +410,7 @@ init_cc_res_err:
#ifdef ENABLE_CC_SYSFS
ssi_sysfs_fini();
#endif
if (req_mem_cc_regs != NULL) {
if (irq_registered) {
free_irq(new_drvdata->res_irq->start, new_drvdata);
@ -432,7 +432,7 @@ init_cc_res_err:
void fini_cc_regs(struct ssi_drvdata *drvdata)
{
/* Mask all interrupts */
WRITE_REGISTER(drvdata->cc_base +
WRITE_REGISTER(drvdata->cc_base +
CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
}
@ -505,14 +505,14 @@ static int cc7x_probe(struct platform_device *plat_dev)
static int cc7x_remove(struct platform_device *plat_dev)
{
SSI_LOG_DEBUG("Releasing cc7x resources...\n");
cleanup_cc_resources(plat_dev);
SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
#ifdef ENABLE_CYCLE_COUNT
display_all_stat_db();
#endif
return 0;
}
#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -89,7 +89,7 @@
/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
field in the HW descriptor. The DMA engine +8 that value. */
/* Logging macros */
@ -213,7 +213,7 @@ void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long s
#define START_CYCLE_COUNT_AT(_var) do { _var = get_cycles(); } while(0)
#define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _var)
#else
#define DECL_CYCLE_COUNT_RESOURCES
#define DECL_CYCLE_COUNT_RESOURCES
#define START_CYCLE_COUNT() do { } while (0)
#define END_CYCLE_COUNT(_stat_op_type, _stat_phase) do { } while (0)
#define GET_START_CYCLE_COUNT() 0


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -27,8 +27,8 @@ extern int ssi_fips_ext_get_state(ssi_fips_state_t *p_state);
extern int ssi_fips_ext_get_error(ssi_fips_error_t *p_err);
/*
This function returns the REE FIPS state.
It should be called by kernel module.
This function returns the REE FIPS state.
It should be called by kernel module.
*/
int ssi_fips_get_state(ssi_fips_state_t *p_state)
{
@ -46,8 +46,8 @@ int ssi_fips_get_state(ssi_fips_state_t *p_state)
EXPORT_SYMBOL(ssi_fips_get_state);
/*
This function returns the REE FIPS error.
It should be called by kernel module.
This function returns the REE FIPS error.
It should be called by kernel module.
*/
int ssi_fips_get_error(ssi_fips_error_t *p_err)
{


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -23,7 +23,7 @@
#endif
/*!
/*!
@file
@brief This file contains FIPS related defintions and APIs.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -36,7 +36,7 @@ http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip
* AES CMAC
http://csrc.nist.gov/groups/STM/cavp/index.html#07
http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip
* AES-CCM
http://csrc.nist.gov/groups/STM/cavp/#07
http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip
@ -55,12 +55,12 @@ http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip
* HASH
http://csrc.nist.gov/groups/STM/cavp/#03
http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip
* HMAC
http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip
* HMAC
http://csrc.nist.gov/groups/STM/cavp/#07
http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
*/
/* NIST AES */
@ -86,18 +86,18 @@ http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
#define NIST_AES_CBC_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
#define NIST_AES_128_CBC_CIPHER { 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d }
#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 }
#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 }
#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 }
#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 }
#define NIST_AES_OFB_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
#define NIST_AES_128_OFB_CIPHER { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20, 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a }
#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 }
#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 }
#define NIST_AES_256_OFB_CIPHER { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b, 0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60 }
#define NIST_AES_CTR_IV { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }
#define NIST_AES_128_CTR_CIPHER { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce }
#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
#define RFC3962_AES_128_KEY { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 }
@ -111,8 +111,8 @@ http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f }
#define NIST_AES_256_XTS_IV { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 }
#define NIST_AES_256_XTS_VECTOR_SIZE 16
#define NIST_AES_256_XTS_PLAIN { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c }
#define NIST_AES_256_XTS_CIPHER { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 }
#define NIST_AES_256_XTS_PLAIN { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c }
#define NIST_AES_256_XTS_CIPHER { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 }
#define NIST_AES_512_XTS_KEY { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, \
0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, \
@ -121,9 +121,9 @@ http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
#define NIST_AES_512_XTS_IV { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64, }
#define NIST_AES_512_XTS_VECTOR_SIZE 32
#define NIST_AES_512_XTS_PLAIN { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, \
0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e }
0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e }
#define NIST_AES_512_XTS_CIPHER { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \
0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
/* NIST AES-CMAC */


@ -1,21 +1,21 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/**************************************************************
This file defines the driver FIPS functions that should be
This file defines the driver FIPS functions that should be
implemented by the driver user. Current implementation is sample code only.
***************************************************************/
@ -32,10 +32,10 @@ static ssi_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED;
static ssi_fips_error_t fips_error = CC_REE_FIPS_ERROR_OK;
/*
This function returns the FIPS REE state.
This function returns the FIPS REE state.
The function should be implemented by the driver user, depends on where .
the state value is stored.
The reference code uses global variable.
the state value is stored.
The reference code uses global variable.
*/
int ssi_fips_ext_get_state(ssi_fips_state_t *p_state)
{
@ -51,10 +51,10 @@ int ssi_fips_ext_get_state(ssi_fips_state_t *p_state)
}
/*
This function returns the FIPS REE error.
This function returns the FIPS REE error.
The function should be implemented by the driver user, depends on where .
the error value is stored.
The reference code uses global variable.
the error value is stored.
The reference code uses global variable.
*/
int ssi_fips_ext_get_error(ssi_fips_error_t *p_err)
{
@ -70,10 +70,10 @@ int ssi_fips_ext_get_error(ssi_fips_error_t *p_err)
}
/*
This function sets the FIPS REE state.
This function sets the FIPS REE state.
The function should be implemented by the driver user, depends on where .
the state value is stored.
The reference code uses global variable.
the state value is stored.
The reference code uses global variable.
*/
int ssi_fips_ext_set_state(ssi_fips_state_t state)
{
@ -82,10 +82,10 @@ int ssi_fips_ext_set_state(ssi_fips_state_t state)
}
/*
This function sets the FIPS REE error.
This function sets the FIPS REE error.
The function should be implemented by the driver user, depends on where .
the error value is stored.
The reference code uses global variable.
the error value is stored.
The reference code uses global variable.
*/
int ssi_fips_ext_set_error(ssi_fips_error_t err)
{


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -30,13 +30,13 @@ that executes the KAT.
static const uint32_t digest_len_init[] = {
0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const uint32_t sha1_init[] = {
static const uint32_t sha1_init[] = {
SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const uint32_t sha256_init[] = {
SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (CC_SUPPORT_SHA > 256)
static const uint32_t digest_len_sha512_init[] = {
static const uint32_t digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const uint64_t sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
@ -271,7 +271,7 @@ static const FipsGcmData FipsGcmDataTable[] = {
#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
static inline ssi_fips_error_t
static inline ssi_fips_error_t
FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
{
switch (mode)
@ -296,7 +296,7 @@ FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
}
static inline int
static inline int
ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
bool is_aes,
int cipher_mode,
@ -331,7 +331,7 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
if ((cipher_mode == DRV_CIPHER_CTR) ||
if ((cipher_mode == DRV_CIPHER_CTR) ||
(cipher_mode == DRV_CIPHER_OFB) ) {
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
} else {
@ -346,7 +346,7 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
if (is_aes) {
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
key_dma_addr,
key_dma_addr,
((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
@ -376,7 +376,7 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
(key_dma_addr+key_len/2), key_len/2, NS_BIT);
HW_DESC_SET_XEX_DATA_UNIT_SIZE(&desc[idx], data_size);
HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
@ -481,7 +481,7 @@ ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffe
}
static inline int
static inline int
ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
dma_addr_t key_dma_addr,
size_t key_len,
@ -522,19 +522,19 @@ ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
//ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
din_dma_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
din_dma_addr,
din_len, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
/* Get final MAC result */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DOUT_DLLI(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
idx++;
/* perform the operation - Lock HW and push sequence */
@ -605,7 +605,7 @@ ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
}
static inline ssi_fips_error_t
static inline ssi_fips_error_t
FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
{
switch (hash_mode) {
@ -624,7 +624,7 @@ FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
return CC_REE_FIPS_ERROR_GENERAL;
}
static inline int
static inline int
ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
dma_addr_t initial_digest_dma_addr,
dma_addr_t din_dma_addr,
@ -779,7 +779,7 @@ ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
}
static inline ssi_fips_error_t
static inline ssi_fips_error_t
FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
{
switch (hash_mode) {
@ -798,7 +798,7 @@ FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
return CC_REE_FIPS_ERROR_GENERAL;
}
static inline int
static inline int
ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
dma_addr_t initial_digest_dma_addr,
dma_addr_t key_dma_addr,
@ -841,7 +841,7 @@ ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0, (block_size - key_size));
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(k0_dma_addr + key_size), (block_size - key_size),
NS_BIT, 0);
idx++;
@ -917,7 +917,7 @@ ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
/* data descriptor */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
din_dma_addr, data_in_size,
NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
@ -1112,7 +1112,7 @@ ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
}
static inline int
static inline int
ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
enum drv_crypto_direction direction,
dma_addr_t key_dma_addr,
@ -1160,7 +1160,7 @@ ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
iv_dma_addr, AES_BLOCK_SIZE,
NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
idx++;
@ -1183,7 +1183,7 @@ ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
@ -1235,7 +1235,7 @@ ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT);
HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT, 0);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
idx++;
/* perform the operation - Lock HW and push sequence */
BUG_ON(idx > FIPS_CCM_MAX_SEQ_LEN);
@ -1373,12 +1373,12 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
///////////////////////////////// 1 ////////////////////////////////////
/* load key to AES*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_DIN_TYPE(&desc[idx],
DMA_DLLI, key_dma_addr, key_size,
NS_BIT);
NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
@ -1389,7 +1389,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
hkey_dma_addr, AES_BLOCK_SIZE,
NS_BIT, 0);
NS_BIT, 0);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
@ -1407,8 +1407,8 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@ -1420,10 +1420,10 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@ -1434,7 +1434,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
idx++;
@ -1447,7 +1447,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
///////////////////////////////// 2 ////////////////////////////////////
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
adata_dma_addr, adata_size,
NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
@ -1459,12 +1459,12 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
///////////////////////////////// 3 ////////////////////////////////////
/* load key to AES*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
key_dma_addr, key_size,
NS_BIT);
NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
@ -1477,7 +1477,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
iv_inc2_dma_addr, AES_BLOCK_SIZE,
NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
idx++;
@ -1486,7 +1486,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
///////////////////////////////// 4 ////////////////////////////////////
/* process(gctr+ghash) */
// if (req_ctx->cryptlen != 0)
// ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
// ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
///////////////////////////////// 4 ////////////////////////////////////
HW_DESC_INIT(&desc[idx]);
@ -1506,7 +1506,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
/* prcess(ghash) gcm_block_len */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
block_len_dma_addr, AES_BLOCK_SIZE,
NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
@ -1522,7 +1522,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
idx++;
idx++;
/* load AES/CTR initial CTR value inc by 1*/
HW_DESC_INIT(&desc[idx]);
@ -1531,7 +1531,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
iv_inc1_dma_addr, AES_BLOCK_SIZE,
NS_BIT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
idx++;


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -74,12 +74,12 @@ static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
return CC_REE_FIPS_ERROR_OK;
}
}
return CC_REE_FIPS_ERROR_FROM_TEE;
}
/*
/*
This function should push the FIPS REE library status towards the TEE library.
By writing the error state to HOST_GPR0 register. The function is called from .
driver entry point so no need to protect by mutex.
@ -119,7 +119,7 @@ void ssi_fips_fini(struct ssi_drvdata *drvdata)
void fips_handler(struct ssi_drvdata *drvdata)
{
struct ssi_fips_handle *fips_handle_ptr =
struct ssi_fips_handle *fips_handle_ptr =
drvdata->fips_handle;
#ifdef COMP_IN_WQ
queue_delayed_work(fips_handle_ptr->workq, &fips_handle_ptr->fipswork, 0);
@ -154,11 +154,11 @@ static void fips_dsr(unsigned long devarg)
teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
}
}
}
/* after verifing that there is nothing to do, Unmask AXI completion interrupt */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
CC_HAL_READ_REGISTER(
CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
}
@ -231,11 +231,11 @@ ssi_fips_error_t cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
/* The function checks if FIPS supported and FIPS error exists.*
/* The function checks if FIPS supported and FIPS error exists.*
* It should be used in every driver API.*/
int ssi_fips_check_fips_error(void)
{
ssi_fips_state_t fips_state;
ssi_fips_state_t fips_state;
if (ssi_fips_get_state(&fips_state) != 0) {
FIPS_LOG("ssi_fips_get_state FAILED, returning.. \n");
@ -249,14 +249,14 @@ int ssi_fips_check_fips_error(void)
}
/* The function sets the REE FIPS state.*
/* The function sets the REE FIPS state.*
* It should be used while driver is being loaded .*/
int ssi_fips_set_state(ssi_fips_state_t state)
{
return ssi_fips_ext_set_state(state);
}
/* The function sets the REE FIPS error, and pushes the error to TEE library. *
/* The function sets the REE FIPS error, and pushes the error to TEE library. *
* It should be used when any of the KAT tests fails .*/
int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, ssi_fips_error_t err)
{
@ -268,7 +268,7 @@ int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, ssi_fips_error_t err)
// setting no error is not allowed
if (err == CC_REE_FIPS_ERROR_OK) {
return -ENOEXEC;
}
}
// If error exists, do not set new error
if (ssi_fips_get_error(&current_err) != 0) {
return -ENOEXEC;


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -44,18 +44,18 @@ struct ssi_hash_handle {
static const uint32_t digest_len_init[] = {
0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const uint32_t md5_init[] = {
static const uint32_t md5_init[] = {
SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const uint32_t sha1_init[] = {
static const uint32_t sha1_init[] = {
SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const uint32_t sha224_init[] = {
static const uint32_t sha224_init[] = {
SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const uint32_t sha256_init[] = {
SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const uint32_t digest_len_sha512_init[] = {
static const uint32_t digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const uint64_t sha384_init[] = {
SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
@ -66,11 +66,11 @@ static const uint64_t sha512_init[] = {
#endif
static void ssi_hash_create_xcbc_setup(
struct ahash_request *areq,
struct ahash_request *areq,
HwDesc_s desc[],
unsigned int *seq_size);
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
HwDesc_s desc[],
unsigned int *seq_size);
@ -96,7 +96,7 @@ struct hash_key_req_ctx {
/* hash per-session context */
struct ssi_hash_ctx {
struct ssi_drvdata *drvdata;
/* holds the origin digest; the digest after "setkey" if HMAC,*
/* holds the origin digest; the digest after "setkey" if HMAC,*
the initial digest if HASH. */
uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
@ -115,7 +115,7 @@ static const struct crypto_type crypto_shash_type;
static void ssi_hash_create_data_desc(
struct ahash_req_ctx *areq_ctx,
struct ssi_hash_ctx *ctx,
struct ssi_hash_ctx *ctx,
unsigned int flow_mode,HwDesc_s desc[],
bool is_not_last_data,
unsigned int *seq_size);
@ -131,11 +131,11 @@ static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc)
}
}
static int ssi_hash_map_result(struct device *dev,
struct ahash_req_ctx *state,
static int ssi_hash_map_result(struct device *dev,
struct ahash_req_ctx *state,
unsigned int digestsize)
{
state->digest_result_dma_addr =
state->digest_result_dma_addr =
dma_map_single(dev, (void *)state->digest_result_buff,
digestsize,
DMA_BIDIRECTIONAL);
@ -154,8 +154,8 @@ static int ssi_hash_map_result(struct device *dev,
return 0;
}
static int ssi_hash_map_request(struct device *dev,
struct ahash_req_ctx *state,
static int ssi_hash_map_request(struct device *dev,
struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx)
{
bool is_hmac = ctx->is_hmac;
@ -211,7 +211,7 @@ static int ssi_hash_map_request(struct device *dev,
ctx->inter_digestsize, state->digest_buff);
goto fail3;
}
SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
ctx->inter_digestsize);
SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
ctx->inter_digestsize, state->digest_buff,
@ -220,7 +220,7 @@ static int ssi_hash_map_request(struct device *dev,
if (is_hmac) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
ctx->inter_digestsize);
if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
memset(state->digest_buff, 0, ctx->inter_digestsize);
@ -238,16 +238,16 @@ static int ssi_hash_map_request(struct device *dev,
}
SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
ctx->inter_digestsize);
if (ctx->hash_mode != DRV_HASH_NULL) {
SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
ctx->inter_digestsize);
}
}
} else { /*hash*/
/* Copy the initial digests if hash flow. The SRAM contains the
initial digests in the expected order for all SHA* */
@ -338,8 +338,8 @@ fail0:
return rc;
}
static void ssi_hash_unmap_request(struct device *dev,
struct ahash_req_ctx *state,
static void ssi_hash_unmap_request(struct device *dev,
struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx)
{
if (state->digest_buff_dma_addr != 0) {
@ -375,8 +375,8 @@ static void ssi_hash_unmap_request(struct device *dev,
kfree(state->buff0);
}
static void ssi_hash_unmap_result(struct device *dev,
struct ahash_req_ctx *state,
static void ssi_hash_unmap_result(struct device *dev,
struct ahash_req_ctx *state,
unsigned int digestsize, u8 *result)
{
if (state->digest_result_dma_addr != 0) {
@ -384,10 +384,10 @@ static void ssi_hash_unmap_result(struct device *dev,
dma_unmap_single(dev,
state->digest_result_dma_addr,
digestsize,
DMA_BIDIRECTIONAL);
DMA_BIDIRECTIONAL);
SSI_LOG_DEBUG("unmpa digest result buffer "
"va (%pK) pa (%llx) len %u\n",
state->digest_result_buff,
state->digest_result_buff,
(unsigned long long)state->digest_result_dma_addr,
digestsize);
memcpy(result,
@ -415,7 +415,7 @@ static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __i
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
uint32_t digestsize = crypto_ahash_digestsize(tfm);
SSI_LOG_DEBUG("req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
@ -431,7 +431,7 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
uint32_t digestsize = crypto_ahash_digestsize(tfm);
SSI_LOG_DEBUG("req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
@ -440,11 +440,11 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
req->base.complete(&req->base, 0);
}
static int ssi_hash_digest(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int digestsize,
struct scatterlist *src,
unsigned int nbytes, u8 *result,
static int ssi_hash_digest(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int digestsize,
struct scatterlist *src,
unsigned int nbytes, u8 *result,
void *async_req)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
@ -568,7 +568,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
/* Get final MAC result */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/
if (async_req) {
HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
@ -593,7 +593,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
} else {
ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
}
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
@ -601,11 +601,11 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
return rc;
}
static int ssi_hash_update(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int block_size,
struct scatterlist *src,
unsigned int nbytes,
static int ssi_hash_update(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int block_size,
struct scatterlist *src,
unsigned int nbytes,
void *async_req)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
@ -697,12 +697,12 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
return rc;
}
static int ssi_hash_finup(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int digestsize,
struct scatterlist *src,
unsigned int nbytes,
u8 *result,
static int ssi_hash_finup(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int digestsize,
struct scatterlist *src,
unsigned int nbytes,
u8 *result,
void *async_req)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
@ -803,7 +803,7 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
idx++;
if (async_req) {
@ -828,12 +828,12 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
return rc;
}
static int ssi_hash_final(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int digestsize,
struct scatterlist *src,
unsigned int nbytes,
u8 *result,
static int ssi_hash_final(struct ahash_req_ctx *state,
struct ssi_hash_ctx *ctx,
unsigned int digestsize,
struct scatterlist *src,
unsigned int nbytes,
u8 *result,
void *async_req)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
@ -972,7 +972,7 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
state->xcbc_count = 0;
state->xcbc_count = 0;
CHECK_AND_RETURN_UPON_FIPS_ERROR();
ssi_hash_map_request(dev, state, ctx);
@ -997,8 +997,8 @@ static int ssi_hash_import(struct ssi_hash_ctx *ctx, const void *in)
#endif
static int ssi_hash_setkey(void *hash,
const u8 *key,
unsigned int keylen,
const u8 *key,
unsigned int keylen,
bool synchronize)
{
unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
@ -1011,7 +1011,7 @@ static int ssi_hash_setkey(void *hash,
ssi_sram_addr_t larval_addr;
SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (synchronize) {
ctx = crypto_shash_ctx(((struct crypto_shash *)hash));
@ -1022,7 +1022,7 @@ static int ssi_hash_setkey(void *hash,
blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
}
larval_addr = ssi_ahash_get_larval_digest_sram_addr(
ctx->drvdata, ctx->hash_mode);
@ -1058,7 +1058,7 @@ static int ssi_hash_setkey(void *hash,
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
idx++;
/* Load the hash current length*/
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
@ -1067,17 +1067,17 @@ static int ssi_hash_setkey(void *hash,
HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
idx++;
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
ctx->key_params.key_dma_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
ctx->key_params.key_dma_addr,
keylen, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
idx++;
/* Get hashed key */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_DOUT_DLLI(&desc[idx], ctx->opad_tmp_keys_dma_addr,
digestsize, NS_BIT, 0);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
@ -1085,19 +1085,19 @@ static int ssi_hash_setkey(void *hash,
HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
idx++;
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(ctx->opad_tmp_keys_dma_addr + digestsize),
(blocksize - digestsize),
NS_BIT, 0);
idx++;
} else {
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
ctx->key_params.key_dma_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
ctx->key_params.key_dma_addr,
keylen, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
@ -1109,7 +1109,7 @@ static int ssi_hash_setkey(void *hash,
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - keylen));
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(ctx->opad_tmp_keys_dma_addr + keylen),
(blocksize - keylen),
NS_BIT, 0);
@ -1120,7 +1120,7 @@ static int ssi_hash_setkey(void *hash,
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0, blocksize);
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
HW_DESC_SET_DOUT_DLLI(&desc[idx],
HW_DESC_SET_DOUT_DLLI(&desc[idx],
(ctx->opad_tmp_keys_dma_addr),
blocksize,
NS_BIT, 0);
@ -1249,7 +1249,7 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
"keylen=%u\n",
(unsigned long long)ctx->key_params.key_dma_addr,
ctx->key_params.keylen);
ctx->is_hmac = true;
/* 1. Load the AES key */
HW_DESC_INIT(&desc[idx]);
@ -1264,23 +1264,23 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K1_OFFSET),
HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K2_OFFSET),
HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K2_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_CONST(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K3_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
@ -1324,23 +1324,23 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
/* STAT_PHASE_1: Copy key to ctx */
START_CYCLE_COUNT();
SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
ctx->opad_tmp_keys_dma_addr,
ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
memcpy(ctx->opad_tmp_keys_buff, key, keylen);
if (keylen == 24)
memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
ctx->opad_tmp_keys_dma_addr,
ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, keylen);
ctx->key_params.keylen = keylen;
END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
return 0;
@ -1416,13 +1416,13 @@ fail:
}
static int ssi_shash_cra_init(struct crypto_tfm *tfm)
{
{
struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct shash_alg * shash_alg =
struct shash_alg * shash_alg =
container_of(tfm->__crt_alg, struct shash_alg, base);
struct ssi_hash_alg *ssi_alg =
container_of(shash_alg, struct ssi_hash_alg, shash_alg);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx->hash_mode = ssi_alg->hash_mode;
ctx->hw_mode = ssi_alg->hw_mode;
@ -1435,9 +1435,9 @@ static int ssi_shash_cra_init(struct crypto_tfm *tfm)
static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
{
struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct hash_alg_common * hash_alg_common =
struct hash_alg_common * hash_alg_common =
container_of(tfm->__crt_alg, struct hash_alg_common, base);
struct ahash_alg *ahash_alg =
struct ahash_alg *ahash_alg =
container_of(hash_alg_common, struct ahash_alg, halg);
struct ssi_hash_alg *ssi_alg =
container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
@ -1499,7 +1499,7 @@ static int ssi_mac_update(struct ahash_request *req)
} else {
ssi_hash_create_cmac_setup(req, desc, &idx);
}
ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
/* store the hash digest result in context */
@ -1541,7 +1541,7 @@ static int ssi_mac_final(struct ahash_request *req)
uint32_t rem_cnt = state->buff_index ? state->buff1_cnt :
state->buff0_cnt;
CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
@ -1576,8 +1576,8 @@ static int ssi_mac_final(struct ahash_request *req)
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr +
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
(ctx->opad_tmp_keys_dma_addr +
XCBC_MAC_K1_OFFSET),
keySize, NS_BIT);
HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
@ -1599,7 +1599,7 @@ static int ssi_mac_final(struct ahash_request *req)
HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
idx++;
}
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
ssi_hash_create_xcbc_setup(req, desc, &idx);
} else {
@ -1621,14 +1621,14 @@ static int ssi_mac_final(struct ahash_request *req)
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
idx++;
}
/* Get final MAC result */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
@ -1659,7 +1659,7 @@ static int ssi_mac_finup(struct ahash_request *req)
SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n");
return ssi_mac_final(req);
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
SSI_LOG_ERR("map_ahash_request_final() failed\n");
return -ENOMEM;
@ -1694,14 +1694,14 @@ static int ssi_mac_finup(struct ahash_request *req)
} else {
ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
@ -1728,7 +1728,7 @@ static int ssi_mac_digest(struct ahash_request *req)
SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
SSI_LOG_ERR("map_ahash_source() failed\n");
return -ENOMEM;
@ -1742,7 +1742,7 @@ static int ssi_mac_digest(struct ahash_request *req)
SSI_LOG_ERR("map_ahash_request_final() failed\n");
return -ENOMEM;
}
/* Setup DX request structure */
ssi_req.user_cb = (void *)ssi_hash_digest_complete;
ssi_req.user_arg = (void *)req;
@ -1750,7 +1750,7 @@ static int ssi_mac_digest(struct ahash_request *req)
ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
#endif
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
keyLen = CC_AES_128_BIT_KEY_SIZE;
ssi_hash_create_xcbc_setup(req, desc, &idx);
@ -1769,7 +1769,7 @@ static int ssi_mac_digest(struct ahash_request *req)
} else {
ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
}
/* Get final MAC result */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,1);
@ -1777,7 +1777,7 @@ static int ssi_mac_digest(struct ahash_request *req)
HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],DESC_DIRECTION_ENCRYPT_ENCRYPT);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
@ -1792,7 +1792,7 @@ static int ssi_mac_digest(struct ahash_request *req)
//shash wrap functions
#ifdef SYNC_ALGS
static int ssi_shash_digest(struct shash_desc *desc,
static int ssi_shash_digest(struct shash_desc *desc,
const u8 *data, unsigned int len, u8 *out)
{
struct ahash_req_ctx *state = shash_desc_ctx(desc);
@ -1804,14 +1804,14 @@ static int ssi_shash_digest(struct shash_desc *desc,
if (len == 0) {
return ssi_hash_digest(state, ctx, digestsize, NULL, 0, out, NULL);
}
/* sg_init_one may crash when len is 0 (depends on kernel configuration) */
sg_init_one(&src, (const void *)data, len);
return ssi_hash_digest(state, ctx, digestsize, &src, len, out, NULL);
}
static int ssi_shash_update(struct shash_desc *desc,
static int ssi_shash_update(struct shash_desc *desc,
const u8 *data, unsigned int len)
{
struct ahash_req_ctx *state = shash_desc_ctx(desc);
@ -1821,11 +1821,11 @@ static int ssi_shash_update(struct shash_desc *desc,
struct scatterlist src;
sg_init_one(&src, (const void *)data, len);
return ssi_hash_update(state, ctx, blocksize, &src, len, NULL);
}
static int ssi_shash_finup(struct shash_desc *desc,
static int ssi_shash_finup(struct shash_desc *desc,
const u8 *data, unsigned int len, u8 *out)
{
struct ahash_req_ctx *state = shash_desc_ctx(desc);
@ -1833,9 +1833,9 @@ static int ssi_shash_finup(struct shash_desc *desc,
struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
uint32_t digestsize = crypto_shash_digestsize(tfm);
struct scatterlist src;
sg_init_one(&src, (const void *)data, len);
return ssi_hash_finup(state, ctx, digestsize, &src, len, out, NULL);
}
@ -1845,7 +1845,7 @@ static int ssi_shash_final(struct shash_desc *desc, u8 *out)
struct crypto_shash *tfm = desc->tfm;
struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
uint32_t digestsize = crypto_shash_digestsize(tfm);
return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL);
}
@ -1871,12 +1871,12 @@ static int ssi_shash_import(struct shash_desc *desc, const void *in)
{
struct crypto_shash *tfm = desc->tfm;
struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
return ssi_hash_import(ctx, in);
}
#endif
static int ssi_shash_setkey(struct crypto_shash *tfm,
static int ssi_shash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
return ssi_hash_setkey((void *) tfm, key, keylen, true);
@ -1891,7 +1891,7 @@ static int ssi_ahash_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
uint32_t digestsize = crypto_ahash_digestsize(tfm);
return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}
@ -1901,7 +1901,7 @@ static int ssi_ahash_update(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
}
@ -1911,7 +1911,7 @@ static int ssi_ahash_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
uint32_t digestsize = crypto_ahash_digestsize(tfm);
return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}
@ -1921,7 +1921,7 @@ static int ssi_ahash_final(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
uint32_t digestsize = crypto_ahash_digestsize(tfm);
return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}
@ -1929,7 +1929,7 @@ static int ssi_ahash_init(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
@ -1941,7 +1941,7 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
return ssi_hash_export(ctx, out);
}
@ -1949,14 +1949,14 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
return ssi_hash_import(ctx, in);
}
#endif
static int ssi_ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
{
return ssi_hash_setkey((void *) ahash, key, keylen, false);
}
@ -1970,7 +1970,7 @@ struct ssi_hash_template {
union {
struct ahash_alg template_ahash;
struct shash_alg template_shash;
};
};
int hash_mode;
int hw_mode;
int inter_digestsize;
@ -2212,7 +2212,7 @@ static struct ssi_hash_template driver_hash[] = {
.inter_digestsize = AES_BLOCK_SIZE,
},
#endif
};
static struct ssi_hash_alg *
@ -2259,9 +2259,9 @@ ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_exit = ssi_hash_cra_exit;
if (template->synchronize) {
alg->cra_init = ssi_shash_cra_init;
alg->cra_init = ssi_shash_cra_init;
alg->cra_flags = CRYPTO_ALG_TYPE_SHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_type = &crypto_shash_type;
@ -2418,7 +2418,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
sizeof(sha1_init) +
sizeof(sha224_init) +
sizeof(sha256_init);
sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
SSI_LOG_ERR("SRAM pool exhausted\n");
@ -2441,7 +2441,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
/* ahash registration */
for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
struct ssi_hash_alg *t_alg;
/* register hmac version */
if ((((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_XCBC_MAC) &&
@ -2454,7 +2454,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
goto fail;
}
t_alg->drvdata = drvdata;
if (t_alg->synchronize) {
rc = crypto_register_shash(&t_alg->shash_alg);
if (unlikely(rc != 0)) {
@ -2485,7 +2485,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
goto fail;
}
t_alg->drvdata = drvdata;
if (t_alg->synchronize) {
rc = crypto_register_shash(&t_alg->shash_alg);
if (unlikely(rc != 0)) {
@ -2494,8 +2494,8 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
kfree(t_alg);
goto fail;
} else
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
list_add_tail(&t_alg->entry, &hash_handle->hash_list);
} else {
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (unlikely(rc != 0)) {
@ -2535,14 +2535,14 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
list_del(&t_hash_alg->entry);
kfree(t_hash_alg);
}
kfree(hash_handle);
drvdata->hash_handle = NULL;
}
return 0;
}
static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
HwDesc_s desc[],
unsigned int *seq_size) {
unsigned int idx = *seq_size;
@ -2552,7 +2552,7 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K1 */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
+ XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
@ -2564,7 +2564,7 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K2 */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
+ XCBC_MAC_K2_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
@ -2576,7 +2576,7 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
/* Setup XCBC MAC K3 */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
+ XCBC_MAC_K3_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
@ -2598,7 +2598,7 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
*seq_size = idx;
}
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
HwDesc_s desc[],
unsigned int *seq_size)
{
@ -2634,15 +2634,15 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
struct ssi_hash_ctx *ctx,
unsigned int flow_mode,
HwDesc_s desc[],
bool is_not_last_data,
bool is_not_last_data,
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
areq_ctx->curr_sg->length, NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
idx++;
@ -2654,19 +2654,19 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
}
/* bypass */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_len,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_len,
NS_BIT);
HW_DESC_SET_DOUT_SRAM(&desc[idx],
ctx->drvdata->mlli_sram_addr,
HW_DESC_SET_DOUT_SRAM(&desc[idx],
ctx->drvdata->mlli_sram_addr,
areq_ctx->mlli_params.mlli_len);
HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
idx++;
/* process */
HW_DESC_INIT(&desc[idx]);
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
ctx->drvdata->mlli_sram_addr,
HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
ctx->drvdata->mlli_sram_addr,
areq_ctx->mlli_nents,
NS_BIT);
HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
@ -2680,12 +2680,12 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
}
/*!
* Gets the address of the initial digest in SRAM
* Gets the address of the initial digest in SRAM
* according to the given hash mode
*
*
* \param drvdata
* \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
*
*
* \return uint32_t The address of the initial digest in SRAM
*/
ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -68,7 +68,7 @@ struct ahash_req_ctx {
struct scatterlist *curr_sg;
uint32_t in_nents;
uint32_t mlli_nents;
struct mlli_params mlli_params;
struct mlli_params mlli_params;
};
int ssi_hash_alloc(struct ssi_drvdata *drvdata);
@ -77,22 +77,22 @@ int ssi_hash_free(struct ssi_drvdata *drvdata);
/*!
* Gets the initial digest length
*
* \param drvdata
*
* \param drvdata
* \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
*
* \return uint32_t returns the address of the initial digest length in SRAM
*/
ssi_sram_addr_t
ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode);
/*!
* Gets the address of the initial digest in SRAM
* Gets the address of the initial digest in SRAM
* according to the given hash mode
*
* \param drvdata
*
* \param drvdata
* \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
*
* \return uint32_t The address of the initial digest in SRAM
*/
ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode);
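For orientation only: resolving this address amounts to skipping over the larval digests that precede the requested mode in the SRAM layout. A minimal sketch of that lookup, assuming the digests are stored back to back in MD5, SHA-1, SHA-224, SHA-256 order from a base address; the base-address parameter and the omission of SHA-384/SHA-512 are assumptions for illustration, not the driver's actual routine:

/* Illustrative sketch only: larval digest lookup for a back-to-back
 * SRAM layout. MD5_DIGEST_SIZE and friends come from <crypto/md5.h>
 * and <crypto/sha.h>; larval_base is an assumed parameter. */
static ssi_sram_addr_t larval_digest_addr_sketch(ssi_sram_addr_t larval_base,
						 uint32_t mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return larval_base;
	case DRV_HASH_SHA1:
		return larval_base + MD5_DIGEST_SIZE;
	case DRV_HASH_SHA224:
		return larval_base + MD5_DIGEST_SIZE + SHA1_DIGEST_SIZE;
	case DRV_HASH_SHA256:
		return larval_base + MD5_DIGEST_SIZE + SHA1_DIGEST_SIZE +
		       SHA224_DIGEST_SIZE;
	default:
		return NULL_SRAM_ADDR;	/* mode not covered by this sketch */
	}
}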


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -31,8 +31,8 @@
#define SSI_IVPOOL_GEN_SEQ_LEN 4
/**
* struct ssi_ivgen_ctx -IV pool generation context
* @pool: the start address of the iv-pool resides in internal RAM
* struct ssi_ivgen_ctx -IV pool generation context
* @pool: the start address of the iv-pool resides in internal RAM
* @ctr_key_dma: address of pool's encryption key material in internal RAM
* @ctr_iv_dma: address of pool's counter iv in internal RAM
* @next_iv_ofs: the offset to the next available IV in pool
@ -49,12 +49,12 @@ struct ssi_ivgen_ctx {
};
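To make the bookkeeping described above concrete: handing out an IV amounts to returning pool + next_iv_ofs and bumping the offset, with the pool regenerated (zeros encrypted under the AES-128-CTR key) once it is exhausted. This is only a sketch of that idea; the field types, the iv_size parameter and the -ENOSPC convention are assumptions, and the regeneration step is merely indicated by a comment:

/* Sketch only: advance through the SRAM IV pool described above. */
static int ivgen_next_iv_sketch(struct ssi_ivgen_ctx *ivgen_ctx,
				ssi_sram_addr_t *iv_addr, uint32_t iv_size)
{
	if (ivgen_ctx->next_iv_ofs + iv_size > SSI_IVPOOL_SIZE) {
		/* Pool exhausted: it would have to be regenerated here
		 * (zeros encrypted with the CTR key) before serving more IVs. */
		return -ENOSPC;
	}

	*iv_addr = ivgen_ctx->pool + ivgen_ctx->next_iv_ofs;
	ivgen_ctx->next_iv_ofs += iv_size;
	return 0;
}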
/*!
* Generates SSI_IVPOOL_SIZE of random bytes by
* Generates SSI_IVPOOL_SIZE of random bytes by
* encrypting 0's using AES128-CTR.
*
*
* \param ivgen iv-pool context
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
* \param iv_seq_len IN/OUT pointer to the sequence length
*/
static int ssi_ivgen_generate_pool(
struct ssi_ivgen_ctx *ivgen_ctx,
@ -110,11 +110,11 @@ static int ssi_ivgen_generate_pool(
}
/*!
* Generates the initial pool in SRAM.
* This function should be invoked when resuming DX driver.
*
* \param drvdata
*
* Generates the initial pool in SRAM.
* This function should be invoked when resuming DX driver.
*
* \param drvdata
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
@ -152,8 +152,8 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
/*!
* Free iv-pool and ivgen context.
*
* \param drvdata
*
* \param drvdata
*/
void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
{
@ -177,11 +177,11 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
}
/*!
* Allocates iv-pool and maps resources.
* This function generates the first IV pool.
*
* Allocates iv-pool and maps resources.
* This function generates the first IV pool.
*
* \param drvdata Driver's private context
*
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_ivgen_init(struct ssi_drvdata *drvdata)
@ -228,15 +228,15 @@ out:
/*!
* Acquires 16 Bytes IV from the iv-pool
*
*
* \param drvdata Driver private context
* \param iv_out_dma Array of physical IV out addresses
* \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignored)
* \param iv_out_size May be 8 or 16 bytes long
* \param iv_out_size May be 8 or 16 bytes long
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
*
* \return int Zero for success, negative value otherwise.
* \param iv_seq_len IN/OUT pointer to the sequence length
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_ivgen_getiv(
struct ssi_drvdata *drvdata,


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -23,43 +23,43 @@
#define SSI_IVPOOL_SEQ_LEN 8
/*!
* Allocates iv-pool and maps resources.
* This function generates the first IV pool.
*
* Allocates iv-pool and maps resources.
* This function generates the first IV pool.
*
* \param drvdata Driver's private context
*
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_ivgen_init(struct ssi_drvdata *drvdata);
/*!
* Free iv-pool and ivgen context.
*
* \param drvdata
*
* \param drvdata
*/
void ssi_ivgen_fini(struct ssi_drvdata *drvdata);
/*!
* Generates the initial pool in SRAM.
* This function should be invoked when resuming DX driver.
*
* \param drvdata
*
* Generates the initial pool in SRAM.
* This function should be invoked when resuming DX driver.
*
* \param drvdata
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata);
/*!
* Acquires 16 Bytes IV from the iv-pool
*
*
* \param drvdata Driver private context
* \param iv_out_dma Array of physical IV out addresses
* \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignored)
* \param iv_out_size May be 8 or 16 bytes long
* \param iv_out_size May be 8 or 16 bytes long
* \param iv_seq IN/OUT array to the descriptors sequence
* \param iv_seq_len IN/OUT pointer to the sequence length
*
* \return int Zero for success, negative value otherwise.
* \param iv_seq_len IN/OUT pointer to the sequence length
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_ivgen_getiv(
struct ssi_drvdata *drvdata,


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -83,7 +83,7 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
/* must be after the queue resuming as it uses the HW queue*/
ssi_hash_init_sram_digest_consts(drvdata);
ssi_ivgen_init_sram_pool(drvdata);
return 0;
}


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -26,9 +26,9 @@
#include "ssi_pm_ext.h"
/*
This function should suspend the HW (if possible); it should be implemented by
the driver user.
The reference code clears the internal SRAM to imitate loss of state.
This function should suspend the HW (if possible); it should be implemented by
the driver user.
The reference code clears the internal SRAM to imitate loss of state.
*/
void ssi_pm_ext_hw_suspend(struct device *dev)
{
@ -50,8 +50,8 @@ void ssi_pm_ext_hw_suspend(struct device *dev)
}
/*
This function should resume the HW (if possible). It should be implemented by
the driver user.
This function should resume the HW (if possible). It should be implemented by
the driver user.
*/
void ssi_pm_ext_hw_resume(struct device *dev)
{


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -42,8 +42,8 @@
#define MONITOR_CNTR_BIT 0
/**
* Monitor descriptor.
* Used to measure CC performance.
* Monitor descriptor.
* Used to measure CC performance.
*/
#define INIT_CC_MONITOR_DESC(desc_p) \
do { \
@ -51,7 +51,7 @@ do { \
HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
} while (0)
/**
/**
* Try adding monitor descriptor BEFORE enqueuing sequence.
*/
#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \
@ -65,8 +65,8 @@ do { \
} while (0)
/**
* If CC_CYCLE_DESC_HEAD was successfully added:
* 1. Add memory barrier descriptor to ensure last AXI transaction.
* If CC_CYCLE_DESC_HEAD was successfully added:
* 1. Add memory barrier descriptor to ensure last AXI transaction.
* 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence.
*/
#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \
@ -82,7 +82,7 @@ do { \
} while (0)
/**
* Try reading CC monitor counter value upon sequence complete.
* Try reading CC monitor counter value upon sequence complete.
* Can only succeed if the lock_p is taken by the owner of the given request.
*/
#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
@ -279,10 +279,10 @@ static inline void enqueue_seq(
}
/*!
* Completion will take place if and only if user requested completion
* by setting "is_dout = 0" in send_request().
*
* \param dev
* Completion will take place if and only if user requested completion
* by setting "is_dout = 0" in send_request().
*
* \param dev
* \param dx_compl_h The completion event to signal
*/
static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
@ -298,14 +298,14 @@ static inline int request_mgr_queues_status_check(
unsigned int total_seq_len)
{
unsigned long poll_queue;
/* SW queue is checked only once as it will not
be changed during the poll because the spinlock_bh
/* SW queue is checked only once as it will not
be changed during the poll because the spinlock_bh
is held by the thread */
if (unlikely(((req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
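The software-queue test above works because MAX_REQUEST_QUEUE_SIZE is a power of two, so wrap-around reduces to a bit mask and one slot is deliberately left unused to distinguish "full" from "empty". A small self-contained illustration of the same convention, with an assumed size of 8:

/* Illustration of the power-of-two ring convention used above. */
#define RING_SIZE 8u	/* assumed power of two, like MAX_REQUEST_QUEUE_SIZE */

static inline int ring_is_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}

static inline int ring_is_full(unsigned int head, unsigned int tail)
{
	/* full when advancing head would make it collide with tail */
	return ((head + 1) & (RING_SIZE - 1)) == tail;
}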
@ -315,11 +315,11 @@ static inline int request_mgr_queues_status_check(
}
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue =0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue ++) {
req_mgr_h->q_free_slots =
req_mgr_h->q_free_slots =
CC_HAL_READ_REGISTER(
CC_REG_OFFSET(CRY_KERNEL,
DSCRPTR_QUEUE_CONTENT));
if (unlikely(req_mgr_h->q_free_slots <
if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
}
@ -329,12 +329,12 @@ static inline int request_mgr_queues_status_check(
return 0;
}
SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
req_mgr_h->q_free_slots, total_seq_len);
}
/* No room in the HW queue try again later */
SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
"sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
"sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
req_mgr_h->req_queue_head,
MAX_REQUEST_QUEUE_SIZE,
req_mgr_h->q_free_slots,
@ -344,15 +344,15 @@ static inline int request_mgr_queues_status_check(
/*!
* Enqueue caller request to crypto hardware.
*
* \param drvdata
*
* \param drvdata
* \param ssi_req The request to enqueue
* \param desc The crypto sequence
* \param len The crypto sequence length
* \param is_dout If "true": completion is handled by the caller
* \param is_dout If "true": completion is handled by the caller
* If "false": this function adds a dummy descriptor completion
* and waits upon completion signal.
*
*
* \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
*/
int send_request(
@ -385,7 +385,7 @@ int send_request(
spin_lock_bh(&req_mgr_h->hw_lock);
/* Check if there is enough place in the SW/HW queues
in case iv gen add the max size and in case of no dout add 1
in case iv gen add the max size and in case of no dout add 1
for the internal completion descriptor */
rc = request_mgr_queues_status_check(req_mgr_h,
cc_base,
@ -397,7 +397,7 @@ int send_request(
spin_unlock_bh(&req_mgr_h->hw_lock);
if (rc != -EAGAIN) {
/* Any error other than HW queue full
/* Any error other than HW queue full
(SW queue is full) */
#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
@ -441,12 +441,12 @@ int send_request(
total_seq_len += iv_seq_len;
}
used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE-1));
if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
req_mgr_h->max_used_sw_slots = used_sw_slots;
}
CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
&req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);
@ -495,11 +495,11 @@ int send_request(
* Enqueue caller request to crypto hardware during init process.
* assume this function is not called in middle of a flow,
* since we set QUEUE_LAST_IND flag in the last descriptor.
*
* \param drvdata
*
* \param drvdata
* \param desc The crypto sequence
* \param len The crypto sequence length
*
*
* \return int Returns "0" upon success
*/
int send_request_init(
@ -530,7 +530,7 @@ int send_request_init(
void complete_request(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *request_mgr_handle =
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
#ifdef COMP_IN_WQ
queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
@ -553,7 +553,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_req *ssi_req;
struct platform_device *plat_dev = drvdata->plat_dev;
struct ssi_request_mgr_handle * request_mgr_handle =
struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
int rc = 0;
@ -612,7 +612,7 @@ static void comp_handler(unsigned long devarg)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
void __iomem *cc_base = drvdata->cc_base;
struct ssi_request_mgr_handle * request_mgr_handle =
struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
uint32_t irq;
@ -626,38 +626,38 @@ static void comp_handler(unsigned long devarg)
if (irq & SSI_COMP_IRQ_MASK) {
/* To avoid the interrupt from firing as we unmask it, we clear it now */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
/* Avoid race with above clear: Test completion counter once more */
request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
/* ISR-to-Tasklet latency */
if (request_mgr_handle->axi_completed) {
/* Only if actually reflects ISR-to-completion-handling latency, i.e.,
not duplicate as a result of interrupt after AXIM_MON_ERR clear, before end of loop */
END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1);
}
while (request_mgr_handle->axi_completed) {
do {
proc_completions(drvdata);
/* At this point (after proc_completions()), request_mgr_handle->axi_completed is always 0.
The following assignment was changed to = (previously was +=) to conform KW restrictions. */
request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
} while (request_mgr_handle->axi_completed > 0);
/* To avoid the interrupt from firing as we unmask it, we clear it now */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
/* Avoid race with above clear: Test completion counter once more */
request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
}
}
/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
CC_HAL_READ_REGISTER(
CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2);
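The interesting part of the handler above is the ordering: acknowledge the completion interrupt first, drain the completion counter, acknowledge and re-read once more, and only unmask when the counter reads zero, so a completion landing between the last read and the unmask cannot be lost. A compressed sketch of just that control flow; the helper names are stand-ins for the CC_HAL register accesses, not real driver functions:

/* Sketch of the clear-then-recheck flow of comp_handler() above. */
static void completion_flow_sketch(struct ssi_drvdata *drvdata)
{
	uint32_t completed;

	clear_comp_irq(drvdata);		/* ack before counting */
	completed = read_axi_completed(drvdata);

	while (completed) {
		do {
			proc_completions(drvdata);
			completed = read_axi_completed(drvdata);
		} while (completed);

		/* a completion may have raced the loop exit:
		 * ack and count once more before giving up */
		clear_comp_irq(drvdata);
		completed = read_axi_completed(drvdata);
	}

	unmask_comp_irq(drvdata);	/* nothing pending: re-enable the IRQ */
}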
@ -685,12 +685,12 @@ only verify that the queue can be suspended.
*/
int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle * request_mgr_handle =
struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
/* lock the send_request */
spin_lock_bh(&request_mgr_handle->hw_lock);
if (request_mgr_handle->req_queue_head !=
if (request_mgr_handle->req_queue_head !=
request_mgr_handle->req_queue_tail) {
spin_unlock_bh(&request_mgr_handle->hw_lock);
return -EBUSY;
@ -703,7 +703,7 @@ int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle * request_mgr_handle =
struct ssi_request_mgr_handle * request_mgr_handle =
drvdata->request_mgr_handle;
return request_mgr_handle->is_runtime_suspended;


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -27,15 +27,15 @@ int request_mgr_init(struct ssi_drvdata *drvdata);
/*!
* Enqueue caller request to crypto hardware.
*
* \param drvdata
*
* \param drvdata
* \param ssi_req The request to enqueue
* \param desc The crypto sequence
* \param len The crypto sequence length
* \param is_dout If "true": completion is handled by the caller
* \param is_dout If "true": completion is handled by the caller
* If "false": this function adds a dummy descriptor completion
* and waits upon completion signal.
*
*
* \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
*/
int send_request(
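As a usage illustration only, mirroring the call sites visible earlier in this diff (e.g. ssi_mac_digest in ssi_hash.c): the caller fills a descriptor array, points the request at its completion callback and passes is_dout = 1 so that send_request returns -EINPROGRESS and the callback handles completion. The descriptor-array bound and the callback name below are assumed names, not driver symbols:

/* Illustrative call-site fragment; MAX_SEQ_LEN and my_complete_cb are assumed. */
struct ssi_crypto_req ssi_req = {};
HwDesc_s desc[MAX_SEQ_LEN];
unsigned int idx = 0;
int rc;

ssi_req.user_cb = (void *)my_complete_cb;	/* invoked on completion */
ssi_req.user_arg = (void *)req;

/* ... build the HW descriptor sequence here, incrementing idx ... */

rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
/* with is_dout = 1, -EINPROGRESS means the request was queued and
 * my_complete_cb will run later; with is_dout = 0 the call blocks on a
 * dummy completion descriptor and returns 0 once the sequence finishes */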


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -29,7 +29,7 @@ struct ssi_sram_mgr_ctx {
/**
* ssi_sram_mgr_fini() - Cleanup SRAM pool.
*
*
* @drvdata: Associated device driver context
*/
void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
@ -44,10 +44,10 @@ void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
}
/**
* ssi_sram_mgr_init() - Initializes SRAM pool.
* ssi_sram_mgr_init() - Initializes SRAM pool.
* The pool starts right at the beginning of SRAM.
* Returns zero for success, negative value otherwise.
*
*
* @drvdata: Associated device driver context
*/
int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
@ -77,12 +77,12 @@ out:
}
/*!
* Allocate a buffer from the SRAM pool.
* Note: the caller is responsible for freeing the LAST allocated buffer.
* This function does not take care of any fragmentation that may be caused
* by the order of calls to alloc/free.
*
* \param drvdata
* Allocate a buffer from the SRAM pool.
* Note: the caller is responsible for freeing the LAST allocated buffer.
* This function does not take care of any fragmentation that may be caused
* by the order of calls to alloc/free.
*
* \param drvdata
* \param size The requested bytes to allocate
*/
ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
@ -100,7 +100,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
}
p = smgr_ctx->sram_free_offset;
smgr_ctx->sram_free_offset += size;
SSI_LOG_DEBUG("Allocated %u B @ %u\n", size, (unsigned int)p);
@ -109,9 +109,9 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
/**
* ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
* set values in given array into SRAM.
* set values in given array into SRAM.
* Note: each const value can't exceed word size.
*
*
* @src: A pointer to array of words to set as consts.
* @dst: The target SRAM buffer to set into
* @nelements: The number of words in "src" array


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -33,39 +33,39 @@ typedef uint64_t ssi_sram_addr_t;
#define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
/*!
* Initializes SRAM pool.
* The first X bytes of SRAM are reserved for ROM usage, hence, pool
* starts right after X bytes.
*
* \param drvdata
*
* Initializes SRAM pool.
* The first X bytes of SRAM are reserved for ROM usage, hence, pool
* starts right after X bytes.
*
* \param drvdata
*
* \return int Zero for success, negative value otherwise.
*/
int ssi_sram_mgr_init(struct ssi_drvdata *drvdata);
/*!
* Uninits SRAM pool.
*
* \param drvdata
*
* \param drvdata
*/
void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
/*!
* Allocate a buffer from the SRAM pool.
* Note: the caller is responsible for freeing the LAST allocated buffer.
* This function does not take care of any fragmentation that may be caused
* by the order of calls to alloc/free.
*
* \param drvdata
* Allocate a buffer from the SRAM pool.
* Note: the caller is responsible for freeing the LAST allocated buffer.
* This function does not take care of any fragmentation that may be caused
* by the order of calls to alloc/free.
*
* \param drvdata
* \param size The requested bytes to allocate
*/
ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size);
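The allocator documented above is effectively a bump allocator over the SRAM region, which is why only the most recent allocation can be released. A self-contained sketch of that policy (not the driver's code; the struct and function names are illustrative):

/* Minimal bump-allocator sketch matching the constraint stated above. */
struct sram_pool_sketch {
	ssi_sram_addr_t free_offset;	/* next free byte in the pool */
	ssi_sram_addr_t pool_end;	/* one past the last usable byte */
};

static ssi_sram_addr_t sram_alloc_sketch(struct sram_pool_sketch *pool,
					 uint32_t size)
{
	ssi_sram_addr_t addr;

	if (pool->free_offset + size > pool->pool_end)
		return NULL_SRAM_ADDR;		/* pool exhausted */

	addr = pool->free_offset;
	pool->free_offset += size;		/* bump past the allocation */
	return addr;
}

static void sram_free_last_sketch(struct sram_pool_sketch *pool,
				  ssi_sram_addr_t addr)
{
	/* only valid for the LAST allocation, as the note above requires */
	pool->free_offset = addr;
}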
/**
* ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
* set values in given array into SRAM.
* set values in given array into SRAM.
* Note: each const value can't exceed word size.
*
*
* @src: A pointer to array of words to set as consts.
* @dst: The target SRAM buffer to set into
* @nelements: The number of words in "src" array


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
@ -40,7 +40,7 @@ struct stat_name {
const char *stat_phase_name[MAX_STAT_PHASES];
};
static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
{
{
/* STAT_OP_TYPE_NULL */
@ -50,8 +50,8 @@ static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
{
.op_type_name = "Encode",
.stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
.stat_phase_name[STAT_PHASE_1] = "Map buffers",
.stat_phase_name[STAT_PHASE_2] = "Create sequence",
.stat_phase_name[STAT_PHASE_1] = "Map buffers",
.stat_phase_name[STAT_PHASE_2] = "Create sequence",
.stat_phase_name[STAT_PHASE_3] = "Send Request",
.stat_phase_name[STAT_PHASE_4] = "HW-Q push",
.stat_phase_name[STAT_PHASE_5] = "Sequence completion",
@ -59,8 +59,8 @@ static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
},
{ .op_type_name = "Decode",
.stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
.stat_phase_name[STAT_PHASE_1] = "Map buffers",
.stat_phase_name[STAT_PHASE_2] = "Create sequence",
.stat_phase_name[STAT_PHASE_1] = "Map buffers",
.stat_phase_name[STAT_PHASE_2] = "Create sequence",
.stat_phase_name[STAT_PHASE_3] = "Send Request",
.stat_phase_name[STAT_PHASE_4] = "HW-Q push",
.stat_phase_name[STAT_PHASE_5] = "Sequence completion",
@ -88,7 +88,7 @@ static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
};
/*
* Structure used to create a directory
* Structure used to create a directory
* and its attributes in sysfs.
*/
struct sys_dir {
@ -140,12 +140,12 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]
uint64_t avg;
for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
for (j=0; j<MAX_STAT_PHASES; j++) {
for (j=0; j<MAX_STAT_PHASES; j++) {
if (item[i][j].count > 0) {
avg = (uint64_t)item[i][j].sum;
do_div(avg, item[i][j].count);
SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count);
}
}
@ -271,9 +271,9 @@ void update_cc_stat(
void display_all_stat_db(void)
{
SSI_LOG_ERR("\n======= CYCLE COUNT STATS =======\n");
SSI_LOG_ERR("\n======= CYCLE COUNT STATS =======\n");
display_db(stat_host_db);
SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n");
SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n");
display_db(stat_cc_db);
}
#endif /*CC_CYCLE_COUNT*/


@ -1,15 +1,15 @@
/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/