crypto: caam - Add APIs to allocate/free Job Rings

With each of the Job Rings available as a platform device, the
Job Ring driver needs to take care of allocating and deallocating
Job Rings for the interface layers above it. Add APIs to the
Job Ring driver to allocate and free Job Rings.

Signed-off-by: Ruchika Gupta <ruchika.gupta@freescale.com>
Reviewed-by: Garg Vakul-B16394 <vakul@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Ruchika Gupta, 2013-10-25 12:01:02 +05:30, committed by Herbert Xu
commit 07defbfb0f (parent 313ea293e9)
3 changed files, 62 additions and 3 deletions

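As context for the new interface, here is a minimal, hypothetical sketch of how an upper-layer CAAM algorithm driver could bind a transform to a job ring and release it again. Only caam_jr_alloc() and caam_jr_free() come from this commit; struct caam_ctx, example_tfm_init() and example_tfm_exit() are illustrative names, not code from the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/printk.h>

#include "jr.h"         /* caam_jr_alloc(), caam_jr_free() */

/* Hypothetical per-transform context; only the ring device matters here. */
struct caam_ctx {
        struct device *jrdev;
};

static int example_tfm_init(struct caam_ctx *ctx)
{
        /*
         * caam_jr_alloc() returns the ring with the lowest tfm_count and
         * increments that count, so successive callers are spread across
         * the available rings.
         */
        ctx->jrdev = caam_jr_alloc();
        if (IS_ERR(ctx->jrdev)) {
                pr_err("Job Ring device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }

        return 0;
}

static void example_tfm_exit(struct caam_ctx *ctx)
{
        /* Drop the ring's tfm_count so the ring can be reused or removed. */
        caam_jr_free(ctx->jrdev);
}

Because caam_jr_free() only decrements tfm_count, each successful caam_jr_alloc() must be paired with exactly one caam_jr_free(); the busy check in caam_jr_remove() below relies on that balance.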
drivers/crypto/caam/intern.h

@@ -44,6 +44,9 @@ struct caam_drv_private_jr {
         struct tasklet_struct irqtask;
         int irq;                        /* One per queue */
 
+        /* Number of scatterlist crypt transforms active on the JobR */
+        atomic_t tfm_count ____cacheline_aligned;
+
         /* Job ring info */
         int ringsize;   /* Size of rings (assume input = output) */
         struct caam_jrentry_info *entinfo;      /* Alloc'ed 1 per ring entry */

drivers/crypto/caam/jr.c

@@ -97,10 +97,9 @@ static int caam_jr_remove(struct platform_device *pdev)
         jrpriv = dev_get_drvdata(jrdev);
 
         /*
-         * Make sure ring is empty before release
+         * Return EBUSY if job ring already allocated.
          */
-        if (rd_reg32(&jrpriv->rregs->outring_used) ||
-            (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH)) {
+        if (atomic_read(&jrpriv->tfm_count)) {
                 dev_err(jrdev, "Device is busy\n");
                 return -EBUSY;
         }
@@ -233,6 +232,59 @@ static void caam_jr_dequeue(unsigned long devarg)
         clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
 }
 
+/**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * Returns: pointer to the job ring device with the lowest transform
+ *          count, or ERR_PTR(-ENODEV) if no job rings are registered.
+ **/
+struct device *caam_jr_alloc(void)
+{
+        struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+        struct device *dev = NULL;
+        int min_tfm_cnt = INT_MAX;
+        int tfm_cnt;
+
+        spin_lock(&driver_data.jr_alloc_lock);
+
+        if (list_empty(&driver_data.jr_list)) {
+                spin_unlock(&driver_data.jr_alloc_lock);
+                return ERR_PTR(-ENODEV);
+        }
+
+        list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+                tfm_cnt = atomic_read(&jrpriv->tfm_count);
+                if (tfm_cnt < min_tfm_cnt) {
+                        min_tfm_cnt = tfm_cnt;
+                        min_jrpriv = jrpriv;
+                }
+                if (!min_tfm_cnt)
+                        break;
+        }
+
+        if (min_jrpriv) {
+                atomic_inc(&min_jrpriv->tfm_count);
+                dev = min_jrpriv->dev;
+        }
+        spin_unlock(&driver_data.jr_alloc_lock);
+
+        return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev: points to the dev that identifies the Job ring to be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+        struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+        atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
 /**
  * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
  * -EBUSY if the queue is full, -EIO if it cannot map the caller's
@@ -442,6 +494,8 @@ static int caam_jr_probe(struct platform_device *pdev)
         list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
         spin_unlock(&driver_data.jr_alloc_lock);
 
+        atomic_set(&jrpriv->tfm_count, 0);
+
         return 0;
 }
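
The device returned by caam_jr_alloc() is then the target for caam_jr_enqueue(), whose prototype is added to the header below. The following is a hedged sketch of a submit/complete path assuming a synchronous caller; struct example_req, example_done() and example_submit() are hypothetical names, and mapping a non-zero job status to -EIO is a simplification.

#include <linux/completion.h>
#include <linux/device.h>

#include "jr.h"         /* caam_jr_enqueue() */

/* Hypothetical request wrapper carrying the completion for the caller. */
struct example_req {
        struct completion done;
        u32 status;
};

/* Signature matches the cbk argument of caam_jr_enqueue(). */
static void example_done(struct device *jrdev, u32 *desc, u32 status,
                         void *areq)
{
        struct example_req *req = areq;

        req->status = status;   /* 0 on success, CAAM status code otherwise */
        complete(&req->done);
}

static int example_submit(struct device *jrdev, u32 *desc)
{
        struct example_req req;
        int ret;

        init_completion(&req.done);

        /* -EBUSY means the input ring is full; -EIO, a DMA mapping failure. */
        ret = caam_jr_enqueue(jrdev, desc, example_done, &req);
        if (ret)
                return ret;

        wait_for_completion(&req.done);

        return req.status ? -EIO : 0;
}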

drivers/crypto/caam/jr.h

@@ -8,6 +8,8 @@
 #define JR_H
 
 /* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
 int caam_jr_enqueue(struct device *dev, u32 *desc,
                     void (*cbk)(struct device *dev, u32 *desc, u32 status,
                                 void *areq),