Merge tag 'soc-fsl-next-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.7

DPAA2 DPIO driver performance optimization
- Add and use QMAN multiple enqueue interface
- Use function pointer indirection to replace checks in hotpath

QUICC Engine drivers
- Fix sparse warnings and exposed endian issues

* tag 'soc-fsl-next-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
  soc: fsl: qe: fix sparse warnings for ucc_slow.c
  soc: fsl: qe: ucc_slow: remove 0 assignment for kzalloc'ed structure
  soc: fsl: qe: fix sparse warnings for ucc_fast.c
  soc: fsl: qe: fix sparse warnings for qe_ic.c
  soc: fsl: qe: fix sparse warnings for ucc.c
  soc: fsl: qe: fix sparse warning for qe_common.c
  soc: fsl: qe: fix sparse warnings for qe.c
  soc: fsl: dpio: fix dereference of pointer p before null check
  soc: fsl: dpio: Replace QMAN array mode with ring mode enqueue
  soc: fsl: dpio: QMAN performance improvement with function pointer indirection
  soc: fsl: dpio: Adding QMAN multiple enqueue interface

Link: https://lore.kernel.org/r/20200326001257.22696-1-leoyang.li@nxp.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
Arnd Bergmann 2020-03-26 10:57:44 +01:00
commit 3a3052f1fc
11 changed files with 936 additions and 128 deletions

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
* Copyright 2016-2019 NXP
*
*/
#include <linux/types.h>
@ -432,6 +432,69 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: table pointer of the frame descriptors to be enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return the number of frames successfully enqueued (the qbman layer
 * returns the enqueued count, which may be less than @nb when the
 * enqueue ring is short on space), or -ENODEV if there is no dpio
 * service.
 */
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
u32 fqid,
const struct dpaa2_fd *fd,
int nb)
{
struct qbman_eq_desc ed;
/* Pick a (possibly CPU-affine) DPIO service instance */
d = service_select(d);
if (!d)
return -ENODEV;
/* One shared descriptor targets the same fqid for every frame */
qbman_eq_desc_clear(&ed);
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
/* flags == 0: no per-frame DCA flags requested */
return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: table pointer of the frame descriptors to be enqueued.
 * @nb: number of frames to be enqueued, must be <= 32.
 *
 * Return the number of frames successfully enqueued, -EINVAL if @nb
 * exceeds the size of the on-stack descriptor table, or -ENODEV if
 * there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
u32 *fqid,
const struct dpaa2_fd *fd,
int nb)
{
struct qbman_eq_desc ed[32];
int i;

/*
 * Bound-check @nb against the fixed-size descriptor table; without
 * this, the fill loop below would overrun the stack array.
 */
if (nb > 32)
return -EINVAL;

d = service_select(d);
if (!d)
return -ENODEV;

/* One enqueue descriptor per frame, each targeting its own fqid */
for (i = 0; i < nb; i++) {
qbman_eq_desc_clear(&ed[i]);
qbman_eq_desc_set_no_orp(&ed[i], 0);
qbman_eq_desc_set_fq(&ed[i], fqid[i]);
}

return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
@ -526,7 +589,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
/**
* dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
* @max_frames: the maximum number of dequeued result for frames, must be <= 16.
* @max_frames: the maximum number of dequeued result for frames, must be <= 32.
* @dev: the device to allow mapping/unmapping the DMAable region.
*
* The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
@ -541,7 +604,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
struct dpaa2_io_store *ret;
size_t size;
if (!max_frames || (max_frames > 16))
if (!max_frames || (max_frames > 32))
return NULL;
ret = kmalloc(sizeof(*ret), GFP_KERNEL);

View File

@ -1,24 +1,18 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
* Copyright 2016-2019 NXP
*
*/
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>
#include "qbman-portal.h"
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)
@ -28,6 +22,7 @@
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
@ -51,6 +46,8 @@
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
@ -78,6 +75,12 @@
/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN 0xbb
#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32
enum qbman_sdqcr_dct {
qbman_sdqcr_dct_null = 0,
qbman_sdqcr_dct_prio_ics,
@ -90,6 +93,82 @@ enum qbman_sdqcr_fc {
qbman_sdqcr_fc_up_to_3 = 1
};
/* Internal Function declaration */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
struct qbman_pull_desc *d);
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
static int qbman_swp_release_direct(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers,
unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers,
unsigned int num_buffers);
/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd)
= qbman_swp_enqueue_direct;
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames)
= qbman_swp_enqueue_multiple_direct;
int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames)
= qbman_swp_enqueue_multiple_desc_direct;
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
= qbman_swp_pull_direct;
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
= qbman_swp_dqrr_next_direct;
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers,
unsigned int num_buffers)
= qbman_swp_release_direct;
/* Portal Access */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@ -146,6 +225,15 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
#define QMAN_RT_MODE 0x00000100
/*
 * Distance from 'first' (inclusive) to 'last' (exclusive) on a cyclic
 * counter. When 'last' has wrapped behind 'first' the wrap distance is
 * taken modulo 2 * ringsize — presumably because the producer/consumer
 * indices carry an extra wrap bit beyond the ring size (TODO confirm
 * against the EQCR index encoding).
 */
static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	return (first <= last) ? last - first
			       : (2 * ringsize) - (first - last);
}
/**
* qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor.
@ -156,11 +244,16 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
*/
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
u32 reg;
u32 mask_size;
u32 eqcr_pi;
if (!p)
return NULL;
spin_lock_init(&p->access_spinlock);
p->desc = d;
p->mc.valid_bit = QB_VALID_BIT;
p->sdq = 0;
@ -186,25 +279,38 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
memset(p->addr_cena, 0, 64 * 1024);
if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
1, /* Writes Non-cacheable */
0, /* EQCR_CI stashing threshold */
3, /* RPM: Valid bit mode, RCR in array mode */
2, /* DCM: Discrete consumption ack mode */
3, /* EPM: Valid bit mode, EQCR in array mode */
1, /* mem stashing drop enable == TRUE */
1, /* mem stashing priority == TRUE */
1, /* mem stashing enable == TRUE */
1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
1, /* Writes Non-cacheable */
0, /* EQCR_CI stashing threshold */
3, /* RPM: RCR in array mode */
2, /* DCM: Discrete consumption ack */
2, /* EPM: EQCR in ring mode */
1, /* mem stashing drop enable enable */
1, /* mem stashing priority enable */
1, /* mem stashing enable */
1, /* dequeue stashing priority enable */
0, /* dequeue stashing enable enable */
0); /* EQCR_CI stashing priority enable */
} else {
memset(p->addr_cena, 0, 64 * 1024);
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
1, /* Writes Non-cacheable */
1, /* EQCR_CI stashing threshold */
3, /* RPM: RCR in array mode */
2, /* DCM: Discrete consumption ack */
0, /* EPM: EQCR in ring mode */
1, /* mem stashing drop enable */
1, /* mem stashing priority enable */
1, /* mem stashing enable */
1, /* dequeue stashing priority enable */
0, /* dequeue stashing enable */
0); /* EQCR_CI stashing priority enable */
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
}
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@ -225,6 +331,30 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
* applied when dequeues from a specific channel are enabled.
*/
qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
p->eqcr.pi_ring_size = 8;
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
p->eqcr.pi_ring_size = 32;
qbman_swp_enqueue_ptr =
qbman_swp_enqueue_mem_back;
qbman_swp_enqueue_multiple_ptr =
qbman_swp_enqueue_multiple_mem_back;
qbman_swp_enqueue_multiple_desc_ptr =
qbman_swp_enqueue_multiple_desc_mem_back;
qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
qbman_swp_release_ptr = qbman_swp_release_mem_back;
}
for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
return p;
}
@ -378,6 +508,7 @@ enum qb_enqueue_commands {
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
/**
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
@ -453,8 +584,9 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
QMAN_RT_MODE);
}
#define QB_RT_BIT ((u32)0x100)
/**
* qbman_swp_enqueue() - Issue an enqueue command
* qbman_swp_enqueue_direct() - Issue an enqueue command
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: the frame descriptor to be enqueued
@ -464,30 +596,351 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
*
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd)
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd)
{
struct qbman_eq_desc *p;
u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
int flags = 0;
int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
if (!EQAR_SUCCESS(eqar))
return -EBUSY;
if (ret >= 0)
ret = 0;
else
ret = -EBUSY;
return ret;
}
p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
memcpy(&p->dca, &d->dca, 31);
memcpy(&p->fd, fd, sizeof(*fd));
/**
* qbman_swp_enqueue_mem_back() - Issue an enqueue command
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: the frame descriptor to be enqueued
*
* Please note that 'fd' should only be NULL if the "action" of the
* descriptor is "orp_hole" or "orp_nesn".
*
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd)
{
int flags = 0;
int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
/* Set the verb byte, have to substitute in the valid-bit */
dma_wmb();
p->verb = d->verb | EQAR_VB(eqar);
} else {
p->verb = d->verb | EQAR_VB(eqar);
dma_wmb();
qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
if (ret >= 0)
ret = 0;
else
ret = -EBUSY;
return ret;
}
/**
* qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
* using one enqueue descriptor
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: table pointer of frame descriptor table to be enqueued
* @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
* @num_frames: number of fd to be enqueued
*
* Return the number of fd enqueued, or a negative error number.
*/
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames)
{
uint32_t *p = NULL;
const uint32_t *cl = (uint32_t *)d;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
uint64_t addr_cena;
spin_lock(&s->access_spinlock);
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available) {
spin_unlock(&s->access_spinlock);
return 0;
}
}
return 0;
eqcr_pi = s->eqcr.pi;
num_enqueued = (s->eqcr.available < num_frames) ?
s->eqcr.available : num_frames;
s->eqcr.available -= num_enqueued;
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
dma_wmb();
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
addr_cena = (size_t)s->addr_cena;
for (i = 0; i < num_enqueued; i++)
eqcr_pi++;
s->eqcr.pi = eqcr_pi & full_mask;
spin_unlock(&s->access_spinlock);
return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames)
{
uint32_t *p = NULL;
const uint32_t *cl = (uint32_t *)(d);
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
unsigned long irq_flags;
/*
 * Serialize portal access; interrupts are masked too so IRQ-context
 * users of the same portal cannot interleave with the EQCR writes
 * below.
 */
spin_lock(&s->access_spinlock);
local_irq_save(irq_flags);
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
/*
 * Ring looks full from the cached state: re-read the consumer index
 * from the memory-backed EQCR_CI and recompute availability; give up
 * (enqueue 0 frames) if it is genuinely full.
 */
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
s->eqcr.ci = __raw_readl(p) & full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available) {
local_irq_restore(irq_flags);
spin_unlock(&s->access_spinlock);
return 0;
}
}
eqcr_pi = s->eqcr.pi;
/* Enqueue at most as many frames as there are free EQCR entries */
num_enqueued = (s->eqcr.available < num_frames) ?
s->eqcr.available : num_frames;
s->eqcr.available -= num_enqueued;
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
/* Optionally request discrete consumption ack (DCA) per frame */
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
/* Toggle the valid bit each time the producer index wraps */
if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
s->eqcr.pi = eqcr_pi & full_mask;
/*
 * Order all EQCR command stores before the producer-index doorbell
 * write that makes them visible to QMan.
 */
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
local_irq_restore(irq_flags);
spin_unlock(&s->access_spinlock);
return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames)
{
uint32_t *p;
const uint32_t *cl;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
uint64_t addr_cena;
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
/*
 * Cached state says the ring is full: refresh the consumer index from
 * the cache-inhibited EQCR_CI register and recompute availability.
 */
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available)
return 0;
}
eqcr_pi = s->eqcr.pi;
/* Enqueue at most as many frames as there are free EQCR entries */
num_enqueued = (s->eqcr.available < num_frames) ?
s->eqcr.available : num_frames;
s->eqcr.available -= num_enqueued;
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Each frame gets its own descriptor from the @d table */
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
/* Commands must be fully written before their verbs become valid */
dma_wmb();
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
/* Toggle the valid bit each time the producer index wraps */
if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
/*
 * NOTE(review): addr_cena is computed but never used, and this loop
 * only re-advances eqcr_pi by num_enqueued — it looks like a leftover
 * from an explicit cache-flush sequence; confirm against upstream.
 */
addr_cena = (uint64_t)s->addr_cena;
for (i = 0; i < num_enqueued; i++)
eqcr_pi++;
s->eqcr.pi = eqcr_pi & full_mask;
return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames)
{
uint32_t *p;
const uint32_t *cl;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
/*
 * Ring looks full: re-read the consumer index from the memory-backed
 * EQCR_CI and recompute availability before giving up.
 */
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
s->eqcr.ci = __raw_readl(p) & full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available)
return 0;
}
eqcr_pi = s->eqcr.pi;
/* Enqueue at most as many frames as there are free EQCR entries */
num_enqueued = (s->eqcr.available < num_frames) ?
s->eqcr.available : num_frames;
s->eqcr.available -= num_enqueued;
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Each frame gets its own descriptor from the @d table */
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
/* Toggle the valid bit each time the producer index wraps */
if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
s->eqcr.pi = eqcr_pi & full_mask;
/*
 * Memory-backed mode: the single barrier below orders the complete
 * commands (data and verbs) before the producer-index doorbell write
 * that hands them to QMan.
 */
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
return num_enqueued;
}
/* Static (push) dequeue */
@ -645,7 +1098,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
}
/**
* qbman_swp_pull() - Issue the pull dequeue command
* qbman_swp_pull_direct() - Issue the pull dequeue command
* @s: the software portal object
* @d: the software portal descriptor which has been configured with
* the set of qbman_pull_desc_set_*() calls
@ -653,7 +1106,44 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
/*
 * qbman_swp_pull_direct() - issue a volatile dequeue (pull) command.
 * Claims the single VDQCR slot via the vdq.available counter and
 * returns -EBUSY if a pull is already outstanding.
 * NOTE(review): this "direct" variant still branches on qman_version to
 * pick the VDQCR offset; confirm against the upstream split into
 * direct/mem_back variants (the source here is a marker-stripped diff).
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
struct qbman_pull_desc *p;
/* Only one volatile dequeue may be outstanding at a time */
if (!atomic_dec_and_test(&s->vdq.available)) {
atomic_inc(&s->vdq.available);
return -EBUSY;
}
/* Remember where the dequeue results will be written */
s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
else
p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
p->numf = d->numf;
/* Token lets the dequeue-response path recognise valid entries */
p->tok = QMAN_DQ_TOKEN_VALID;
p->dq_src = d->dq_src;
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
/* Command body must be visible before the verb marks it valid */
dma_wmb();
/* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
return 0;
}
/**
* qbman_swp_pull_mem_back() - Issue the pull dequeue command
* @s: the software portal object
* @d: the software portal descriptor which has been configured with
* the set of qbman_pull_desc_set_*() calls
*
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
struct qbman_pull_desc *p;
@ -672,17 +1162,11 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
dma_wmb();
/* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
} else {
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
}
/* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
return 0;
}
@ -690,14 +1174,14 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
#define QMAN_DQRR_PI_MASK 0xf
/**
* qbman_swp_dqrr_next() - Get an valid DQRR entry
* qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
* @s: the software portal object
*
* Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
@ -740,10 +1224,99 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
else
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
verb = p->dq.verb;
/*
* If the valid-bit isn't of the expected polarity, nothing there. Note,
* in the DQRR reset bug workaround, we shouldn't need to skip these
* check, because we've already determined that a new entry is available
* and we've invalidated the cacheline before reading it, so the
* valid-bit behaviour is repaired and should tell us what we already
* knew from reading PI.
*/
if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
prefetch(qbman_get_cmd(s,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
return NULL;
}
/*
* There's something there. Move "next_idx" attention to the next ring
* entry (and prefetch it) before returning what we found.
*/
s->dqrr.next_idx++;
s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
if (!s->dqrr.next_idx)
s->dqrr.valid_bit ^= QB_VALID_BIT;
/*
* If this is the final response to a volatile dequeue command
* indicate that the vdq is available
*/
flags = p->dq.stat;
response_verb = verb & QBMAN_RESULT_MASK;
if ((response_verb == QBMAN_RESULT_DQ) &&
(flags & DPAA2_DQ_STAT_VOLATILE) &&
(flags & DPAA2_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.available);
prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
return p;
}
/**
* qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
* @s: the software portal object
*
* Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
u32 flags;
struct dpaa2_dq *p;
/* Before using valid-bit to detect if something is there, we have to
* handle the case of the DQRR reset bug...
*/
if (unlikely(s->dqrr.reset_bug)) {
/*
* We pick up new entries by cache-inhibited producer index,
* which means that a non-coherent mapping would require us to
* invalidate and read *only* once that PI has indicated that
* there's an entry here. The first trip around the DQRR ring
* will be much less efficient than all subsequent trips around
* it...
*/
u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
QMAN_DQRR_PI_MASK;
/* there are new entries if pi != next_idx */
if (pi == s->dqrr.next_idx)
return NULL;
/*
* if next_idx is/was the last ring index, and 'pi' is
* different, we can disable the workaround as all the ring
* entries have now been DMA'd to so valid-bit checking is
* repaired. Note: this logic needs to be based on next_idx
* (which increments one at a time), rather than on pi (which
* can burst and wrap-around between our snapshots of it).
*/
if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
s->dqrr.next_idx, pi);
s->dqrr.reset_bug = 0;
}
prefetch(qbman_get_cmd(s,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/*
@ -872,7 +1445,7 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
/**
* qbman_swp_release() - Issue a buffer release command
* qbman_swp_release_direct() - Issue a buffer release command
* @s: the software portal object
* @d: the release descriptor
* @buffers: a pointer pointing to the buffer address to be released
@ -880,8 +1453,9 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
*
* Return 0 for success, -EBUSY if the release command ring is not ready.
*/
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
const u64 *buffers, unsigned int num_buffers)
int qbman_swp_release_direct(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers, unsigned int num_buffers)
{
int i;
struct qbman_release_desc *p;
@ -895,28 +1469,59 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
return -EBUSY;
/* Start the release command */
if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
else
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
/*
* Set the verb byte, have to substitute in the valid-bit
* and the number of buffers.
*/
dma_wmb();
p->verb = d->verb | RAR_VB(rar) | num_buffers;
} else {
p->verb = d->verb | RAR_VB(rar) | num_buffers;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
RAR_IDX(rar) * 4, QMAN_RT_MODE);
}
/*
* Set the verb byte, have to substitute in the valid-bit
* and the number of buffers.
*/
dma_wmb();
p->verb = d->verb | RAR_VB(rar) | num_buffers;
return 0;
}
/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EINVAL if @num_buffers is zero or greater
 * than 7, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers, unsigned int num_buffers)
{
int i;
struct qbman_release_desc *p;
u32 rar;
/* The RCR command holds at most 7 buffer pointers */
if (!num_buffers || (num_buffers > 7))
return -EINVAL;
/* Acquire an RCR slot; RAR reports ring availability */
rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
if (!RAR_SUCCESS(rar))
return -EBUSY;
/* Start the release command */
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
/*
 * Memory-backed mode: the verb (with valid-bit and count) is written
 * as part of the command, then the barrier orders the whole command
 * before the cache-inhibited ring-trigger doorbell below.
 */
p->verb = d->verb | RAR_VB(rar) | num_buffers;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
RAR_IDX(rar) * 4, QMAN_RT_MODE);
return 0;
}

View File

@ -9,6 +9,13 @@
#include <soc/fsl/dpaa2-fd.h>
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000
struct dpaa2_dq;
struct qbman_swp;
@ -81,6 +88,10 @@ struct qbman_eq_desc {
u8 wae;
u8 rspid;
__le64 rsp_addr;
};
struct qbman_eq_desc_with_fd {
struct qbman_eq_desc desc;
u8 fd[32];
};
@ -132,8 +143,48 @@ struct qbman_swp {
u8 dqrr_size;
int reset_bug; /* indicates dqrr reset workaround is needed */
} dqrr;
struct {
u32 pi;
u32 pi_vb;
u32 pi_ring_size;
u32 pi_ci_mask;
u32 ci;
int available;
u32 pend;
u32 no_pfdr;
} eqcr;
spinlock_t access_spinlock;
};
/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
uint32_t *flags,
int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd,
int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
const struct qbman_release_desc *d,
const u64 *buffers,
unsigned int num_buffers);
/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
@ -158,9 +209,6 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
enum qbman_pull_type_e dct);
int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
@ -172,15 +220,11 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
@ -193,6 +237,61 @@ void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Forwards to whichever implementation the qbman_swp_enqueue_ptr hook
 * currently points at (the hook is selected outside this header).
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	int ret;

	ret = qbman_swp_enqueue_ptr(s, d, fd);
	return ret;
}
/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor shared by every frame in the batch
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Dispatches through the qbman_swp_enqueue_multiple_ptr hook declared
 * earlier in this header.
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	int count;

	count = qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
	return count;
}
/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptor, one per frame
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Dispatches through the qbman_swp_enqueue_multiple_desc_ptr hook
 * declared earlier in this header.
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	int count;

	count = qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
	return count;
}
/**
* qbman_result_is_DQ() - check if the dequeue result is a dequeue response
* @dq: the dequeue result to be checked
@ -504,4 +603,49 @@ int qbman_bp_query(struct qbman_swp *s, u16 bpid,
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
/**
 * qbman_swp_release() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Dispatches through the qbman_swp_release_ptr hook declared earlier
 * in this header.
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	int ret;

	ret = qbman_swp_release_ptr(s, d, buffers, num_buffers);
	return ret;
}
/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Dispatches through the qbman_swp_pull_ptr hook declared earlier in
 * this header.
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	int err;

	err = qbman_swp_pull_ptr(s, d);
	return err;
}
/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Dispatches through the qbman_swp_dqrr_next_ptr hook declared earlier
 * in this header.
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	dq = qbman_swp_dqrr_next_ptr(s);
	return dq;
}
#endif /* __FSL_QBMAN_PORTAL_H */

View File

@ -423,7 +423,7 @@ static void qe_upload_microcode(const void *base,
qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
/*
@ -525,7 +525,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
qe_firmware_info.extended_modes = firmware->extended_modes;
qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));

View File

@ -46,7 +46,7 @@ int cpm_muram_init(void)
{
struct device_node *np;
struct resource r;
u32 zero[OF_MAX_ADDR_CELLS] = {};
__be32 zero[OF_MAX_ADDR_CELLS] = {};
resource_size_t max = 0;
int i = 0;
int ret = 0;

View File

@ -44,7 +44,7 @@
struct qe_ic {
/* Control registers offset */
u32 __iomem *regs;
__be32 __iomem *regs;
/* The remapper for this QEIC */
struct irq_domain *irqhost;

View File

@ -632,7 +632,7 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
{
int source;
u32 shift;
struct qe_mux *qe_mux_reg;
struct qe_mux __iomem *qe_mux_reg;
qe_mux_reg = &qe_immr->qmx;

View File

@ -72,7 +72,7 @@ EXPORT_SYMBOL(ucc_slow_restart_tx);
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
struct ucc_slow *us_regs;
struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
@ -93,7 +93,7 @@ EXPORT_SYMBOL(ucc_slow_enable);
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
struct ucc_slow *us_regs;
struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
@ -122,7 +122,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
u32 i;
struct ucc_slow __iomem *us_regs;
u32 gumr;
struct qe_bd *bd;
struct qe_bd __iomem *bd;
u32 id;
u32 command;
int ret = 0;
@ -168,16 +168,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return -ENOMEM;
}
uccs->saved_uccm = 0;
uccs->p_rx_frame = 0;
us_regs = uccs->us_regs;
uccs->p_ucce = (u16 *) & (us_regs->ucce);
uccs->p_uccm = (u16 *) & (us_regs->uccm);
#ifdef STATISTICS
uccs->rx_frames = 0;
uccs->tx_frames = 0;
uccs->rx_discarded = 0;
#endif /* STATISTICS */
uccs->p_ucce = &us_regs->ucce;
uccs->p_uccm = &us_regs->uccm;
/* Get PRAM base */
uccs->us_pram_offset =
@ -231,24 +224,24 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
qe_iowrite32be(0, (u32 *)bd);
qe_iowrite32be(0, (u32 __iomem *)bd);
bd++;
}
/* for last BD set Wrap bit */
qe_iowrite32be(0, &bd->buf);
qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
qe_iowrite32be(T_W, (u32 __iomem *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
qe_iowrite32be(0, (u32 *)bd);
qe_iowrite32be(0, (u32 __iomem *)bd);
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
qe_iowrite32be(R_W, (u32 __iomem *)bd);
qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
@ -273,8 +266,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
us_info->diag | us_info->mode;
gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
(u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
if (us_info->tci)
gumr |= UCC_SLOW_GUMR_L_TCI;
if (us_info->rinv)
@ -289,8 +282,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
uccs->us_pram->tbmr = UCC_BMR_BO_BE;
uccs->us_pram->rbmr = UCC_BMR_BO_BE;
qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
/* rbase, tbase are offsets from MURAM base */
qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright NXP
* Copyright 2017-2019 NXP
*
*/
#ifndef __FSL_DPAA2_IO_H
@ -109,6 +109,10 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid,
const struct dpaa2_fd *fd, int number_of_frame);
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid,
const struct dpaa2_fd *fd, int number_of_frame);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,

View File

@ -178,10 +178,10 @@ struct ucc_fast_info {
struct ucc_fast_private {
struct ucc_fast_info *uf_info;
struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
u32 __iomem *p_ucce; /* a pointer to the event register in memory. */
u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
__be32 __iomem *p_ucce; /* a pointer to the event register in memory. */
__be32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
#ifdef CONFIG_UGETH_TX_ON_DEMAND
u16 __iomem *p_utodr; /* pointer to the transmit on demand register */
__be16 __iomem *p_utodr;/* pointer to the transmit on demand register */
#endif
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */

View File

@ -184,7 +184,7 @@ struct ucc_slow_info {
struct ucc_slow_private {
struct ucc_slow_info *us_info;
struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
struct ucc_slow_pram __iomem *us_pram; /* a pointer to the parameter RAM */
s32 us_pram_offset;
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
@ -196,13 +196,12 @@ struct ucc_slow_private {
and length for first BD in a frame */
s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
struct qe_bd *confBd; /* next BD for confirm after Tx */
struct qe_bd *tx_bd; /* next BD for new Tx request */
struct qe_bd *rx_bd; /* next BD to collect after Rx */
struct qe_bd __iomem *confBd; /* next BD for confirm after Tx */
struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */
struct qe_bd __iomem *rx_bd; /* next BD to collect after Rx */
void *p_rx_frame; /* accumulating receive frame */
u16 *p_ucce; /* a pointer to the event register in memory.
*/
u16 *p_uccm; /* a pointer to the mask register in memory */
__be16 __iomem *p_ucce; /* a pointer to the event register in memory */
__be16 __iomem *p_uccm; /* a pointer to the mask register in memory */
u16 saved_uccm; /* a saved mask for the RX Interrupt bits */
#ifdef STATISTICS
u32 tx_frames; /* Transmitted frames counters */