linux/drivers/dma/ioat/dma_v3.c

/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* BSD LICENSE
*
* Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Support routines for v3+ hardware
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
extern struct kmem_cache *ioat3_sed_cache;
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
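/*
 * Worked example (illustrative only, not used by the driver): the
 * hardware encodes source/destination counts relative to its implied
 * minimums, so a software XOR source count of 2 is written to the
 * descriptor as 0, and a 16-source PQ count of 9 is written as 0:
 *
 *	src_cnt_to_hw(2) == 0,   src_cnt_to_sw(0) == 2
 *	src16_cnt_to_hw(9) == 0, src16_cnt_to_sw(0) == 9
 */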
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
*/
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6 };
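/*
 * Worked example (illustrative sketch of the tables above): for XOR,
 * (xor_idx_to_desc >> idx) & 1 selects the base (0) or extended (1)
 * descriptor and xor_idx_to_field[idx] gives the field within it:
 *
 *	idx 3: (0xe0 >> 3) & 1 == 0 -> base descriptor, field 6
 *	idx 6: (0xe0 >> 6) & 1 == 1 -> extended descriptor, field 1
 *
 * i.e. sources 0..4 live in the base descriptor and sources 5..7
 * spill into the extension; pq_idx_to_desc (0xf8) splits at three
 * sources the same way.
 */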
static void ioat3_eh(struct ioat2_dma_chan *ioat);
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, int idx)
{
struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
raw->field[xor_idx_to_field[idx]] = addr + offset;
}
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
return raw->field[pq_idx_to_field[idx]];
}
static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
return raw->field[pq16_idx_to_field[idx]];
}
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, u8 coef, int idx)
{
struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
raw->field[pq_idx_to_field[idx]] = addr + offset;
pq->coef[idx] = coef;
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
return true;
default:
return false;
}
}
static bool is_snb_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
return true;
default:
return false;
}
}
static bool is_ivb_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
return true;
default:
return false;
}
}
static bool is_hsw_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
return true;
default:
return false;
}
}
static bool is_xeon_cb32(struct pci_dev *pdev)
{
return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
is_hsw_ioat(pdev);
}
static bool is_bwd_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
return true;
default:
return false;
}
}
static bool is_bwd_noraid(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
return true;
default:
return false;
}
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
struct ioat_pq16a_descriptor *pq16 =
(struct ioat_pq16a_descriptor *)desc[1];
struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
raw->field[pq16_idx_to_field[idx]] = addr + offset;
if (idx < 8)
pq->coef[idx] = coef;
else
pq16->coef[idx - 8] = coef;
}
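/*
 * Worked example (illustrative only): for a 16-source PQ operation,
 * pq16_idx_to_desc places sources 0..1 in the base descriptor,
 * sources 2..8 in the first 64 bytes of the sed block (desc[1]) and
 * sources 9..15 in the second 64 bytes (desc[2]), while the
 * coefficients split at index 8 between pq->coef[] and pq16->coef[].
 */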
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
struct ioat_sed_ent *sed;
gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
sed = kmem_cache_alloc(ioat3_sed_cache, flags);
if (!sed)
return NULL;
sed->hw_pool = hw_pool;
sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
flags, &sed->dma);
if (!sed->hw) {
kmem_cache_free(ioat3_sed_cache, sed);
return NULL;
}
return sed;
}
static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
if (!sed)
return;
dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
kmem_cache_free(ioat3_sed_cache, sed);
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
struct ioat_dma_descriptor *hw = desc->hw;
if (hw->ctl_f.op == IOAT_OP_XOR ||
hw->ctl_f.op == IOAT_OP_XOR_VAL) {
struct ioat_xor_descriptor *xor = desc->xor;
if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
return true;
} else if (hw->ctl_f.op == IOAT_OP_PQ ||
hw->ctl_f.op == IOAT_OP_PQ_VAL) {
struct ioat_pq_descriptor *pq = desc->pq;
if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
return true;
}
return false;
}
static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
u64 phys_complete;
u64 completion;
completion = *chan->completion;
phys_complete = ioat_chansts_to_addr(completion);
dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
(unsigned long long) phys_complete);
return phys_complete;
}
static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
u64 *phys_complete)
{
*phys_complete = ioat3_get_current_completion(chan);
if (*phys_complete == chan->last_completion)
return false;
clear_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
return true;
}
static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
struct ioat_dma_descriptor *hw = desc->hw;
switch (hw->ctl_f.op) {
case IOAT_OP_PQ_VAL:
case IOAT_OP_PQ_VAL_16S:
{
struct ioat_pq_descriptor *pq = desc->pq;
/* check if there's error written */
if (!pq->dwbes_f.wbes)
return;
/* need to set a chanerr var for checking to clear later */
if (pq->dwbes_f.p_val_err)
*desc->result |= SUM_CHECK_P_RESULT;
if (pq->dwbes_f.q_val_err)
*desc->result |= SUM_CHECK_Q_RESULT;
return;
}
default:
return;
}
}
/**
* __cleanup - reclaim used descriptors
* @ioat: channel (ring) to clean
*
* The difference from the dma_v2.c __cleanup() is that this routine
* handles extended descriptors and dma-unmapping raid operations.
*/
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *desc;
bool seen_current = false;
int idx = ioat->tail, i;
u16 active;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
/*
* At restart of the channel, the completion address and the
* channel status will be 0 due to starting a new chain. Since
* it's a new chain and the first descriptor "fails", there is
* nothing to clean up. We do not want to reap the entire submitted
* chain due to this 0 address value and then BUG.
*/
if (!phys_complete)
return;
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
smp_read_barrier_depends();
prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
/* set err stat if we are using dwbes */
if (device->cap & IOAT_CAP_DWBES)
desc_get_errstat(ioat, desc);
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
}
}
if (tx->phys == phys_complete)
seen_current = true;
/* skip extended descriptors */
if (desc_has_ext(desc)) {
BUG_ON(i + 1 >= active);
i++;
}
/* cleanup super extended descriptors */
if (desc->sed) {
ioat3_free_sed(device, desc->sed);
desc->sed = NULL;
}
}
smp_mb(); /* finish all descriptor reads before incrementing tail */
ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
/* 5 microsecond delay per pending descriptor */
writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
u64 phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
if (is_ioat_halted(*chan->completion)) {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
ioat3_eh(ioat);
}
}
spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
if (!test_bit(IOAT_RUN, &chan->state))
return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
u64 phys_complete;
ioat2_quiesce(chan, 0);
if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
__ioat2_restart_chan(ioat);
}
static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
struct pci_dev *pdev = to_pdev(chan);
struct ioat_dma_descriptor *hw;
u64 phys_complete;
struct ioat_ring_ent *desc;
u32 err_handled = 0;
u32 chanerr_int;
u32 chanerr;
/* cleanup so tail points to descriptor that caused the error */
if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
__func__, chanerr, chanerr_int);
desc = ioat2_get_ring_ent(ioat, ioat->tail);
hw = desc->hw;
dump_desc_dbg(ioat, desc);
switch (hw->ctl_f.op) {
case IOAT_OP_XOR_VAL:
if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
*desc->result |= SUM_CHECK_P_RESULT;
err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
}
break;
case IOAT_OP_PQ_VAL:
case IOAT_OP_PQ_VAL_16S:
if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
*desc->result |= SUM_CHECK_P_RESULT;
err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
}
if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
*desc->result |= SUM_CHECK_Q_RESULT;
err_handled |= IOAT_CHANERR_XOR_Q_ERR;
}
break;
}
/* fault on unhandled error or spurious halt */
if (chanerr ^ err_handled || chanerr == 0) {
dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
__func__, chanerr, err_handled);
BUG();
}
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
/* mark faulting descriptor as complete */
*chan->completion = desc->txd.phys;
spin_lock_bh(&ioat->prep_lock);
ioat3_restart_channel(ioat);
spin_unlock_bh(&ioat->prep_lock);
}
static void check_active(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
if (ioat2_ring_active(ioat)) {
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
return;
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized, try to step
* down the size
*/
reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
*/
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
}
static void ioat3_timer_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
/* when halted due to errors check for channel
* programming errors before advancing the completion state
*/
if (is_ioat_halted(status)) {
u32 chanerr;
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
if (test_bit(IOAT_RUN, &chan->state))
BUG_ON(is_ioat_bug(chanerr));
else /* we never got off the ground */
return;
}
/* if we haven't made progress and we have already
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
spin_lock_bh(&ioat->prep_lock);
ioat3_restart_channel(ioat);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
return;
} else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
if (ioat2_ring_active(ioat))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
else {
spin_lock_bh(&ioat->prep_lock);
check_active(ioat);
spin_unlock_bh(&ioat->prep_lock);
}
spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
ioat3_cleanup(ioat);
return dma_cookie_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
size_t len, unsigned long flags)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_ring_ent *compl_desc;
struct ioat_ring_ent *desc;
struct ioat_ring_ent *ext;
size_t total_len = len;
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex = NULL;
struct ioat_dma_descriptor *hw;
int num_descs, with_ext, idx, i;
u32 offset = 0;
u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
BUG_ON(src_cnt < 2);
num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 5
* sources
*/
if (src_cnt > 5) {
with_ext = 1;
num_descs *= 2;
} else
with_ext = 0;
/* completion writes from the raid engine may pass completion
* writes from the legacy engine, so we need one extra null
* (legacy) descriptor to ensure all completion writes arrive in
* order.
*/
if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
idx = ioat->head;
else
return NULL;
i = 0;
do {
struct ioat_raw_descriptor *descs[2];
size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
int s;
desc = ioat2_get_ring_ent(ioat, idx + i);
xor = desc->xor;
/* save a branch by unconditionally retrieving the
* extended descriptor; xor_set_src() knows not to write
* to it in the single descriptor case
*/
ext = ioat2_get_ring_ent(ioat, idx + i + 1);
xor_ex = ext->xor_ex;
descs[0] = (struct ioat_raw_descriptor *) xor;
descs[1] = (struct ioat_raw_descriptor *) xor_ex;
for (s = 0; s < src_cnt; s++)
xor_set_src(descs, src[s], offset, s);
xor->size = xfer_size;
xor->dst_addr = dest + offset;
xor->ctl = 0;
xor->ctl_f.op = op;
xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
len -= xfer_size;
offset += xfer_size;
dump_desc_dbg(ioat, desc);
} while ((i += 1 + with_ext) < num_descs);
/* last xor descriptor carries the unmap parameters and fence bit */
desc->txd.flags = flags;
desc->len = total_len;
if (result)
desc->result = result;
xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
/* completion descriptor carries interrupt bit */
compl_desc = ioat2_get_ring_ent(ioat, idx + i);
compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
hw = compl_desc->hw;
hw->ctl = 0;
hw->ctl_f.null = 1;
hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
hw->ctl_f.compl_write = 1;
hw->size = NULL_DESC_BUFFER_SIZE;
dump_desc_dbg(ioat, compl_desc);
/* we leave the channel locked to ensure in order submission */
return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
/* the cleanup routine only sets bits on validate failure, it
* does not clear bits on validate success... so clear it here
*/
*result = 0;
return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
src_cnt - 1, len, flags);
}
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
struct device *dev = to_dev(&ioat->base);
struct ioat_pq_descriptor *pq = desc->pq;
struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
int i;
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
" src_cnt: %d)\n",
desc_id(desc), (unsigned long long) desc->txd.phys,
(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
pq->ctl_f.compl_write,
pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
pq->ctl_f.src_cnt);
for (i = 0; i < src_cnt; i++)
dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}
static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
struct ioat_ring_ent *desc)
{
struct device *dev = to_dev(&ioat->base);
struct ioat_pq_descriptor *pq = desc->pq;
struct ioat_raw_descriptor *descs[] = { (void *)pq,
(void *)pq,
(void *)pq };
int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
int i;
if (desc->sed) {
descs[1] = (void *)desc->sed->hw;
descs[2] = (void *)desc->sed->hw + 64;
}
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
" src_cnt: %d)\n",
desc_id(desc), (unsigned long long) desc->txd.phys,
(unsigned long long) pq->next,
desc->txd.flags, pq->size, pq->ctl,
pq->ctl_f.op, pq->ctl_f.int_en,
pq->ctl_f.compl_write,
pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
pq->ctl_f.src_cnt);
for (i = 0; i < src_cnt; i++) {
dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
(unsigned long long) pq16_get_src(descs, i),
pq->coef[i]);
}
dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
const dma_addr_t *dst, const dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf,
size_t len, unsigned long flags)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *compl_desc;
struct ioat_ring_ent *desc;
struct ioat_ring_ent *ext;
size_t total_len = len;
struct ioat_pq_descriptor *pq;
struct ioat_pq_ext_descriptor *pq_ex = NULL;
struct ioat_dma_descriptor *hw;
u32 offset = 0;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
int i, s, idx, with_ext, num_descs;
int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
* at least 1 implied source in the DMA_PREP_CONTINUE case)
*/
BUG_ON(src_cnt + dmaf_continue(flags) < 2);
num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 3
* sources (we need 1 extra source in the q-only continuation
* case and 3 extra sources in the p+q continuation case).
*/
if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
(dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
with_ext = 1;
num_descs *= 2;
} else
with_ext = 0;
/* completion writes from the raid engine may pass completion
* writes from the legacy engine, so we need one extra null
* (legacy) descriptor to ensure all completion writes arrive in
* order.
*/
if (likely(num_descs) &&
ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
idx = ioat->head;
else
return NULL;
i = 0;
do {
struct ioat_raw_descriptor *descs[2];
size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
desc = ioat2_get_ring_ent(ioat, idx + i);
pq = desc->pq;
/* save a branch by unconditionally retrieving the
* extended descriptor; pq_set_src() knows not to write
* to it in the single descriptor case
*/
ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
pq_ex = ext->pq_ex;
descs[0] = (struct ioat_raw_descriptor *) pq;
descs[1] = (struct ioat_raw_descriptor *) pq_ex;
for (s = 0; s < src_cnt; s++)
pq_set_src(descs, src[s], offset, scf[s], s);
/* see the comment for dma_maxpq in include/linux/dmaengine.h */
if (dmaf_p_disabled_continue(flags))
pq_set_src(descs, dst[1], offset, 1, s++);
else if (dmaf_continue(flags)) {
pq_set_src(descs, dst[0], offset, 0, s++);
pq_set_src(descs, dst[1], offset, 1, s++);
pq_set_src(descs, dst[1], offset, 0, s++);
}
pq->size = xfer_size;
pq->p_addr = dst[0] + offset;
pq->q_addr = dst[1] + offset;
pq->ctl = 0;
pq->ctl_f.op = op;
/* we turn on descriptor write back error status */
if (device->cap & IOAT_CAP_DWBES)
pq->ctl_f.wb_en = result ? 1 : 0;
pq->ctl_f.src_cnt = src_cnt_to_hw(s);
pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
len -= xfer_size;
offset += xfer_size;
} while ((i += 1 + with_ext) < num_descs);
/* last pq descriptor carries the unmap parameters and fence bit */
desc->txd.flags = flags;
desc->len = total_len;
if (result)
desc->result = result;
pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
dump_pq_desc_dbg(ioat, desc, ext);
if (!cb32) {
pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
pq->ctl_f.compl_write = 1;
compl_desc = desc;
} else {
/* completion descriptor carries interrupt bit */
compl_desc = ioat2_get_ring_ent(ioat, idx + i);
compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
hw = compl_desc->hw;
hw->ctl = 0;
hw->ctl_f.null = 1;
hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
hw->ctl_f.compl_write = 1;
hw->size = NULL_DESC_BUFFER_SIZE;
dump_desc_dbg(ioat, compl_desc);
}
/* we leave the channel locked to ensure in order submission */
return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
const dma_addr_t *dst, const dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf,
size_t len, unsigned long flags)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *desc;
size_t total_len = len;
struct ioat_pq_descriptor *pq;
u32 offset = 0;
u8 op;
int i, s, idx, num_descs;
/* this function is only called with 9-16 sources */
op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
dev_dbg(to_dev(chan), "%s\n", __func__);
num_descs = ioat2_xferlen_to_descs(ioat, len);
/*
* 16 source pq is only available on cb3.3 and has no completion
* write hw bug.
*/
if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
idx = ioat->head;
else
return NULL;
i = 0;
do {
struct ioat_raw_descriptor *descs[4];
size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
desc = ioat2_get_ring_ent(ioat, idx + i);
pq = desc->pq;
descs[0] = (struct ioat_raw_descriptor *) pq;
desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
if (!desc->sed) {
dev_err(to_dev(chan),
"%s: no free sed entries\n", __func__);
return NULL;
}
pq->sed_addr = desc->sed->dma;
desc->sed->parent = desc;
descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
descs[2] = (void *)descs[1] + 64;
for (s = 0; s < src_cnt; s++)
pq16_set_src(descs, src[s], offset, scf[s], s);
/* see the comment for dma_maxpq in include/linux/dmaengine.h */
if (dmaf_p_disabled_continue(flags))
pq16_set_src(descs, dst[1], offset, 1, s++);
else if (dmaf_continue(flags)) {
pq16_set_src(descs, dst[0], offset, 0, s++);
pq16_set_src(descs, dst[1], offset, 1, s++);
pq16_set_src(descs, dst[1], offset, 0, s++);
}
pq->size = xfer_size;
pq->p_addr = dst[0] + offset;
pq->q_addr = dst[1] + offset;
pq->ctl = 0;
pq->ctl_f.op = op;
pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
/* we turn on descriptor write back error status */
if (device->cap & IOAT_CAP_DWBES)
pq->ctl_f.wb_en = result ? 1 : 0;
pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
len -= xfer_size;
offset += xfer_size;
} while (++i < num_descs);
/* last pq descriptor carries the unmap parameters and fence bit */
desc->txd.flags = flags;
desc->len = total_len;
if (result)
desc->result = result;
pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
/* with cb3.3 we should be able to do completion w/o a null desc */
pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
pq->ctl_f.compl_write = 1;
dump_pq16_desc_dbg(ioat, desc);
/* we leave the channel locked to ensure in order submission */
return &desc->txd;
}
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
if (dmaf_p_disabled_continue(flags))
return src_cnt + 1;
else if (dmaf_continue(flags))
return src_cnt + 3;
else
return src_cnt;
}
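/*
 * Worked example (illustrative note, not driver logic): continuation
 * flags add implied sources, so an 8-source DMA_PREP_CONTINUE request
 * counts as 8 + 3 == 11 effective sources and the prep routines below
 * route it to the 16-source descriptor format (src_cnt_flags() > 8).
 */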
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
if (flags & DMA_PREP_PQ_DISABLE_Q)
dst[1] = dst[0];
/* handle the single source multiply case from the raid6
* recovery path
*/
if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
dma_addr_t single_source[2];
unsigned char single_source_coef[2];
BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
single_source[0] = src[0];
single_source[1] = src[0];
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
2, single_source_coef, len,
flags) :
__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
single_source_coef, len, flags);
} else {
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
scf, len, flags);
}
}
static struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
if (flags & DMA_PREP_PQ_DISABLE_Q)
pq[1] = pq[0];
/* the cleanup routine only sets bits on validate failure, it
* does not clear bits on validate success... so clear it here
*/
*pqres = 0;
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
unsigned char scf[src_cnt];
dma_addr_t pq[2];
memset(scf, 0, src_cnt);
pq[0] = dst;
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
unsigned char scf[src_cnt];
dma_addr_t pq[2];
/* the cleanup routine only sets bits on validate failure, it
* does not clear bits on validate success... so clear it here
*/
*result = 0;
memset(scf, 0, src_cnt);
pq[0] = src[0];
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
scf, len, flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
if (ioat2_check_space_lock(ioat, 1) == 0)
desc = ioat2_get_ring_ent(ioat, ioat->head);
else
return NULL;
hw = desc->hw;
hw->ctl = 0;
hw->ctl_f.null = 1;
hw->ctl_f.int_en = 1;
hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
hw->ctl_f.compl_write = 1;
hw->size = NULL_DESC_BUFFER_SIZE;
hw->src_addr = 0;
hw->dst_addr = 0;
desc->txd.flags = flags;
desc->len = 1;
dump_desc_dbg(ioat, desc);
/* we leave the channel locked to ensure in order submission */
return &desc->txd;
}
static void ioat3_dma_test_callback(void *dma_async_param)
{
struct completion *cmp = dma_async_param;
complete(cmp);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
int i, src_idx;
struct page *dest;
struct page *xor_srcs[IOAT_NUM_SRC_TEST];
struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
u32 xor_val_result;
int err = 0;
struct completion cmp;
unsigned long tmo;
struct device *dev = &device->pdev->dev;
struct dma_device *dma = &device->common;
u8 op = 0;
dev_dbg(dev, "%s\n", __func__);
if (!dma_has_cap(DMA_XOR, dma->cap_mask))
return 0;
for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
__free_page(xor_srcs[src_idx]);
return -ENOMEM;
}
}
dest = alloc_page(GFP_KERNEL);
if (!dest) {
while (src_idx--)
__free_page(xor_srcs[src_idx]);
return -ENOMEM;
}
/* Fill in src buffers */
for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}
for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
(cmp_byte << 8) | cmp_byte;
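/*
 * Worked example (illustrative only): each source page i is filled
 * with the byte (1 << i), so the XOR of all IOAT_NUM_SRC_TEST == 6
 * sources is 0x01 ^ 0x02 ^ 0x04 ^ 0x08 ^ 0x10 ^ 0x20 == 0x3f, and
 * cmp_word replicates that byte across a u32 (0x3f3f3f3f) for the
 * word-wise compare loop below.
 */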
memset(page_address(dest), 0, PAGE_SIZE);
dma_chan = container_of(dma->channels.next, struct dma_chan,
device_node);
if (dma->device_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
/* test xor */
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_dma))
goto dma_unmap;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_srcs[i]))
goto dma_unmap;
}
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
err = -ENODEV;
goto dma_unmap;
}
async_tx_ack(tx);
init_completion(&cmp);
tx->callback = ioat3_dma_test_callback;
tx->callback_param = &cmp;
cookie = tx->tx_submit(tx);
if (cookie < 0) {
dev_err(dev, "Self-test xor setup failed\n");
err = -ENODEV;
goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto dma_unmap;
}
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
dev_err(dev, "Self-test xor failed compare\n");
err = -ENODEV;
goto free_resources;
}
}
dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* skip validate if the capability is not present */
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
op = IOAT_OP_XOR_VAL;
/* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
xor_val_srcs[i] = dest;
xor_val_result = 1;
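/*
 * seed the result non-zero so a passing zero-sum check visibly
 * clears it, and pre-fill dma_srcs[] with DMA_ERROR_CODE so the
 * dma_unmap error path can tell mapped entries from unmapped ones
 */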
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_srcs[i]))
goto dma_unmap;
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
goto dma_unmap;
}
async_tx_ack(tx);
init_completion(&cmp);
tx->callback = ioat3_dma_test_callback;
tx->callback_param = &cmp;
cookie = tx->tx_submit(tx);
if (cookie < 0) {
dev_err(dev, "Self-test zero setup failed\n");
err = -ENODEV;
goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto dma_unmap;
}
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
if (xor_val_result != 0) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
goto free_resources;
}
memset(page_address(dest), 0, PAGE_SIZE);
/* test for non-zero parity sum */
op = IOAT_OP_XOR_VAL;
xor_val_result = 0;
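/*
 * dest was just zeroed, so the sources no longer sum to zero; a
 * working validate must flag SUM_CHECK_P_RESULT in the seeded-zero
 * result
 */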
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_srcs[i]))
goto dma_unmap;
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
goto dma_unmap;
}
async_tx_ack(tx);
init_completion(&cmp);
tx->callback = ioat3_dma_test_callback;
tx->callback_param = &cmp;
cookie = tx->tx_submit(tx);
if (cookie < 0) {
dev_err(dev, "Self-test 2nd zero setup failed\n");
err = -ENODEV;
goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto dma_unmap;
}
if (xor_val_result != SUM_CHECK_P_RESULT) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
goto dma_unmap;
}
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
goto free_resources;
dma_unmap:
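/*
 * error unwind: release only the pages that were actually mapped,
 * as marked by the DMA_ERROR_CODE sentinel
 */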
if (op == IOAT_OP_XOR) {
if (dest_dma != DMA_ERROR_CODE)
dma_unmap_page(dev, dest_dma, PAGE_SIZE,
DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
if (dma_srcs[i] != DMA_ERROR_CODE)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
DMA_TO_DEVICE);
} else if (op == IOAT_OP_XOR_VAL) {
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
if (dma_srcs[i] != DMA_ERROR_CODE)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
DMA_TO_DEVICE);
}
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
src_idx = IOAT_NUM_SRC_TEST;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
return err;
}
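/* run the generic dmaengine self test first, then the xor/xor-val test */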
static int ioat3_dma_self_test(struct ioatdma_device *device)
{
int rc = ioat_dma_self_test(device);
if (rc)
return rc;
rc = ioat_xor_val_self_test(device);
if (rc)
return rc;
return 0;
}
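/*
 * BWD parts need their interrupt resources torn down and re-acquired
 * after a reset; all other hardware keeps its irq setup as-is
 */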
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
struct pci_dev *pdev = device->pdev;
int irq = pdev->irq, i;
if (!is_bwd_ioat(pdev))
return 0;
switch (device->irq_mode) {
case IOAT_MSIX:
for (i = 0; i < device->common.chancnt; i++) {
struct msix_entry *msix = &device->msix_entries[i];
struct ioat_chan_common *chan;
chan = ioat_chan_by_index(device, i);
devm_free_irq(&pdev->dev, msix->vector, chan);
}
pci_disable_msix(pdev);
break;
case IOAT_MSI:
pci_disable_msi(pdev);
/* fall through */
case IOAT_INTX:
devm_free_irq(&pdev->dev, irq, device);
break;
default:
return 0;
}
device->irq_mode = IOAT_NOIRQ;
return ioat_dma_setup_interrupts(device);
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
/* throw away whatever the channel was doing and get it
 * re-initialized, with ioat3-specific workarounds
 */
struct ioatdma_device *device = chan->device;
struct pci_dev *pdev = device->pdev;
u32 chanerr;
u16 dev_id;
int err;
ioat2_quiesce(chan, msecs_to_jiffies(100));
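/* clear any latched channel error status before issuing the reset */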
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
if (device->version < IOAT_VER_3_3) {
/* clear any pending errors */
err = pci_read_config_dword(pdev,
IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
if (err) {
dev_err(&pdev->dev,
"channel error register unreachable\n");
return err;
}
pci_write_config_dword(pdev,
IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
* (workaround for spurious config parity error after restart)
*/
pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
pci_write_config_dword(pdev,
IOAT_PCI_DMAUNCERRSTS_OFFSET,
0x10);
}
}
err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
if (!err)
err = ioat3_irq_reinit(device);
if (err)
dev_err(&pdev->dev, "Failed to reset: %d\n", err);
return err;
}
static void ioat3_intr_quirk(struct ioatdma_device *device)
{
struct dma_device *dma;
struct dma_chan *c;
struct ioat_chan_common *chan;
u32 errmask;
dma = &device->common;
/*
 * if the hardware reports errors through descriptor write back
 * status (DWBES), mask the XOR/PQ error interrupts: the error is
 * delivered with the descriptor instead
 */
if (device->cap & IOAT_CAP_DWBES) {
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
errmask = readl(chan->reg_base +
IOAT_CHANERR_MASK_OFFSET);
errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
IOAT_CHANERR_XOR_Q_ERR;
writel(errmask, chan->reg_base +
IOAT_CHANERR_MASK_OFFSET);
}
}
}
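/* ioat3_dma_probe - set up v3+ channel ops, capabilities and raid offloads */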
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
int dca_en = system_has_dca_enabled(pdev);
struct dma_device *dma;
struct dma_chan *c;
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
device->intr_quirk = ioat3_intr_quirk;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
}
if (device->cap & IOAT_CAP_PQ) {
is_raid_device = true;
dma->device_prep_dma_pq = ioat3_prep_pq;
dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
dma_cap_set(DMA_PQ, dma->cap_mask);
dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
if (device->cap & IOAT_CAP_RAID16SS) {
dma_set_maxpq(dma, 16, 0);
} else {
dma_set_maxpq(dma, 8, 0);
}
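/* no native XOR engine: back the XOR ops with the PQ unit */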
if (!(device->cap & IOAT_CAP_XOR)) {
dma->device_prep_dma_xor = ioat3_prep_pqxor;
dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
if (device->cap & IOAT_CAP_RAID16SS) {
dma->max_xor = 16;
} else {
dma->max_xor = 8;
}
}
}
dma->device_tx_status = ioat3_tx_status;
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
/* starting with CB3.3 super extended descriptors are supported */
if (device->cap & IOAT_CAP_RAID16SS) {
char pool_name[14];
int i;
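/* pool i serves SEDs of (i + 1) * SED_SIZE bytes, 64-byte aligned */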
for (i = 0; i < MAX_SED_POOLS; i++) {
snprintf(pool_name, 14, "ioat_hw%d_sed", i);
/* allocate SED DMA pool */
device->sed_hw_pool[i] = dmam_pool_create(pool_name,
&pdev->dev,
SED_SIZE * (i + 1), 64, 0);
if (!device->sed_hw_pool[i])
return -ENOMEM;
}
}
err = ioat_probe(device);
if (err)
return err;
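/* default every channel to accept DCA-tagged writes from any CPU */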
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
writel(IOAT_DMA_DCA_ANY_CPU,
chan->reg_base + IOAT_DCACTRL_OFFSET);
}
err = ioat_register(device);
if (err)
return err;
ioat_kobject_add(device, &ioat2_ktype);
if (dca)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
}