/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static char *chanerr_str[] = {
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
	NULL
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < 32; i++) {
		if ((chanerr >> i) & 1) {
			if (chanerr_str[i]) {
				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
					i, chanerr_str[i]);
			} else
				break;
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}

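/*
 * The DMACOUNT register write below acts as the hardware doorbell: it
 * tells the channel how many descriptors on the ring have been made
 * valid for processing since the chain address was programmed.
 */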
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat: ioat+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

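/*
 * A NULL descriptor is a no-op transfer used to (re)start a channel: it
 * gives the hardware a valid chain address to begin fetching from and
 * requests a completion write so progress can be observed.
 */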
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

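/*
 * Suspend the channel and busy-wait until its status leaves the
 * active/idle states (i.e. the suspend has taken effect) or the timeout
 * expires. A tmo of 0 waits indefinitely.
 */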
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

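/*
 * tx_submit callback; pairs with ioat_check_space_lock(), which returned
 * with prep_lock held. The descriptor slots reserved there ("produce")
 * are published to the hardware-visible head here, and prep_lock is
 * dropped on the way out.
 */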
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	ioat_dma = to_ioatdma_device(chan->device);

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

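/*
 * Hardware descriptors are carved out of 2MB coherent chunks
 * (IOAT_DESCS_PER_2M descriptors per chunk); ioat_alloc_ring() allocates
 * the chunks first and then the software ring entries that point into
 * them.
 */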
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt && (i > 0)) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0; /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}

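/*
 * Operations with many sources (XOR with more than 5, PQ with more than
 * 3) spill into a second, "extended" descriptor that occupies the next
 * ring slot, so reclaim must consume two entries for such operations.
 */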
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

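/*
 * The hardware writes the address of the last completed descriptor to a
 * per-channel completion writeback area in host memory; reclaim compares
 * that address against last_completion to detect progress.
 */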
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_NOERROR;
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}

static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

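/*
 * Error handler, run from the cleanup path when the channel has halted:
 * decode CHANERR, record validation results for the faulting descriptor,
 * complete it, optionally abort the remaining descriptors and reset the
 * hardware, then restart the channel.
 */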
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

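/*
 * Per-channel watchdog. If the channel has halted, recover it; if no
 * descriptors are active, let the timer go idle; if a completion was
 * already acknowledged but no progress has been made since, force an
 * abort/reset/restart of the channel.
 */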
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					IOAT_PCI_DMAUNCERRSTS_OFFSET,
					0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}