
Merge branch 'ioat' into dmaengine

Dan Williams 15 years ago
parent
commit
e12c4fa377

+ 1 - 2
drivers/dma/Makefile

@@ -1,8 +1,7 @@
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o

+ 2 - 0
drivers/dma/ioat/Makefile

@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
+ioatdma-objs := pci.o dma.o dma_v2.o dca.o

+ 8 - 5
drivers/dma/ioat_dca.c → drivers/dma/ioat/dca.c

@@ -33,8 +33,8 @@
 #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
 #endif
 
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
+#include "dma.h"
+#include "registers.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = {
 };
 
 
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
 	struct dca_provider *dca;
 	struct ioat_dca_priv *ioatdca;
@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
 	return slots;
 }
 
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
 	struct dca_provider *dca;
 	struct ioat_dca_priv *ioatdca;
@@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
 	return slots;
 }
 
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
 	struct dca_provider *dca;
 	struct ioat_dca_priv *ioatdca;

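Note on the first hunk above: it sits beside the comment describing the DCA tag map layout, where bit 7 of an entry is the "valid" bit and bits 0:6 carry the tag payload. As a reading aid only, here is a hedged sketch of that decoding; the helper names are hypothetical and not part of this commit, only the bit positions come from the source comment.

/* Reading aid (not part of this commit): decoding one DCA tag map
 * entry per the layout described in dca.c above.
 */
static inline bool example_tag_entry_valid(u8 entry)
{
	return entry & 0x80;		/* bit 7: entry is valid */
}

static inline u8 example_tag_entry_bits(u8 entry)
{
	return entry & 0x7f;		/* bits 0:6: tag payload */
}
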
+ 1139 - 0
drivers/dma/ioat/dma.c

@@ -0,0 +1,1139 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous
+ * copy operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+		 "high-water mark for pushing ioat descriptors (default: 4)");
+
+/* internal functions */
+static void ioat1_cleanup(struct ioat_dma_chan *ioat);
+static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
+
+/**
+ * ioat_dma_do_interrupt - handler used for single vector interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+{
+	struct ioatdma_device *instance = data;
+	struct ioat_chan_common *chan;
+	unsigned long attnstatus;
+	int bit;
+	u8 intrctrl;
+
+	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
+
+	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
+		return IRQ_NONE;
+
+	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
+		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+		return IRQ_NONE;
+	}
+
+	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+		chan = ioat_chan_by_index(instance, bit);
+		tasklet_schedule(&chan->cleanup_task);
+	}
+
+	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+{
+	struct ioat_chan_common *chan = data;
+
+	tasklet_schedule(&chan->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+static void ioat1_cleanup_tasklet(unsigned long data);
+
+/* common channel initialization */
+void ioat_init_channel(struct ioatdma_device *device,
+		       struct ioat_chan_common *chan, int idx,
+		       void (*timer_fn)(unsigned long),
+		       void (*tasklet)(unsigned long),
+		       unsigned long ioat)
+{
+	struct dma_device *dma = &device->common;
+
+	chan->device = device;
+	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
+	spin_lock_init(&chan->cleanup_lock);
+	chan->common.device = dma;
+	list_add_tail(&chan->common.device_node, &dma->channels);
+	device->idx[idx] = chan;
+	init_timer(&chan->timer);
+	chan->timer.function = timer_fn;
+	chan->timer.data = ioat;
+	tasklet_init(&chan->cleanup_task, tasklet, ioat);
+	tasklet_disable(&chan->cleanup_task);
+}
+
+static void ioat1_timer_event(unsigned long data);
+
+/**
+ * ioat1_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
+static int ioat1_enumerate_channels(struct ioatdma_device *device)
+{
+	u8 xfercap_scale;
+	u32 xfercap;
+	int i;
+	struct ioat_dma_chan *ioat;
+	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &device->common;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt &= 0x1f; /* bits [4:0] valid */
+	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(device->idx));
+		dma->chancnt = ARRAY_SIZE(device->idx);
+	}
+	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_scale &= 0x1f; /* bits [4:0] valid */
+	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
+
+#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
+		dma->chancnt--;
+#endif
+	for (i = 0; i < dma->chancnt; i++) {
+		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
+		if (!ioat)
+			break;
+
+		ioat_init_channel(device, &ioat->base, i,
+				  ioat1_timer_event,
+				  ioat1_cleanup_tasklet,
+				  (unsigned long) ioat);
+		ioat->xfercap = xfercap;
+		spin_lock_init(&ioat->desc_lock);
+		INIT_LIST_HEAD(&ioat->free_desc);
+		INIT_LIST_HEAD(&ioat->used_desc);
+	}
+	dma->chancnt = i;
+	return i;
+}
+
+/**
+ * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
+ *                                    descriptors to hw
+ * @ioat: ioat1 channel handle
+ */
+static inline void
+__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
+{
+	void __iomem *reg_base = ioat->base.reg_base;
+
+	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
+		__func__, ioat->pending);
+	ioat->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
+
+	if (ioat->pending > 0) {
+		spin_lock_bh(&ioat->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat);
+		spin_unlock_bh(&ioat->desc_lock);
+	}
+}
+
+/**
+ * ioat1_reset_channel - restart a channel
+ * @ioat: IOAT DMA channel handle
+ */
+static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	void __iomem *reg_base = chan->reg_base;
+	u32 chansts, chanerr;
+
+	dev_warn(to_dev(chan), "reset\n");
+	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
+	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
+	if (chanerr) {
+		dev_err(to_dev(chan),
+			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+			chan_num(chan), chansts, chanerr);
+		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
+	}
+
+	/*
+	 * whack it upside the head with a reset
+	 * and wait for things to settle out.
+	 * force the pending count to a really big negative
+	 * to make sure no one forces an issue_pending
+	 * while we're waiting.
+	 */
+
+	ioat->pending = INT_MIN;
+	writeb(IOAT_CHANCMD_RESET,
+	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+	set_bit(IOAT_RESET_PENDING, &chan->state);
+	mod_timer(&chan->timer, jiffies + RESET_DELAY);
+}
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *c = tx->chan;
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioat_desc_sw *first;
+	struct ioat_desc_sw *chain_tail;
+	dma_cookie_t cookie;
+
+	spin_lock_bh(&ioat->desc_lock);
+	/* cookie incr and addition to used_list must be atomic */
+	cookie = c->cookie;
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+	c->cookie = cookie;
+	tx->cookie = cookie;
+	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
+
+	/* write address into NextDescriptor field of last desc in chain */
+	first = to_ioat_desc(tx->tx_list.next);
+	chain_tail = to_ioat_desc(ioat->used_desc.prev);
+	/* make descriptor updates globally visible before chaining */
+	wmb();
+	chain_tail->hw->next = first->txd.phys;
+	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
+	dump_desc_dbg(ioat, chain_tail);
+	dump_desc_dbg(ioat, first);
+
+	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	ioat->pending += desc->hw->tx_cnt;
+	if (ioat->pending >= ioat_pending_level)
+		__ioat1_dma_memcpy_issue_pending(ioat);
+	spin_unlock_bh(&ioat->desc_lock);
+
+	return cookie;
+}
+
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
+static struct ioat_desc_sw *
+ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
+{
+	struct ioat_dma_descriptor *desc;
+	struct ioat_desc_sw *desc_sw;
+	struct ioatdma_device *ioatdma_device;
+	dma_addr_t phys;
+
+	ioatdma_device = ioat->base.device;
+	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
+	if (unlikely(!desc))
+		return NULL;
+
+	desc_sw = kzalloc(sizeof(*desc_sw), flags);
+	if (unlikely(!desc_sw)) {
+		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
+		return NULL;
+	}
+
+	memset(desc, 0, sizeof(*desc));
+
+	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
+	desc_sw->txd.tx_submit = ioat1_tx_submit;
+	desc_sw->hw = desc;
+	desc_sw->txd.phys = phys;
+	set_desc_id(desc_sw, -1);
+
+	return desc_sw;
+}
+
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+		 "ioat1: initial descriptors per channel (default: 256)");
+/**
+ * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
+static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioat_desc_sw *desc;
+	u32 chanerr;
+	int i;
+	LIST_HEAD(tmp_list);
+
+	/* have we already been set up? */
+	if (!list_empty(&ioat->free_desc))
+		return ioat->desccount;
+
+	/* Setup register to interrupt and write completion status on error */
+	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	if (chanerr) {
+		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+	}
+
+	/* Allocate descriptors */
+	for (i = 0; i < ioat_initial_desc_count; i++) {
+		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
+		if (!desc) {
+			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
+			break;
+		}
+		set_desc_id(desc, i);
+		list_add_tail(&desc->node, &tmp_list);
+	}
+	spin_lock_bh(&ioat->desc_lock);
+	ioat->desccount = i;
+	list_splice(&tmp_list, &ioat->free_desc);
+	spin_unlock_bh(&ioat->desc_lock);
+
+	/* allocate a completion writeback area */
+	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+	chan->completion = pci_pool_alloc(chan->device->completion_pool,
+					  GFP_KERNEL, &chan->completion_dma);
+	if (!chan->completion)
+		return -ENOMEM;
+	memset(chan->completion, 0, sizeof(*chan->completion));
+	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
+	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64) chan->completion_dma) >> 32,
+	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	tasklet_enable(&chan->cleanup_task);
+	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
+	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+		__func__, ioat->desccount);
+	return ioat->desccount;
+}
+
+/**
+ * ioat1_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+{
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *ioatdma_device = chan->device;
+	struct ioat_desc_sw *desc, *_desc;
+	int in_use_descs = 0;
+
+	/* Before freeing channel resources first check
+	 * if they have been previously allocated for this channel.
+	 */
+	if (ioat->desccount == 0)
+		return;
+
+	tasklet_disable(&chan->cleanup_task);
+	del_timer_sync(&chan->timer);
+	ioat1_cleanup(ioat);
+
+	/* Delay 100ms after reset to allow internal DMA logic to quiesce
+	 * before removing DMA descriptor resources.
+	 */
+	writeb(IOAT_CHANCMD_RESET,
+	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+	mdelay(100);
+
+	spin_lock_bh(&ioat->desc_lock);
+	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
+			__func__, desc_id(desc));
+		dump_desc_dbg(ioat, desc);
+		in_use_descs++;
+		list_del(&desc->node);
+		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+			      desc->txd.phys);
+		kfree(desc);
+	}
+	list_for_each_entry_safe(desc, _desc,
+				 &ioat->free_desc, node) {
+		list_del(&desc->node);
+		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+			      desc->txd.phys);
+		kfree(desc);
+	}
+	spin_unlock_bh(&ioat->desc_lock);
+
+	pci_pool_free(ioatdma_device->completion_pool,
+		      chan->completion,
+		      chan->completion_dma);
+
+	/* one is ok since we left it on there on purpose */
+	if (in_use_descs > 1)
+		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
+			in_use_descs - 1);
+
+	chan->last_completion = 0;
+	chan->completion_dma = 0;
+	ioat->pending = 0;
+	ioat->desccount = 0;
+}
+
+/**
+ * ioat1_dma_get_next_descriptor - return the next available descriptor
+ * @ioat: IOAT DMA channel handle
+ *
+ * Gets the next descriptor from the chain, and must be called with the
+ * channel's desc_lock held.  Allocates more descriptors if the channel
+ * has run out.
+ */
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
+{
+	struct ioat_desc_sw *new;
+
+	if (!list_empty(&ioat->free_desc)) {
+		new = to_ioat_desc(ioat->free_desc.next);
+		list_del(&new->node);
+	} else {
+		/* try to get another desc */
+		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
+		if (!new) {
+			dev_err(to_dev(&ioat->base), "alloc failed\n");
+			return NULL;
+		}
+	}
+	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
+		__func__, desc_id(new));
+	prefetch(new->hw);
+	return new;
+}
+
+static struct dma_async_tx_descriptor *
+ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+		      dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_desc_sw *desc;
+	size_t copy;
+	LIST_HEAD(chain);
+	dma_addr_t src = dma_src;
+	dma_addr_t dest = dma_dest;
+	size_t total_len = len;
+	struct ioat_dma_descriptor *hw = NULL;
+	int tx_cnt = 0;
+
+	spin_lock_bh(&ioat->desc_lock);
+	desc = ioat1_dma_get_next_descriptor(ioat);
+	do {
+		if (!desc)
+			break;
+
+		tx_cnt++;
+		copy = min_t(size_t, len, ioat->xfercap);
+
+		hw = desc->hw;
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dest;
+
+		list_add_tail(&desc->node, &chain);
+
+		len -= copy;
+		dest += copy;
+		src += copy;
+		if (len) {
+			struct ioat_desc_sw *next;
+
+			async_tx_ack(&desc->txd);
+			next = ioat1_dma_get_next_descriptor(ioat);
+			hw->next = next ? next->txd.phys : 0;
+			dump_desc_dbg(ioat, desc);
+			desc = next;
+		} else
+			hw->next = 0;
+	} while (len);
+
+	if (!desc) {
+		struct ioat_chan_common *chan = &ioat->base;
+
+		dev_err(to_dev(chan),
+			"chan%d - get_next_desc failed\n", chan_num(chan));
+		list_splice(&chain, &ioat->free_desc);
+		spin_unlock_bh(&ioat->desc_lock);
+		return NULL;
+	}
+	spin_unlock_bh(&ioat->desc_lock);
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	list_splice(&chain, &desc->txd.tx_list);
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.compl_write = 1;
+	hw->tx_cnt = tx_cnt;
+	dump_desc_dbg(ioat, desc);
+
+	return &desc->txd;
+}
+
+static void ioat1_cleanup_tasklet(unsigned long data)
+{
+	struct ioat_dma_chan *chan = (void *)data;
+
+	ioat1_cleanup(chan);
+	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
+		       int direction, enum dma_ctrl_flags flags, bool dst)
+{
+	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
+	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
+		pci_unmap_single(pdev, addr, len, direction);
+	else
+		pci_unmap_page(pdev, addr, len, direction);
+}
+
+
+void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+		    size_t len, struct ioat_dma_descriptor *hw)
+{
+	struct pci_dev *pdev = chan->device->pdev;
+	size_t offset = len - hw->size;
+
+	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+		ioat_unmap(pdev, hw->dst_addr - offset, len,
+			   PCI_DMA_FROMDEVICE, flags, 1);
+
+	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
+		ioat_unmap(pdev, hw->src_addr - offset, len,
+			   PCI_DMA_TODEVICE, flags, 0);
+}
+
+unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+{
+	unsigned long phys_complete;
+	u64 completion;
+
+	completion = *chan->completion;
+	phys_complete = ioat_chansts_to_addr(completion);
+
+	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
+	if (is_ioat_halted(completion)) {
+		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
+			chanerr);
+
+		/* TODO do something to salvage the situation */
+	}
+
+	return phys_complete;
+}
+
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+			   unsigned long *phys_complete)
+{
+	*phys_complete = ioat_get_current_completion(chan);
+	if (*phys_complete == chan->last_completion)
+		return false;
+	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	return true;
+}
+
+static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct list_head *_desc, *n;
+	struct dma_async_tx_descriptor *tx;
+
+	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+		 __func__, phys_complete);
+	list_for_each_safe(_desc, n, &ioat->used_desc) {
+		struct ioat_desc_sw *desc;
+
+		prefetch(n);
+		desc = list_entry(_desc, typeof(*desc), node);
+		tx = &desc->txd;
+		/*
+		 * Incoming DMA requests may use multiple descriptors,
+		 * due to exceeding xfercap, perhaps. If so, only the
+		 * last one will have a cookie, and require unmapping.
+		 */
+		dump_desc_dbg(ioat, desc);
+		if (tx->cookie) {
+			chan->completed_cookie = tx->cookie;
+			tx->cookie = 0;
+			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
+
+		if (tx->phys != phys_complete) {
+			/*
+			 * a completed entry, but not the last, so clean
+			 * up if the client is done with the descriptor
+			 */
+			if (async_tx_test_ack(tx))
+				list_move_tail(&desc->node, &ioat->free_desc);
+		} else {
+			/*
+			 * last used desc. Do not remove, so we can
+			 * append from it.
+			 */
+
+			/* if nothing else is pending, cancel the
+			 * completion timeout
+			 */
+			if (n == &ioat->used_desc) {
+				dev_dbg(to_dev(chan),
+					"%s cancel completion timeout\n",
+					__func__);
+				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+			}
+
+			/* TODO check status bits? */
+			break;
+		}
+	}
+
+	chan->last_completion = phys_complete;
+}
+
+/**
+ * ioat1_cleanup - cleanup up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ *
+ * To prevent lock contention we defer cleanup when the locks are
+ * contended with a terminal timeout that forces cleanup and catches
+ * completion notification errors.
+ */
+static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	prefetch(chan->completion);
+
+	if (!spin_trylock_bh(&chan->cleanup_lock))
+		return;
+
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	if (!spin_trylock_bh(&ioat->desc_lock)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	__cleanup(ioat, phys_complete);
+
+	spin_unlock_bh(&ioat->desc_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat1_timer_event(unsigned long data)
+{
+	struct ioat_dma_chan *ioat = (void *) data;
+	struct ioat_chan_common *chan = &ioat->base;
+
+	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
+		struct ioat_desc_sw *desc;
+
+		spin_lock_bh(&ioat->desc_lock);
+
+		/* restart active descriptors */
+		desc = to_ioat_desc(ioat->used_desc.prev);
+		ioat_set_chainaddr(ioat, desc->txd.phys);
+		ioat_start(chan);
+
+		ioat->pending = 0;
+		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		spin_unlock_bh(&ioat->desc_lock);
+	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+		unsigned long phys_complete;
+
+		spin_lock_bh(&ioat->desc_lock);
+		/* if we haven't made progress and we have already
+		 * acknowledged a pending completion once, then be more
+		 * forceful with a restart
+		 */
+		if (ioat_cleanup_preamble(chan, &phys_complete))
+			__cleanup(ioat, phys_complete);
+		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+			ioat1_reset_channel(ioat);
+		else {
+			u64 status = ioat_chansts(chan);
+
+			/* manually update the last completion address */
+			if (ioat_chansts_to_addr(status) != 0)
+				*chan->completion = status;
+
+			set_bit(IOAT_COMPLETION_ACK, &chan->state);
+			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		}
+		spin_unlock_bh(&ioat->desc_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static enum dma_status
+ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+		      dma_cookie_t *done, dma_cookie_t *used)
+{
+	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+
+	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+		return DMA_SUCCESS;
+
+	ioat1_cleanup(ioat);
+
+	return ioat_is_complete(c, cookie, done, used);
+}
+
+static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioat_desc_sw *desc;
+	struct ioat_dma_descriptor *hw;
+
+	spin_lock_bh(&ioat->desc_lock);
+
+	desc = ioat1_dma_get_next_descriptor(ioat);
+
+	if (!desc) {
+		dev_err(to_dev(chan),
+			"Unable to start null desc - get next desc failed\n");
+		spin_unlock_bh(&ioat->desc_lock);
+		return;
+	}
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.compl_write = 1;
+	/* set size to non-zero value (channel returns error when size is 0) */
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+	async_tx_ack(&desc->txd);
+	hw->next = 0;
+	list_add_tail(&desc->node, &ioat->used_desc);
+	dump_desc_dbg(ioat, desc);
+
+	ioat_set_chainaddr(ioat, desc->txd.phys);
+	ioat_start(chan);
+	spin_unlock_bh(&ioat->desc_lock);
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void __devinit ioat_dma_test_callback(void *dma_async_param)
+{
+	struct completion *cmp = dma_async_param;
+
+	complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @device: device to be tested
+ */
+static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
+{
+	int i;
+	u8 *src;
+	u8 *dest;
+	struct dma_device *dma = &device->common;
+	struct device *dev = &device->pdev->dev;
+	struct dma_chan *dma_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dest, dma_src;
+	dma_cookie_t cookie;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	unsigned long flags;
+
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!src)
+		return -ENOMEM;
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!dest) {
+		kfree(src);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffer */
+	for (i = 0; i < IOAT_TEST_SIZE; i++)
+		src[i] = (u8)i;
+
+	/* Start copy, using first DMA channel */
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		dev_err(dev, "selftest cannot allocate chan resource\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+		DMA_PREP_INTERRUPT;
+	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+						   IOAT_TEST_SIZE, flags);
+	if (!tx) {
+		dev_err(dev, "Self-test prep failed, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test setup failed, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+					!= DMA_SUCCESS) {
+		dev_err(dev, "Self-test copy timed out, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+		dev_err(dev, "Self-test copy failed compare, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	kfree(src);
+	kfree(dest);
+	return err;
+}
+
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+		    sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+		 "set ioat interrupt style: msix (default), "
+		 "msix-single-vector, msi, intx");
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @device: ioat device
+ */
+static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+{
+	struct ioat_chan_common *chan;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
+	u8 intrctrl = 0;
+
+	if (!strcmp(ioat_interrupt_style, "msix"))
+		goto msix;
+	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
+		goto msix_single_vector;
+	if (!strcmp(ioat_interrupt_style, "msi"))
+		goto msi;
+	if (!strcmp(ioat_interrupt_style, "intx"))
+		goto intx;
+	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
+	goto err_no_irq;
+
+msix:
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = device->common.chancnt;
+	for (i = 0; i < msixcnt; i++)
+		device->msix_entries[i].entry = i;
+
+	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+	if (err < 0)
+		goto msi;
+	if (err > 0)
+		goto msix_single_vector;
+
+	for (i = 0; i < msixcnt; i++) {
+		msix = &device->msix_entries[i];
+		chan = ioat_chan_by_index(device, i);
+		err = devm_request_irq(dev, msix->vector,
+				       ioat_dma_do_interrupt_msix, 0,
+				       "ioat-msix", chan);
+		if (err) {
+			for (j = 0; j < i; j++) {
+				msix = &device->msix_entries[j];
+				chan = ioat_chan_by_index(device, j);
+				devm_free_irq(dev, msix->vector, chan);
+			}
+			goto msix_single_vector;
+		}
+	}
+	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+	goto done;
+
+msix_single_vector:
+	msix = &device->msix_entries[0];
+	msix->entry = 0;
+	err = pci_enable_msix(pdev, device->msix_entries, 1);
+	if (err)
+		goto msi;
+
+	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
+			       "ioat-msix", device);
+	if (err) {
+		pci_disable_msix(pdev);
+		goto msi;
+	}
+	goto done;
+
+msi:
+	err = pci_enable_msi(pdev);
+	if (err)
+		goto intx;
+
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+			       "ioat-msi", device);
+	if (err) {
+		pci_disable_msi(pdev);
+		goto intx;
+	}
+	goto done;
+
+intx:
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+			       IRQF_SHARED, "ioat-intx", device);
+	if (err)
+		goto err_no_irq;
+
+done:
+	if (device->intr_quirk)
+		device->intr_quirk(device);
+	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	return 0;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
+
+static void ioat_disable_interrupts(struct ioatdma_device *device)
+{
+	/* Disable all interrupt generation */
+	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+}
+
+int __devinit ioat_probe(struct ioatdma_device *device)
+{
+	int err = -ENODEV;
+	struct dma_device *dma = &device->common;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+
+	/* DMA coherent memory pool for DMA descriptor allocations */
+	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+					   sizeof(struct ioat_dma_descriptor),
+					   64, 0);
+	if (!device->dma_pool) {
+		err = -ENOMEM;
+		goto err_dma_pool;
+	}
+
+	device->completion_pool = pci_pool_create("completion_pool", pdev,
+						  sizeof(u64), SMP_CACHE_BYTES,
+						  SMP_CACHE_BYTES);
+
+	if (!device->completion_pool) {
+		err = -ENOMEM;
+		goto err_completion_pool;
+	}
+
+	device->enumerate_channels(device);
+
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma->dev = &pdev->dev;
+
+	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
+		" %d channels, device version 0x%02x, driver version %s\n",
+		dma->chancnt, device->version, IOAT_DMA_VERSION);
+
+	if (!dma->chancnt) {
+		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
+			"zero channels detected\n");
+		goto err_setup_interrupts;
+	}
+
+	err = ioat_dma_setup_interrupts(device);
+	if (err)
+		goto err_setup_interrupts;
+
+	err = ioat_dma_self_test(device);
+	if (err)
+		goto err_self_test;
+
+	return 0;
+
+err_self_test:
+	ioat_disable_interrupts(device);
+err_setup_interrupts:
+	pci_pool_destroy(device->completion_pool);
+err_completion_pool:
+	pci_pool_destroy(device->dma_pool);
+err_dma_pool:
+	return err;
+}
+
+int __devinit ioat_register(struct ioatdma_device *device)
+{
+	int err = dma_async_device_register(&device->common);
+
+	if (err) {
+		ioat_disable_interrupts(device);
+		pci_pool_destroy(device->completion_pool);
+		pci_pool_destroy(device->dma_pool);
+	}
+
+	return err;
+}
+
+/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
+static void ioat1_intr_quirk(struct ioatdma_device *device)
+{
+	struct pci_dev *pdev = device->pdev;
+	u32 dmactrl;
+
+	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+	if (pdev->msi_enabled)
+		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+	else
+		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
+	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+}
+
+int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	int err;
+
+	device->intr_quirk = ioat1_intr_quirk;
+	device->enumerate_channels = ioat1_enumerate_channels;
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
+	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
+	dma->device_is_tx_complete = ioat1_dma_is_complete;
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(4096);
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat_dca_init(pdev, device->reg_base);
+
+	return err;
+}
+
+void __devexit ioat_dma_remove(struct ioatdma_device *device)
+{
+	struct dma_device *dma = &device->common;
+
+	ioat_disable_interrupts(device);
+
+	dma_async_device_unregister(dma);
+
+	pci_pool_destroy(device->dma_pool);
+	pci_pool_destroy(device->completion_pool);
+
+	INIT_LIST_HEAD(&dma->channels);
+}

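For context on how the ops wired up in ioat1_dma_probe() are consumed: a client reaches them through the generic dmaengine API, exactly the calls ioat_dma_self_test() exercises above. Below is a minimal polled memcpy-offload sketch against the dmaengine client API of this era; it is an illustration under those assumptions, not code from this commit.

/* Sketch of a dmaengine client driving the ops registered above.
 * Assumes the era's client API (dma_async_memcpy_buf_to_buf,
 * DMA_SUCCESS, polled completion), as used by net_dma.
 */
#include <linux/dmaengine.h>

static int example_offload_copy(void *dest, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dmaengine_get();			/* reference the dmaengine client API */
	chan = dma_find_channel(DMA_MEMCPY);	/* e.g. an ioat channel */
	if (!chan) {
		dmaengine_put();
		return -ENODEV;
	}

	/* maps both buffers, calls ->device_prep_dma_memcpy() and tx_submit() */
	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0) {
		dmaengine_put();
		return -ENOMEM;
	}

	/* ->device_issue_pending(), i.e. ioat1_dma_memcpy_issue_pending() */
	dma_async_issue_pending(chan);

	/* ->device_is_tx_complete(), i.e. ioat1_dma_is_complete() */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();

	dmaengine_put();
	return 0;
}

The busy-wait is only for illustration; a real client either polls opportunistically or sets DMA_PREP_INTERRUPT with a completion callback, as the self-test above does.
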
+ 306 - 0
drivers/dma/ioat/dma.h

@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef IOATDMA_H
+#define IOATDMA_H
+
+#include <linux/dmaengine.h>
+#include "hw.h"
+#include "registers.h"
+#include <linux/init.h>
+#include <linux/dmapool.h>
+#include <linux/cache.h>
+#include <linux/pci_ids.h>
+#include <net/tcp.h>
+
+#define IOAT_DMA_VERSION  "3.64"
+
+#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
+#define IOAT_DMA_DCA_ANY_CPU		~0
+
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
+#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
+#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
+#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
+
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
+/**
+ * struct ioatdma_device - internal representation of a IOAT device
+ * @pdev: PCI-Express device
+ * @reg_base: MMIO register space base address
+ * @dma_pool: for allocating DMA descriptors
+ * @completion_pool: for allocating completion writeback areas
+ * @common: embedded struct dma_device
+ * @version: version of ioatdma device
+ * @msix_entries: irq handlers
+ * @idx: per channel data
+ * @dca: direct cache access context
+ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
+ * @enumerate_channels: hw version specific channel enumeration
+ */
+struct ioatdma_device {
+	struct pci_dev *pdev;
+	void __iomem *reg_base;
+	struct pci_pool *dma_pool;
+	struct pci_pool *completion_pool;
+	struct dma_device common;
+	u8 version;
+	struct msix_entry msix_entries[4];
+	struct ioat_chan_common *idx[4];
+	struct dca_provider *dca;
+	void (*intr_quirk)(struct ioatdma_device *device);
+	int (*enumerate_channels)(struct ioatdma_device *device);
+};
+
+struct ioat_chan_common {
+	struct dma_chan common;
+	void __iomem *reg_base;
+	unsigned long last_completion;
+	spinlock_t cleanup_lock;
+	dma_cookie_t completed_cookie;
+	unsigned long state;
+	#define IOAT_COMPLETION_PENDING 0
+	#define IOAT_COMPLETION_ACK 1
+	#define IOAT_RESET_PENDING 2
+	struct timer_list timer;
+	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
+	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
+	#define RESET_DELAY msecs_to_jiffies(100)
+	struct ioatdma_device *device;
+	dma_addr_t completion_dma;
+	u64 *completion;
+	struct tasklet_struct cleanup_task;
+};
+
+
+/**
+ * struct ioat_dma_chan - internal representation of a DMA channel
+ */
+struct ioat_dma_chan {
+	struct ioat_chan_common base;
+
+	size_t xfercap;	/* XFERCAP register value expanded out */
+
+	spinlock_t desc_lock;
+	struct list_head free_desc;
+	struct list_head used_desc;
+
+	int pending;
+	u16 desccount;
+};
+
+static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
+{
+	return container_of(c, struct ioat_chan_common, common);
+}
+
+static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
+{
+	struct ioat_chan_common *chan = to_chan_common(c);
+
+	return container_of(chan, struct ioat_dma_chan, base);
+}
+
+/**
+ * ioat_is_complete - poll the status of an ioat transaction
+ * @c: channel handle
+ * @cookie: transaction identifier
+ * @done: if set, updated with last completed transaction
+ * @used: if set, updated with last used transaction
+ */
+static inline enum dma_status
+ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+		 dma_cookie_t *done, dma_cookie_t *used)
+{
+	struct ioat_chan_common *chan = to_chan_common(c);
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+
+	last_used = c->cookie;
+	last_complete = chan->completed_cookie;
+
+	if (done)
+		*done = last_complete;
+	if (used)
+		*used = last_used;
+
+	return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/* wrapper around hardware descriptor format + additional software fields */
+
+/**
+ * struct ioat_desc_sw - wrapper around hardware descriptor
+ * @hw: hardware DMA descriptor
+ * @node: this descriptor will either be on the free list,
+ *     or attached to a transaction list (async_tx.tx_list)
+ * @txd: the generic software descriptor for all engines
+ * @id: identifier for debug
+ */
+struct ioat_desc_sw {
+	struct ioat_dma_descriptor *hw;
+	struct list_head node;
+	size_t len;
+	struct dma_async_tx_descriptor txd;
+	#ifdef DEBUG
+	int id;
+	#endif
+};
+
+#ifdef DEBUG
+#define set_desc_id(desc, i) ((desc)->id = (i))
+#define desc_id(desc) ((desc)->id)
+#else
+#define set_desc_id(desc, i)
+#define desc_id(desc) (0)
+#endif
+
+static inline void
+__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+		struct dma_async_tx_descriptor *tx, int id)
+{
+	struct device *dev = to_dev(chan);
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
+		" ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+		(unsigned long long) tx->phys,
+		(unsigned long long) hw->next, tx->cookie, tx->flags,
+		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
+}
+
+#define dump_desc_dbg(c, d) \
+	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+
+static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
+{
+	#ifdef CONFIG_NET_DMA
+	sysctl_tcp_dma_copybreak = copybreak;
+	#endif
+}
+
+static inline struct ioat_chan_common *
+ioat_chan_by_index(struct ioatdma_device *device, int index)
+{
+	return device->idx[index];
+}
+
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+	u64 status;
+	u32 status_lo;
+
+	/* We need to read the low address first as this causes the
+	 * chipset to latch the upper bits for the subsequent read
+	 */
+	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+	status <<= 32;
+	status |= status_lo;
+
+	return status;
+}
+
+static inline void ioat_start(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+
+	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline u64 ioat_chansts_to_addr(u64 status)
+{
+	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+}
+
+static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+{
+	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+}
+
+static inline void ioat_suspend(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+
+	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+
+	writel(addr & 0x00000000FFFFFFFF,
+	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+}
+
+static inline bool is_ioat_active(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
+}
+
+static inline bool is_ioat_idle(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
+}
+
+static inline bool is_ioat_halted(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
+}
+
+static inline bool is_ioat_suspended(unsigned long status)
+{
+	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
+}
+
+/* channel was fatally programmed */
+static inline bool is_ioat_bug(unsigned long err)
+{
+	return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
+			 IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
+			 IOAT_CHANERR_LENGTH_ERR));
+}
+
+int __devinit ioat_probe(struct ioatdma_device *device);
+int __devinit ioat_register(struct ioatdma_device *device);
+int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
+void __devexit ioat_dma_remove(struct ioatdma_device *device);
+struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
+					      void __iomem *iobase);
+unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+void ioat_init_channel(struct ioatdma_device *device,
+		       struct ioat_chan_common *chan, int idx,
+		       void (*timer_fn)(unsigned long),
+		       void (*tasklet)(unsigned long),
+		       unsigned long ioat);
+void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+		    size_t len, struct ioat_dma_descriptor *hw);
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+			   unsigned long *phys_complete);
+#endif /* IOATDMA_H */

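ioat_is_complete() above only snapshots the (last_complete, last_used) cookie window and defers the verdict to the generic helper. For reference, the stock dma_async_is_complete() from dmaengine.h amounts to the window test sketched below; cookies increase monotonically and wrap back to 1 past INT_MAX, which the else branch covers.

/* Sketch of the generic cookie-window test ioat_is_complete() relies on. */
static enum dma_status example_is_complete(dma_cookie_t cookie,
					   dma_cookie_t last_complete,
					   dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		/* no wrap: everything in (last_complete, last_used] is in flight */
		if (cookie <= last_complete || cookie > last_used)
			return DMA_SUCCESS;
	} else {
		/* the cookie counter wrapped between completion and submission */
		if (cookie <= last_complete && cookie > last_used)
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
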
+ 881 - 0
drivers/dma/ioat/dma_v2.c

@@ -0,0 +1,881 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
+ * does asynchronous data movement and checksumming operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
+#include "dma.h"
+#include "dma_v2.h"
+#include "registers.h"
+#include "hw.h"
+
+static int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");
+static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+		 "ioat2+: upper limit for dynamic ring resizing (default: n=16)");
+
+static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
+{
+	void __iomem *reg_base = ioat->base.reg_base;
+
+	ioat->pending = 0;
+	ioat->dmacount += ioat2_ring_pending(ioat);
+	ioat->issued = ioat->head;
+	/* make descriptor updates globally visible before notifying channel */
+	wmb();
+	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(&ioat->base),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
+}
+
+static void ioat2_issue_pending(struct dma_chan *chan)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+
+	spin_lock_bh(&ioat->ring_lock);
+	if (ioat->pending == 1)
+		__ioat2_issue_pending(ioat);
+	spin_unlock_bh(&ioat->ring_lock);
+}
+
+/**
+ * ioat2_update_pending - log pending descriptors
+ * @ioat: ioat2+ channel
+ *
+ * set pending to '1' unless pending is already set to '2'; pending == 2
+ * indicates that submission is temporarily blocked due to an in-flight
+ * reset.  If we are already above the ioat_pending_level threshold then
+ * just issue pending.
+ *
+ * called with ring_lock held
+ */
+static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
+{
+	if (unlikely(ioat->pending == 2))
+		return;
+	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+		__ioat2_issue_pending(ioat);
+	else
+		ioat->pending = 1;
+}
+
+static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
+	int idx;
+
+	if (ioat2_ring_space(ioat) < 1) {
+		dev_err(to_dev(&ioat->base),
+			"Unable to start null desc - ring full\n");
+		return;
+	}
+
+	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued);
+	idx = ioat2_desc_alloc(ioat, 1);
+	desc = ioat2_get_ring_ent(ioat, idx);
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.compl_write = 1;
+	/* set size to non-zero value (channel returns error when size is 0) */
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+	async_tx_ack(&desc->txd);
+	ioat2_set_chainaddr(ioat, desc->txd.phys);
+	dump_desc_dbg(ioat, desc);
+	__ioat2_issue_pending(ioat);
+}
+
+static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
+{
+	spin_lock_bh(&ioat->ring_lock);
+	__ioat2_start_null_desc(ioat);
+	spin_unlock_bh(&ioat->ring_lock);
+}
+
+static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct dma_async_tx_descriptor *tx;
+	struct ioat_ring_ent *desc;
+	bool seen_current = false;
+	u16 active;
+	int i;
+
+	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued);
+
+	active = ioat2_ring_active(ioat);
+	for (i = 0; i < active && !seen_current; i++) {
+		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
+		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+		tx = &desc->txd;
+		dump_desc_dbg(ioat, desc);
+		if (tx->cookie) {
+			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+			chan->completed_cookie = tx->cookie;
+			tx->cookie = 0;
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
+
+		if (tx->phys == phys_complete)
+			seen_current = true;
+	}
+	ioat->tail += i;
+	BUG_ON(!seen_current); /* no active descs have written a completion? */
+
+	chan->last_completion = phys_complete;
+	if (ioat->head == ioat->tail) {
+		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
+			__func__);
+		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+}
+
+/**
+ * ioat2_cleanup - clean finished descriptors (advance tail pointer)
+ * @chan: ioat channel to be cleaned up
+ */
+static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	prefetch(chan->completion);
+
+	if (!spin_trylock_bh(&chan->cleanup_lock))
+		return;
+
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	if (!spin_trylock_bh(&ioat->ring_lock)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	__cleanup(ioat, phys_complete);
+
+	spin_unlock_bh(&ioat->ring_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat2_cleanup_tasklet(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = (void *) data;
+
+	ioat2_cleanup(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void __restart_chan(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+
+	/* set the tail to be re-issued */
+	ioat->issued = ioat->tail;
+	ioat->dmacount = 0;
+	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
+	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	dev_dbg(to_dev(chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
+
+	if (ioat2_ring_pending(ioat)) {
+		struct ioat_ring_ent *desc;
+
+		desc = ioat2_get_ring_ent(ioat, ioat->tail);
+		ioat2_set_chainaddr(ioat, desc->txd.phys);
+		__ioat2_issue_pending(ioat);
+	} else
+		__ioat2_start_null_desc(ioat);
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		status = ioat_chansts(chan);
+		cpu_relax();
+	}
+
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+
+	__restart_chan(ioat);
+}
+
+static bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
+
+static void ioat2_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat_chan_common *chan = &ioat->base;
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+		unsigned long phys_complete;
+		u64 status;
+
+		spin_lock_bh(&ioat->ring_lock);
+		status = ioat_chansts(chan);
+
+		/* when halted due to errors check for channel
+		 * programming errors before advancing the completion state
+		 */
+		if (is_ioat_halted(status)) {
+			u32 chanerr;
+
+			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+			BUG_ON(is_ioat_bug(chanerr));
+		}
+
+		/* if we haven't made progress and we have already
+		 * acknowledged a pending completion once, then be more
+		 * forceful with a restart
+		 */
+		if (ioat_cleanup_preamble(chan, &phys_complete))
+			__cleanup(ioat, phys_complete);
+		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+			ioat2_restart_channel(ioat);
+		else {
+			set_bit(IOAT_COMPLETION_ACK, &chan->state);
+			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		}
+		spin_unlock_bh(&ioat->ring_lock);
+	} else {
+		u16 active;
+
+		/* if the ring is idle, empty, and oversized try to step
+		 * down the size
+		 */
+		spin_lock_bh(&ioat->ring_lock);
+		active = ioat2_ring_active(ioat);
+		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
+			reshape_ring(ioat, ioat->alloc_order-1);
+		spin_unlock_bh(&ioat->ring_lock);
+
+		/* keep shrinking until we get back to our minimum
+		 * default size
+		 */
+		if (ioat->alloc_order > ioat_get_alloc_order())
+			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+/**
+ * ioat2_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
+static int ioat2_enumerate_channels(struct ioatdma_device *device)
+{
+	struct ioat2_dma_chan *ioat;
+	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &device->common;
+	u8 xfercap_log;
+	int i;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt &= 0x1f; /* bits [4:0] valid */
+	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(device->idx));
+		dma->chancnt = ARRAY_SIZE(device->idx);
+	}
+	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log &= 0x1f; /* bits [4:0] valid */
+	if (xfercap_log == 0)
+		return 0;
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+	/* FIXME which i/oat version is i7300? */
+#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
+		dma->chancnt--;
+#endif
+	for (i = 0; i < dma->chancnt; i++) {
+		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
+		if (!ioat)
+			break;
+
+		ioat_init_channel(device, &ioat->base, i,
+				  ioat2_timer_event,
+				  ioat2_cleanup_tasklet,
+				  (unsigned long) ioat);
+		ioat->xfercap_log = xfercap_log;
+		spin_lock_init(&ioat->ring_lock);
+	}
+	dma->chancnt = i;
+	return i;
+}
+
+static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *c = tx->chan;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_cookie_t cookie = c->cookie;
+
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+	tx->cookie = cookie;
+	c->cookie = cookie;
+	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
+
+	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	ioat2_update_pending(ioat);
+	spin_unlock_bh(&ioat->ring_lock);
+
+	return cookie;
+}
+
+static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+{
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	struct ioatdma_device *dma;
+	dma_addr_t phys;
+
+	dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
+	if (!hw)
+		return NULL;
+	memset(hw, 0, sizeof(*hw));
+
+	desc = kzalloc(sizeof(*desc), flags);
+	if (!desc) {
+		pci_pool_free(dma->dma_pool, hw, phys);
+		return NULL;
+	}
+
+	dma_async_tx_descriptor_init(&desc->txd, chan);
+	desc->txd.tx_submit = ioat2_tx_submit_unlock;
+	desc->hw = hw;
+	desc->txd.phys = phys;
+	return desc;
+}
+
+static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+	struct ioatdma_device *dma;
+
+	dma = to_ioatdma_device(chan->device);
+	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
+	kfree(desc);
+}
+
+static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+{
+	struct ioat_ring_ent **ring;
+	int descs = 1 << order;
+	int i;
+
+	if (order > ioat_get_max_alloc_order())
+		return NULL;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(descs, sizeof(*ring), flags);
+	if (!ring)
+		return NULL;
+	for (i = 0; i < descs; i++) {
+		ring[i] = ioat2_alloc_ring_ent(c, flags);
+		if (!ring[i]) {
+			while (i--)
+				ioat2_free_ring_ent(ring[i], c);
+			kfree(ring);
+			return NULL;
+		}
+		set_desc_id(ring[i], i);
+	}
+
+	/* link descs */
+	for (i = 0; i < descs-1; i++) {
+		struct ioat_ring_ent *next = ring[i+1];
+		struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+		hw->next = next->txd.phys;
+	}
+	ring[i]->hw->next = ring[0]->txd.phys;
+
+	return ring;
+}
+
+/**
+ * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
+ * @chan: channel to be initialized
+ */
+static int ioat2_alloc_chan_resources(struct dma_chan *c)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioat_ring_ent **ring;
+	u32 chanerr;
+	int order;
+
+	/* have we already been set up? */
+	if (ioat->ring)
+		return 1 << ioat->alloc_order;
+
+	/* Setup register to interrupt and write completion status on error */
+	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	if (chanerr) {
+		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+	}
+
+	/* allocate a completion writeback area */
+	/* doing 2 32-bit writes to mmio since 1 64-bit write doesn't work */
+	chan->completion = pci_pool_alloc(chan->device->completion_pool,
+					  GFP_KERNEL, &chan->completion_dma);
+	if (!chan->completion)
+		return -ENOMEM;
+
+	memset(chan->completion, 0, sizeof(*chan->completion));
+	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
+	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64) chan->completion_dma) >> 32,
+	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	order = ioat_get_alloc_order();
+	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
+	if (!ring)
+		return -ENOMEM;
+
+	spin_lock_bh(&ioat->ring_lock);
+	ioat->ring = ring;
+	ioat->head = 0;
+	ioat->issued = 0;
+	ioat->tail = 0;
+	ioat->pending = 0;
+	ioat->alloc_order = order;
+	spin_unlock_bh(&ioat->ring_lock);
+
+	tasklet_enable(&chan->cleanup_task);
+	ioat2_start_null_desc(ioat);
+
+	return 1 << ioat->alloc_order;
+}
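
The completion address above (and the chain address in ioat2_set_chainaddr later) is programmed as two 32-bit MMIO writes because a single 64-bit write doesn't work on this hardware. A sketch of the decomposition against a plain struct instead of iomem (struct regs is hypothetical):

#include <assert.h>
#include <stdint.h>

struct regs { uint32_t addr_lo; uint32_t addr_hi; };	/* mmio stand-in */

/* Same split as the CHANCMP writes: low 32 bits, then high 32 bits. */
static void write_addr64(struct regs *r, uint64_t addr)
{
	r->addr_lo = (uint32_t)(addr & 0x00000000FFFFFFFFULL);
	r->addr_hi = (uint32_t)(addr >> 32);
}

int main(void)
{
	struct regs r;

	write_addr64(&r, 0x12345678abcdef00ULL);
	assert(r.addr_lo == 0xabcdef00u && r.addr_hi == 0x12345678u);
	return 0;
}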
+
+static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
+{
+	/* reshape differs from normal ring allocation in that we want
+	 * to allocate a new software ring while only
+	 * extending/truncating the hardware ring
+	 */
+	struct ioat_chan_common *chan = &ioat->base;
+	struct dma_chan *c = &chan->common;
+	const u16 curr_size = ioat2_ring_mask(ioat) + 1;
+	const u16 active = ioat2_ring_active(ioat);
+	const u16 new_size = 1 << order;
+	struct ioat_ring_ent **ring;
+	u16 i;
+
+	if (order > ioat_get_max_alloc_order())
+		return false;
+
+	/* double check that we have at least 1 free descriptor */
+	if (active == curr_size)
+		return false;
+
+	/* when shrinking, verify that we can hold the current active
+	 * set in the new ring
+	 */
+	if (active >= new_size)
+		return false;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+	if (!ring)
+		return false;
+
+	/* allocate/trim descriptors as needed */
+	if (new_size > curr_size) {
+		/* copy current descriptors to the new ring */
+		for (i = 0; i < curr_size; i++) {
+			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* add new descriptors to the ring */
+		for (i = curr_size; i < new_size; i++) {
+			u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
+			if (!ring[new_idx]) {
+				while (i-- > curr_size) {
+					u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+					ioat2_free_ring_ent(ring[new_idx], c);
+				}
+				kfree(ring);
+				return false;
+			}
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* hw link new descriptors */
+		for (i = curr_size-1; i < new_size; i++) {
+			u16 new_idx = (ioat->tail+i) & (new_size-1);
+			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
+			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
+
+			hw->next = next->txd.phys;
+		}
+	} else {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_ring_ent *next;
+
+		/* copy current descriptors to the new ring, dropping the
+		 * removed descriptors
+		 */
+		for (i = 0; i < new_size; i++) {
+			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* free deleted descriptors */
+		for (i = new_size; i < curr_size; i++) {
+			struct ioat_ring_ent *ent;
+
+			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
+			ioat2_free_ring_ent(ent, c);
+		}
+
+		/* fix up hardware ring */
+		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
+		next = ring[(ioat->tail+new_size) & (new_size-1)];
+		hw->next = next->txd.phys;
+	}
+
+	dev_dbg(to_dev(chan), "%s: resized ring to %d descriptors\n",
+		__func__, new_size);
+
+	kfree(ioat->ring);
+	ioat->ring = ring;
+	ioat->alloc_order = order;
+
+	return true;
+}
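
reshape_ring keeps every in-flight entry at the same logical offset from 'tail'; only the physical slot changes with the new power-of-two mask. A sketch of why (tail + i) & (size - 1) preserves ordering across a grow (slot is a hypothetical helper):

#include <assert.h>
#include <stdint.h>

/* Logical position i past tail -> physical ring index, as recomputed
 * by reshape_ring for both the old and the new mask. */
static uint16_t slot(uint16_t tail, uint16_t i, uint16_t size)
{
	return (tail + i) & (size - 1);
}

int main(void)
{
	uint16_t tail = 6;

	/* the entry 3 past tail sits in slot 1 of an 8-entry ring and
	 * slot 9 after doubling, but stays 3 past tail in both rings */
	assert(slot(tail, 3, 8) == 1);
	assert(slot(tail, 3, 16) == 9);
	return 0;
}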
+
+/**
+ * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
+ * @idx: gets starting descriptor index on successful allocation
+ * @ioat: ioat2,3 channel (ring) to operate on
+ * @num_descs: allocation length
+ */
+static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+
+	spin_lock_bh(&ioat->ring_lock);
+	/* never allow the last descriptor to be consumed, we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+	while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
+		if (reshape_ring(ioat, ioat->alloc_order + 1) &&
+		    ioat2_ring_space(ioat) > num_descs)
+			break;
+
+		if (printk_ratelimit())
+			dev_dbg(to_dev(chan),
+				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
+				__func__, num_descs, ioat->head, ioat->tail,
+				ioat->issued);
+		spin_unlock_bh(&ioat->ring_lock);
+
+		/* make progress on reclaim; in the allocation failure
+		 * case we may be called under bh_disabled, so we need
+		 * to trigger the timer event directly
+		 */
+		spin_lock_bh(&chan->cleanup_lock);
+		if (time_after(jiffies, chan->timer.expires) &&
+		    timer_pending(&chan->timer)) {
+			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+			spin_unlock_bh(&chan->cleanup_lock);
+			ioat2_timer_event((unsigned long) ioat);
+		} else
+			spin_unlock_bh(&chan->cleanup_lock);
+		return -ENOMEM;
+	}
+
+	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
+		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+
+	*idx = ioat2_desc_alloc(ioat, num_descs);
+	return 0;  /* with ioat->ring_lock held */
+}
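
Note the <= in the space check above: the ring never hands out its last free slot, so an on-the-fly reshape always has a spare descriptor to work with. A sketch of that invariant (can_alloc is hypothetical):

#include <assert.h>
#include <stdint.h>

/* space = capacity - active; allocation requires space > wanted,
 * never space >= wanted, so one descriptor always stays free. */
static int can_alloc(uint16_t capacity, uint16_t active, uint16_t wanted)
{
	uint16_t space = capacity - active;

	return space > wanted;
}

int main(void)
{
	assert(can_alloc(16, 0, 15));	/* leaves exactly one slot */
	assert(!can_alloc(16, 0, 16));	/* would consume the last slot */
	return 0;
}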
+
+static struct dma_async_tx_descriptor *
+ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	dma_addr_t dst = dma_dest;
+	dma_addr_t src = dma_src;
+	size_t total_len = len;
+	int num_descs;
+	u16 idx;
+	int i;
+
+	num_descs = ioat2_xferlen_to_descs(ioat, len);
+	if (likely(num_descs) &&
+	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
+		/* pass */;
+	else
+		return NULL;
+	for (i = 0; i < num_descs; i++) {
+		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+		desc = ioat2_get_ring_ent(ioat, idx + i);
+		hw = desc->hw;
+
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		dump_desc_dbg(ioat, desc);
+	}
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat, desc);
+	/* we leave the channel locked to ensure in-order submission */
+
+	return &desc->txd;
+}
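
The prep loop splits one logical copy into descriptors of at most 1 << xfercap_log bytes, advancing source and destination in lockstep. A user-space sketch of the chunking (XFERCAP_LOG of 12 is an assumed value):

#include <assert.h>
#include <stddef.h>

#define XFERCAP_LOG 12	/* assume a 4 KB per-descriptor cap */

/* Consume len the same way the prep loop does and count descriptors. */
static int count_chunks(size_t len)
{
	size_t max_copy = (size_t)1 << XFERCAP_LOG;
	int n = 0;

	while (len) {
		size_t copy = len < max_copy ? len : max_copy;

		len -= copy;
		n++;
	}
	return n;
}

int main(void)
{
	assert(count_chunks(4096) == 1);
	assert(count_chunks(4097) == 2);	/* full chunk + remainder */
	return 0;
}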
+
+/**
+ * ioat2_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+static void ioat2_free_chan_resources(struct dma_chan *c)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *ioatdma_device = chan->device;
+	struct ioat_ring_ent *desc;
+	const u16 total_descs = 1 << ioat->alloc_order;
+	int descs;
+	int i;
+
+	/* Before freeing channel resources, first check whether they
+	 * have been previously allocated for this channel.
+	 */
+	if (!ioat->ring)
+		return;
+
+	tasklet_disable(&chan->cleanup_task);
+	del_timer_sync(&chan->timer);
+	ioat2_cleanup(ioat);
+
+	/* Delay 100ms after reset to allow internal DMA logic to quiesce
+	 * before removing DMA descriptor resources.
+	 */
+	writeb(IOAT_CHANCMD_RESET,
+	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+	mdelay(100);
+
+	spin_lock_bh(&ioat->ring_lock);
+	descs = ioat2_ring_space(ioat);
+	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
+	for (i = 0; i < descs; i++) {
+		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
+		ioat2_free_ring_ent(desc, c);
+	}
+
+	if (descs < total_descs)
+		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
+			total_descs - descs);
+
+	for (i = 0; i < total_descs - descs; i++) {
+		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+		dump_desc_dbg(ioat, desc);
+		ioat2_free_ring_ent(desc, c);
+	}
+
+	kfree(ioat->ring);
+	ioat->ring = NULL;
+	ioat->alloc_order = 0;
+	pci_pool_free(ioatdma_device->completion_pool,
+		      chan->completion,
+		      chan->completion_dma);
+	spin_unlock_bh(&ioat->ring_lock);
+
+	chan->last_completion = 0;
+	chan->completion_dma = 0;
+	ioat->pending = 0;
+	ioat->dmacount = 0;
+}
+
+static enum dma_status
+ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+		  dma_cookie_t *done, dma_cookie_t *used)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+
+	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+		return DMA_SUCCESS;
+
+	ioat2_cleanup(ioat);
+
+	return ioat_is_complete(c, cookie, done, used);
+}
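
ioat2_is_complete tries the cached completion state first and only runs the heavier cleanup pass, then rechecks, when the fast path misses. A sketch of that check/cleanup/recheck shape with hypothetical stand-ins for the ioat helpers:

#include <assert.h>

enum status { IN_PROGRESS, SUCCESS };

static int last_done;		/* cached completion state */
static int hw_done = 5;		/* what the "hardware" finished */

static enum status check(int cookie)	/* ~ ioat_is_complete() */
{
	return cookie <= last_done ? SUCCESS : IN_PROGRESS;
}

static void cleanup(void)	/* ~ ioat2_cleanup(): reap progress */
{
	last_done = hw_done;
}

static enum status is_complete(int cookie)
{
	if (check(cookie) == SUCCESS)	/* fast path: cached state */
		return SUCCESS;
	cleanup();			/* slow path: reap, then recheck */
	return check(cookie);
}

int main(void)
{
	assert(is_complete(3) == SUCCESS);	/* seen only after cleanup */
	assert(is_complete(9) == IN_PROGRESS);
	return 0;
}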
+
+int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioat_chan_common *chan;
+	int err;
+
+	device->enumerate_channels = ioat2_enumerate_channels;
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat2_issue_pending;
+	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat2_free_chan_resources;
+	dma->device_is_tx_complete = ioat2_is_complete;
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(2048);
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		chan = to_chan_common(c);
+		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
+		       chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat2_dca_init(pdev, device->reg_base);
+
+	return err;
+}
+
+int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioat_chan_common *chan;
+	int err;
+	u16 dev_id;
+
+	device->enumerate_channels = ioat2_enumerate_channels;
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat2_issue_pending;
+	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat2_free_chan_resources;
+	dma->device_is_tx_complete = ioat2_is_complete;
+
+	/* -= IOAT ver.3 workarounds =- */
+	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+	 * that can cause stability issues for IOAT ver.3
+	 */
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+
+	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+	 * (workaround for spurious config parity error after restart)
+	 */
+	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(262144);
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		chan = to_chan_common(c);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat3_dca_init(pdev, device->reg_base);
+
+	return err;
+}

+ 146 - 0
drivers/dma/ioat/dma_v2.h

@@ -0,0 +1,146 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef IOATDMA_V2_H
+#define IOATDMA_V2_H
+
+#include <linux/dmaengine.h>
+#include "dma.h"
+#include "hw.h"
+
+extern int ioat_pending_level;
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+/**
+ * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
+ * @base: common ioat channel parameters
+ * @xfercap_log: log2 of channel max transfer length (for fast division)
+ * @head: allocated index
+ * @issued: hardware notification point
+ * @tail: cleanup index
+ * @pending: lock free indicator for issued != head
+ * @dmacount: identical to 'head' except for occasionally resetting to zero
+ * @alloc_order: log2 of the number of allocated descriptors
+ * @ring: software ring buffer implementation of hardware ring
+ * @ring_lock: protects ring attributes
+ */
+struct ioat2_dma_chan {
+	struct ioat_chan_common base;
+	size_t xfercap_log;
+	u16 head;
+	u16 issued;
+	u16 tail;
+	u16 dmacount;
+	u16 alloc_order;
+	int pending;
+	struct ioat_ring_ent **ring;
+	spinlock_t ring_lock;
+};
+
+static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
+{
+	struct ioat_chan_common *chan = to_chan_common(c);
+
+	return container_of(chan, struct ioat2_dma_chan, base);
+}
+
+static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+{
+	return (1 << ioat->alloc_order) - 1;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
+{
+	return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
+{
+	return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+}
+
+static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+{
+	u16 num_descs = ioat2_ring_mask(ioat) + 1;
+	u16 active = ioat2_ring_active(ioat);
+
+	BUG_ON(active > num_descs);
+
+	return num_descs - active;
+}
+
+/* assumes caller already checked space */
+static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
+{
+	ioat->head += len;
+	return ioat->head - len;
+}
+
+static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
+{
+	u16 num_descs = len >> ioat->xfercap_log;
+
+	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
+	return num_descs;
+}
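
These helpers lean on two idioms: masked unsigned subtraction yields the in-flight count even after 'head' wraps past 'tail', and the descriptor count is a ceiling division done as a shift plus a remainder test. A sketch of both (ORDER and XFERCAP_LOG are assumed values):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ORDER 4				/* 16-entry ring */
#define MASK ((1u << ORDER) - 1)
#define XFERCAP_LOG 12			/* assume a 4 KB cap */

static uint16_t ring_active(uint16_t head, uint16_t tail)
{
	return (head - tail) & MASK;	/* correct across u16 wrap */
}

static uint16_t xferlen_to_descs(size_t len)
{
	uint16_t n = len >> XFERCAP_LOG;

	n += !!(len & ((1u << XFERCAP_LOG) - 1));	/* round up */
	return n;
}

int main(void)
{
	assert(ring_active(3, 0xfffd) == 6);	/* head wrapped past tail */
	assert(xferlen_to_descs(8192) == 2);
	assert(xferlen_to_descs(8193) == 3);
	return 0;
}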
+
+struct ioat_ring_ent {
+	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor txd;
+	size_t len;
+	#ifdef DEBUG
+	int id;
+	#endif
+};
+
+static inline struct ioat_ring_ent *
+ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
+{
+	return ioat->ring[idx & ioat2_ring_mask(ioat)];
+}
+
+static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+
+	writel(addr & 0x00000000FFFFFFFF,
+	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
+
+int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
+int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+#endif /* IOATDMA_V2_H */

+ 24 - 21
drivers/dma/ioatdma_hw.h → drivers/dma/ioat/hw.h

@@ -23,6 +23,7 @@
 
 /* PCI Configuration Space Values */
 #define IOAT_PCI_VID            0x8086
+#define IOAT_MMIO_BAR		0
 
 /* CB device ID's */
 #define IOAT_PCI_DID_5000       0x1A38
@@ -39,32 +40,34 @@
 
 struct ioat_dma_descriptor {
 	uint32_t	size;
-	uint32_t	ctl;
+	union {
+		uint32_t ctl;
+		struct {
+			unsigned int int_en:1;
+			unsigned int src_snoop_dis:1;
+			unsigned int dest_snoop_dis:1;
+			unsigned int compl_write:1;
+			unsigned int fence:1;
+			unsigned int null:1;
+			unsigned int src_brk:1;
+			unsigned int dest_brk:1;
+			unsigned int bundle:1;
+			unsigned int dest_dca:1;
+			unsigned int hint:1;
+			unsigned int rsvd2:13;
+			unsigned int op:8;
+		} ctl_f;
+	};
 	uint64_t	src_addr;
 	uint64_t	dst_addr;
 	uint64_t	next;
 	uint64_t	rsv1;
 	uint64_t	rsv2;
-	uint64_t	user1;
+	/* store some driver data in an unused portion of the descriptor */
+	union {
+		uint64_t	user1;
+		uint64_t	tx_cnt;
+	};
 	uint64_t	user2;
 };
-
-#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN	0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN	0x00000002
-#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN	0x00000004
-#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS	0x00000008
-#define IOAT_DMA_DESCRIPTOR_CTL_FRAME	0x00000010
-#define IOAT_DMA_DESCRIPTOR_NUL		0x00000020
-#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK	0x00000040
-#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK	0x00000080
-#define IOAT_DMA_DESCRIPTOR_CTL_BNDL	0x00000100
-#define IOAT_DMA_DESCRIPTOR_CTL_DCA	0x00000200
-#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT	0x00000400
-
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT	0xFF000000
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA	0x00000000
-
-#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA	0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK	0xFF000000
-
 #endif
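
The new union lets the control word be cleared wholesale (hw->ctl = 0) and then set field-by-field (hw->ctl_f.compl_write = 1), replacing the old mask #defines. A sketch of the equivalence; bitfield order is implementation-defined, which the driver can tolerate since it targets one ABI, and the assertion below assumes the usual LSB-first layout where int_en is bit 0 and compl_write bit 3 (matching the removed CTL_INT_GN and CTL_CP_STS masks):

#include <assert.h>
#include <stdint.h>

struct desc_ctl {
	union {
		uint32_t ctl;
		struct {
			unsigned int int_en:1;
			unsigned int src_snoop_dis:1;
			unsigned int dest_snoop_dis:1;
			unsigned int compl_write:1;
			unsigned int rest:28;
		} ctl_f;
	};
};

int main(void)
{
	struct desc_ctl d = { .ctl = 0 };

	d.ctl_f.int_en = 1;		/* was CTL_INT_GN, 0x00000001 */
	d.ctl_f.compl_write = 1;	/* was CTL_CP_STS, 0x00000008 */
	assert(d.ctl == 0x9);		/* assumes LSB-first bitfields */
	return 0;
}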

+ 62 - 85
drivers/dma/ioat.c → drivers/dma/ioat/pci.c

@@ -30,9 +30,10 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/dca.h>
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-#include "ioatdma_hw.h"
+#include "dma.h"
+#include "dma_v2.h"
+#include "registers.h"
+#include "hw.h"
 
 MODULE_VERSION(IOAT_DMA_VERSION);
 MODULE_LICENSE("GPL");
@@ -60,119 +61,101 @@ static struct pci_device_id ioat_pci_tbl[] = {
 	{ 0, }
 };
 
-struct ioat_device {
-	struct pci_dev		*pdev;
-	void __iomem		*iobase;
-	struct ioatdma_device	*dma;
-	struct dca_provider	*dca;
-};
-
-static int __devinit ioat_probe(struct pci_dev *pdev,
-				const struct pci_device_id *id);
+static int __devinit ioat_pci_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *id);
 static void __devexit ioat_remove(struct pci_dev *pdev);
 
 static int ioat_dca_enabled = 1;
 module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
 
+#define DRV_NAME "ioatdma"
+
 static struct pci_driver ioat_pci_driver = {
-	.name		= "ioatdma",
+	.name		= DRV_NAME,
 	.id_table	= ioat_pci_tbl,
-	.probe		= ioat_probe,
+	.probe		= ioat_pci_probe,
 	.remove		= __devexit_p(ioat_remove),
 };
 
-static int __devinit ioat_probe(struct pci_dev *pdev,
-				const struct pci_device_id *id)
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
 {
-	void __iomem *iobase;
-	struct ioat_device *device;
-	unsigned long mmio_start, mmio_len;
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+	if (!d)
+		return NULL;
+	d->pdev = pdev;
+	d->reg_base = iobase;
+	return d;
+}
+
+static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	void __iomem * const *iomap;
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *device;
 	int err;
 
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
 	if (err)
-		goto err_enable_device;
+		return err;
 
-	err = pci_request_regions(pdev, ioat_pci_driver.name);
+	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
 	if (err)
-		goto err_request_regions;
+		return err;
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err)
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (err)
-		goto err_set_dma_mask;
+		return err;
 
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err)
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (err)
-		goto err_set_dma_mask;
-
-	mmio_start = pci_resource_start(pdev, 0);
-	mmio_len = pci_resource_len(pdev, 0);
-	iobase = ioremap(mmio_start, mmio_len);
-	if (!iobase) {
-		err = -ENOMEM;
-		goto err_ioremap;
-	}
+		return err;
 
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
-		err = -ENOMEM;
-		goto err_kzalloc;
-	}
-	device->pdev = pdev;
-	pci_set_drvdata(pdev, device);
-	device->iobase = iobase;
 
 	pci_set_master(pdev);
 
-	switch (readb(iobase + IOAT_VER_OFFSET)) {
-	case IOAT_VER_1_2:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat_dca_init(pdev, iobase);
-		break;
-	case IOAT_VER_2_0:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat2_dca_init(pdev, iobase);
-		break;
-	case IOAT_VER_3_0:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat3_dca_init(pdev, iobase);
-		break;
-	default:
-		err = -ENODEV;
-		break;
-	}
-	if (!device->dma)
-		err = -ENODEV;
+	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+	if (!device)
+		return -ENOMEM;
+	pci_set_drvdata(pdev, device);
 
-	if (err)
-		goto err_version;
+	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	if (device->version == IOAT_VER_1_2)
+		err = ioat1_dma_probe(device, ioat_dca_enabled);
+	else if (device->version == IOAT_VER_2_0)
+		err = ioat2_dma_probe(device, ioat_dca_enabled);
+	else if (device->version >= IOAT_VER_3_0)
+		err = ioat3_dma_probe(device, ioat_dca_enabled);
+	else
+		return -ENODEV;
+
+	if (err) {
+		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+		return -ENODEV;
+	}
 
 	return 0;
-
-err_version:
-	kfree(device);
-err_kzalloc:
-	iounmap(iobase);
-err_ioremap:
-err_set_dma_mask:
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-err_request_regions:
-err_enable_device:
-	return err;
 }
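
The probe conversion replaces the goto-unwind ladder with managed (pcim_*/devm_*) allocations: every resource is registered with the device, so each early 'return err' is safe and teardown happens in reverse registration order. A toy model of that pattern (devres_add/put_device here are simplified stand-ins, not the kernel API's real signatures):

#include <stdio.h>

#define MAX_RES 8
static void (*release[MAX_RES])(void);
static int nr_res;

/* Register a release action with the "device". */
static void devres_add(void (*fn)(void)) { release[nr_res++] = fn; }

/* Run release actions in reverse order, as the driver core does. */
static void put_device(void)
{
	while (nr_res)
		release[--nr_res]();
}

static void free_regions(void) { puts("release regions"); }
static void free_iomap(void)   { puts("unmap bar"); }

static int probe(int fail_late)
{
	devres_add(free_regions);	/* like pcim_iomap_regions() */
	devres_add(free_iomap);		/* like the pcim iomap table */
	if (fail_late)
		return -1;		/* no goto-unwind ladder needed */
	return 0;
}

int main(void)
{
	if (probe(1))
		put_device();		/* the driver core does this */
	return 0;
}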
 
 static void __devexit ioat_remove(struct pci_dev *pdev)
 {
-	struct ioat_device *device = pci_get_drvdata(pdev);
+	struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
 
 	dev_err(&pdev->dev, "Removing dma and dca services\n");
 	if (device->dca) {
@@ -180,13 +163,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
 		free_dca_provider(device->dca);
 		device->dca = NULL;
 	}
-
-	if (device->dma) {
-		ioat_dma_remove(device->dma);
-		device->dma = NULL;
-	}
-
-	kfree(device);
+	ioat_dma_remove(device);
 }
 
 static int __init ioat_init_module(void)

+ 19 - 15
drivers/dma/ioatdma_registers.h → drivers/dma/ioat/registers.h

@@ -75,7 +75,11 @@
 #define IOAT_CHANCTRL_ERR_INT_EN		0x0010
 #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN		0x0008
 #define IOAT_CHANCTRL_ERR_COMPLETION_EN		0x0004
-#define IOAT_CHANCTRL_INT_DISABLE		0x0001
+#define IOAT_CHANCTRL_INT_REARM			0x0001
+#define IOAT_CHANCTRL_RUN			(IOAT_CHANCTRL_INT_REARM |\
+						 IOAT_CHANCTRL_ERR_COMPLETION_EN |\
+						 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
+						 IOAT_CHANCTRL_ERR_INT_EN)
 
 #define IOAT_DMA_COMP_OFFSET			0x02	/* 16-bit DMA channel compatibility */
 #define IOAT_DMA_COMP_V1			0x0001	/* Compatibility with DMA version 1 */
@@ -94,14 +98,14 @@
 #define IOAT2_CHANSTS_OFFSET_HIGH	0x0C
 #define IOAT_CHANSTS_OFFSET_HIGH(ver)		((ver) < IOAT_VER_2_0 \
 						? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	~0x3F
-#define IOAT_CHANSTS_SOFT_ERR			0x0000000000000010
-#define IOAT_CHANSTS_UNAFFILIATED_ERR		0x0000000000000008
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS	0x0000000000000007
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE	0x0
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE	0x1
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED	0x2
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED	0x3
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	(~0x3fULL)
+#define IOAT_CHANSTS_SOFT_ERR			0x10ULL
+#define IOAT_CHANSTS_UNAFFILIATED_ERR		0x8ULL
+#define IOAT_CHANSTS_STATUS	0x7ULL
+#define IOAT_CHANSTS_ACTIVE	0x0
+#define IOAT_CHANSTS_DONE	0x1
+#define IOAT_CHANSTS_SUSPENDED	0x2
+#define IOAT_CHANSTS_HALTED	0x3
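
The renamed constants split the 64-bit channel status register into a completed-descriptor address (64-byte aligned, bits [63:6]) and a 3-bit transfer status. A sketch of the decode (the readout value is made up):

#include <assert.h>
#include <stdint.h>

#define CHANSTS_COMPLETED_ADDR	(~0x3fULL)	/* bits [63:6] */
#define CHANSTS_STATUS		0x7ULL		/* bits [2:0]  */
#define CHANSTS_HALTED		0x3

int main(void)
{
	uint64_t chansts = 0x1000ULL | CHANSTS_HALTED;	/* fake readout */

	assert((chansts & CHANSTS_COMPLETED_ADDR) == 0x1000ULL);
	assert((chansts & CHANSTS_STATUS) == CHANSTS_HALTED);
	return 0;
}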
 
 
 
@@ -204,18 +208,18 @@
 #define IOAT_CDAR_OFFSET_HIGH			0x24
 
 #define IOAT_CHANERR_OFFSET			0x28	/* 32-bit Channel Error Register */
-#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR	0x0001
-#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR	0x0002
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR	0x0004
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR	0x0008
+#define IOAT_CHANERR_SRC_ADDR_ERR	0x0001
+#define IOAT_CHANERR_DEST_ADDR_ERR	0x0002
+#define IOAT_CHANERR_NEXT_ADDR_ERR	0x0004
+#define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR	0x0008
 #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR	0x0010
 #define IOAT_CHANERR_CHANCMD_ERR		0x0020
 #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR	0x0040
 #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR	0x0080
 #define IOAT_CHANERR_READ_DATA_ERR		0x0100
 #define IOAT_CHANERR_WRITE_DATA_ERR		0x0200
-#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR	0x0400
-#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR	0x0800
+#define IOAT_CHANERR_CONTROL_ERR	0x0400
+#define IOAT_CHANERR_LENGTH_ERR	0x0800
 #define IOAT_CHANERR_COMPLETION_ADDR_ERR	0x1000
 #define IOAT_CHANERR_INT_CONFIGURATION_ERR	0x2000
 #define IOAT_CHANERR_SOFT_ERR			0x4000

+ 0 - 1741
drivers/dma/ioat_dma.c

@@ -1,1741 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/i7300_idle.h>
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-#include "ioatdma_hw.h"
-
-#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
-
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
-static int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
-		 "high-water mark for pushing ioat descriptors (default: 4)");
-
-#define RESET_DELAY  msecs_to_jiffies(100)
-#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
-static void ioat_dma_chan_reset_part2(struct work_struct *work);
-static void ioat_dma_chan_watchdog(struct work_struct *work);
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-/* internal functions */
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-
-static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
-						struct ioatdma_device *device,
-						int index)
-{
-	return device->idx[index];
-}
-
-/**
- * ioat_dma_do_interrupt - handler used for single vector interrupt mode
- * @irq: interrupt id
- * @data: interrupt data
- */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
-{
-	struct ioatdma_device *instance = data;
-	struct ioat_dma_chan *ioat_chan;
-	unsigned long attnstatus;
-	int bit;
-	u8 intrctrl;
-
-	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
-
-	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
-		return IRQ_NONE;
-
-	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
-		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-		return IRQ_NONE;
-	}
-
-	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
-	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
-		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
-		tasklet_schedule(&ioat_chan->cleanup_task);
-	}
-
-	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-	return IRQ_HANDLED;
-}
-
-/**
- * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
- * @irq: interrupt id
- * @data: interrupt data
- */
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
-{
-	struct ioat_dma_chan *ioat_chan = data;
-
-	tasklet_schedule(&ioat_chan->cleanup_task);
-
-	return IRQ_HANDLED;
-}
-
-static void ioat_dma_cleanup_tasklet(unsigned long data);
-
-/**
- * ioat_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
-{
-	u8 xfercap_scale;
-	u32 xfercap;
-	int i;
-	struct ioat_dma_chan *ioat_chan;
-
-	/*
-	 * IOAT ver.3 workarounds
-	 */
-	if (device->version == IOAT_VER_3_0) {
-		u32 chan_err_mask;
-		u16 dev_id;
-		u32 dmauncerrsts;
-
-		/*
-		 * Write CHANERRMSK_INT with 3E07h to mask out the errors
-		 * that can cause stability issues for IOAT ver.3
-		 */
-		chan_err_mask = 0x3E07;
-		pci_write_config_dword(device->pdev,
-			IOAT_PCI_CHANERRMASK_INT_OFFSET,
-			chan_err_mask);
-
-		/*
-		 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-		 * (workaround for spurious config parity error after restart)
-		 */
-		pci_read_config_word(device->pdev,
-			IOAT_PCI_DEVICE_ID_OFFSET,
-			&dev_id);
-		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-			dmauncerrsts = 0x10;
-			pci_write_config_dword(device->pdev,
-				IOAT_PCI_DMAUNCERRSTS_OFFSET,
-				dmauncerrsts);
-		}
-	}
-
-	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-
-#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
-		device->common.chancnt--;
-	}
-#endif
-	for (i = 0; i < device->common.chancnt; i++) {
-		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
-		if (!ioat_chan) {
-			device->common.chancnt = i;
-			break;
-		}
-
-		ioat_chan->device = device;
-		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
-		ioat_chan->xfercap = xfercap;
-		ioat_chan->desccount = 0;
-		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-		if (ioat_chan->device->version == IOAT_VER_2_0)
-			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
-			       IOAT_DMA_DCA_ANY_CPU,
-			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-		else if (ioat_chan->device->version == IOAT_VER_3_0)
-			writel(IOAT_DMA_DCA_ANY_CPU,
-			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-		spin_lock_init(&ioat_chan->cleanup_lock);
-		spin_lock_init(&ioat_chan->desc_lock);
-		INIT_LIST_HEAD(&ioat_chan->free_desc);
-		INIT_LIST_HEAD(&ioat_chan->used_desc);
-		/* This should be made common somewhere in dmaengine.c */
-		ioat_chan->common.device = &device->common;
-		list_add_tail(&ioat_chan->common.device_node,
-			      &device->common.channels);
-		device->idx[i] = ioat_chan;
-		tasklet_init(&ioat_chan->cleanup_task,
-			     ioat_dma_cleanup_tasklet,
-			     (unsigned long) ioat_chan);
-		tasklet_disable(&ioat_chan->cleanup_task);
-	}
-	return device->common.chancnt;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending > 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writew(ioat_chan->dmacount,
-	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending > 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
-}
-
-
-/**
- * ioat_dma_chan_reset_part2 - reinit the channel after a reset
- */
-static void ioat_dma_chan_reset_part2(struct work_struct *work)
-{
-	struct ioat_dma_chan *ioat_chan =
-		container_of(work, struct ioat_dma_chan, work.work);
-	struct ioat_desc_sw *desc;
-
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->desc_lock);
-
-	ioat_chan->completion_virt->low = 0;
-	ioat_chan->completion_virt->high = 0;
-	ioat_chan->pending = 0;
-
-	/*
-	 * count the descriptors waiting, and be sure to do it
-	 * right for both the CB1 line and the CB2 ring
-	 */
-	ioat_chan->dmacount = 0;
-	if (ioat_chan->used_desc.prev) {
-		desc = to_ioat_desc(ioat_chan->used_desc.prev);
-		do {
-			ioat_chan->dmacount++;
-			desc = to_ioat_desc(desc->node.next);
-		} while (&desc->node != ioat_chan->used_desc.next);
-	}
-
-	/*
-	 * write the new starting descriptor address
-	 * this puts channel engine into ARMED state
-	 */
-	desc = to_ioat_desc(ioat_chan->used_desc.prev);
-	switch (ioat_chan->device->version) {
-	case IOAT_VER_1_2:
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
-		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-		break;
-	case IOAT_VER_2_0:
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
-		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
-		/* tell the engine to go with what's left to be done */
-		writew(ioat_chan->dmacount,
-		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-
-		break;
-	}
-	dev_err(&ioat_chan->device->pdev->dev,
-		"chan%d reset - %d descs waiting, %d total desc\n",
-		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-
-	spin_unlock_bh(&ioat_chan->desc_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
-
-/**
- * ioat_dma_reset_channel - restart a channel
- * @ioat_chan: IOAT DMA channel handle
- */
-static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
-{
-	u32 chansts, chanerr;
-
-	if (!ioat_chan->used_desc.prev)
-		return;
-
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	chansts = (ioat_chan->completion_virt->low
-					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
-	if (chanerr) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(ioat_chan), chansts, chanerr);
-		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/*
-	 * whack it upside the head with a reset
-	 * and wait for things to settle out.
-	 * force the pending count to a really big negative
-	 * to make sure no one forces an issue_pending
-	 * while we're waiting.
-	 */
-
-	spin_lock_bh(&ioat_chan->desc_lock);
-	ioat_chan->pending = INT_MIN;
-	writeb(IOAT_CHANCMD_RESET,
-	       ioat_chan->reg_base
-	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	/* schedule the 2nd half instead of sleeping a long time */
-	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
-}
-
-/**
- * ioat_dma_chan_watchdog - watch for stuck channels
- */
-static void ioat_dma_chan_watchdog(struct work_struct *work)
-{
-	struct ioatdma_device *device =
-		container_of(work, struct ioatdma_device, work.work);
-	struct ioat_dma_chan *ioat_chan;
-	int i;
-
-	union {
-		u64 full;
-		struct {
-			u32 low;
-			u32 high;
-		};
-	} completion_hw;
-	unsigned long compl_desc_addr_hw;
-
-	for (i = 0; i < device->common.chancnt; i++) {
-		ioat_chan = ioat_lookup_chan_by_index(device, i);
-
-		if (ioat_chan->device->version == IOAT_VER_1_2
-			/* have we started processing anything yet */
-		    && ioat_chan->last_completion
-			/* have we completed any since last watchdog cycle? */
-		    && (ioat_chan->last_completion ==
-				ioat_chan->watchdog_completion)
-			/* has TCP stuck on one cookie since last watchdog? */
-		    && (ioat_chan->watchdog_tcp_cookie ==
-				ioat_chan->watchdog_last_tcp_cookie)
-		    && (ioat_chan->watchdog_tcp_cookie !=
-				ioat_chan->completed_cookie)
-			/* is there something in the chain to be processed? */
-			/* CB1 chain always has at least the last one processed */
-		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
-		    && ioat_chan->pending == 0) {
-
-			/*
-			 * check CHANSTS register for completed
-			 * descriptor address.
-			 * if it is different than completion writeback,
-			 * it is not zero
-			 * and it has changed since the last watchdog
-			 *     we can assume that channel
-			 *     is still working correctly
-			 *     and the problem is in completion writeback.
-			 *     update completion writeback
-			 *     with actual CHANSTS value
-			 * else
-			 *     try resetting the channel
-			 */
-
-			completion_hw.low = readl(ioat_chan->reg_base +
-				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
-			completion_hw.high = readl(ioat_chan->reg_base +
-				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
-#if (BITS_PER_LONG == 64)
-			compl_desc_addr_hw =
-				completion_hw.full
-				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
-			compl_desc_addr_hw =
-				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
-			if ((compl_desc_addr_hw != 0)
-			   && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
-			   && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
-				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
-				ioat_chan->completion_virt->low = completion_hw.low;
-				ioat_chan->completion_virt->high = completion_hw.high;
-			} else {
-				ioat_dma_reset_channel(ioat_chan);
-				ioat_chan->watchdog_completion = 0;
-				ioat_chan->last_compl_desc_addr_hw = 0;
-			}
-
-		/*
-		 * for version 2.0 if there are descriptors yet to be processed
-		 * and the last completed hasn't changed since the last watchdog
-		 *      if they haven't hit the pending level
-		 *          issue the pending to push them through
-		 *      else
-		 *          try resetting the channel
-		 */
-		} else if (ioat_chan->device->version == IOAT_VER_2_0
-		    && ioat_chan->used_desc.prev
-		    && ioat_chan->last_completion
-		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
-
-			if (ioat_chan->pending < ioat_pending_level)
-				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
-			else {
-				ioat_dma_reset_channel(ioat_chan);
-				ioat_chan->watchdog_completion = 0;
-			}
-		} else {
-			ioat_chan->last_compl_desc_addr_hw = 0;
-			ioat_chan->watchdog_completion
-					= ioat_chan->last_completion;
-		}
-
-		ioat_chan->watchdog_last_tcp_cookie =
-			ioat_chan->watchdog_tcp_cookie;
-	}
-
-	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
-	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
-	struct ioat_desc_sw *prev, *new;
-	struct ioat_dma_descriptor *hw;
-	dma_cookie_t cookie;
-	LIST_HEAD(new_chain);
-	u32 copy;
-	size_t len;
-	dma_addr_t src, dst;
-	unsigned long orig_flags;
-	unsigned int desc_count = 0;
-
-	/* src and dest and len are stored in the initial descriptor */
-	len = first->len;
-	src = first->src;
-	dst = first->dst;
-	orig_flags = first->async_tx.flags;
-	new = first;
-
-	spin_lock_bh(&ioat_chan->desc_lock);
-	prev = to_ioat_desc(ioat_chan->used_desc.prev);
-	prefetch(prev->hw);
-	do {
-		copy = min_t(size_t, len, ioat_chan->xfercap);
-
-		async_tx_ack(&new->async_tx);
-
-		hw = new->hw;
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dst;
-		hw->next = 0;
-
-		/* chain together the physical address list for the HW */
-		wmb();
-		prev->hw->next = (u64) new->async_tx.phys;
-
-		len -= copy;
-		dst += copy;
-		src += copy;
-
-		list_add_tail(&new->node, &new_chain);
-		desc_count++;
-		prev = new;
-	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
-
-	if (!new) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"tx submit failed\n");
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		return -ENOMEM;
-	}
-
-	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (first->async_tx.callback) {
-		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
-		if (first != new) {
-			/* move callback into to last desc */
-			new->async_tx.callback = first->async_tx.callback;
-			new->async_tx.callback_param
-					= first->async_tx.callback_param;
-			first->async_tx.callback = NULL;
-			first->async_tx.callback_param = NULL;
-		}
-	}
-
-	new->tx_cnt = desc_count;
-	new->async_tx.flags = orig_flags; /* client is in control of this ack */
-
-	/* store the original values for use in later cleanup */
-	if (new != first) {
-		new->src = first->src;
-		new->dst = first->dst;
-		new->len = first->len;
-	}
-
-	/* cookie incr and addition to used_list must be atomic */
-	cookie = ioat_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	ioat_chan->common.cookie = new->async_tx.cookie = cookie;
-
-	/* write address into NextDescriptor field of last desc in chain */
-	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
-							first->async_tx.phys;
-	list_splice_tail(&new_chain, &ioat_chan->used_desc);
-
-	ioat_chan->dmacount += desc_count;
-	ioat_chan->pending += desc_count;
-	if (ioat_chan->pending >= ioat_pending_level)
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	return cookie;
-}
-
-static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
-	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
-	struct ioat_desc_sw *new;
-	struct ioat_dma_descriptor *hw;
-	dma_cookie_t cookie;
-	u32 copy;
-	size_t len;
-	dma_addr_t src, dst;
-	unsigned long orig_flags;
-	unsigned int desc_count = 0;
-
-	/* src and dest and len are stored in the initial descriptor */
-	len = first->len;
-	src = first->src;
-	dst = first->dst;
-	orig_flags = first->async_tx.flags;
-	new = first;
-
-	/*
-	 * ioat_chan->desc_lock is still in force in version 2 path
-	 * it gets unlocked at end of this function
-	 */
-	do {
-		copy = min_t(size_t, len, ioat_chan->xfercap);
-
-		async_tx_ack(&new->async_tx);
-
-		hw = new->hw;
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dst;
-
-		len -= copy;
-		dst += copy;
-		src += copy;
-		desc_count++;
-	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
-
-	if (!new) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"tx submit failed\n");
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		return -ENOMEM;
-	}
-
-	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (first->async_tx.callback) {
-		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
-		if (first != new) {
-			/* move callback into to last desc */
-			new->async_tx.callback = first->async_tx.callback;
-			new->async_tx.callback_param
-					= first->async_tx.callback_param;
-			first->async_tx.callback = NULL;
-			first->async_tx.callback_param = NULL;
-		}
-	}
-
-	new->tx_cnt = desc_count;
-	new->async_tx.flags = orig_flags; /* client is in control of this ack */
-
-	/* store the original values for use in later cleanup */
-	if (new != first) {
-		new->src = first->src;
-		new->dst = first->dst;
-		new->len = first->len;
-	}
-
-	/* cookie incr and addition to used_list must be atomic */
-	cookie = ioat_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	ioat_chan->common.cookie = new->async_tx.cookie = cookie;
-
-	ioat_chan->dmacount += desc_count;
-	ioat_chan->pending += desc_count;
-	if (ioat_chan->pending >= ioat_pending_level)
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	return cookie;
-}
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat_chan: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
-					struct ioat_dma_chan *ioat_chan,
-					gfp_t flags)
-{
-	struct ioat_dma_descriptor *desc;
-	struct ioat_desc_sw *desc_sw;
-	struct ioatdma_device *ioatdma_device;
-	dma_addr_t phys;
-
-	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
-	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
-	if (unlikely(!desc))
-		return NULL;
-
-	desc_sw = kzalloc(sizeof(*desc_sw), flags);
-	if (unlikely(!desc_sw)) {
-		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
-		return NULL;
-	}
-
-	memset(desc, 0, sizeof(*desc));
-	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-	switch (ioat_chan->device->version) {
-	case IOAT_VER_1_2:
-		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
-		break;
-	}
-
-	desc_sw->hw = desc;
-	desc_sw->async_tx.phys = phys;
-
-	return desc_sw;
-}
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
-		 "initial descriptors per channel (default: 256)");
-
-/**
- * ioat2_dma_massage_chan_desc - link the descriptors into a circle
- * @ioat_chan: the channel to be massaged
- */
-static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
-{
-	struct ioat_desc_sw *desc, *_desc;
-
-	/* setup used_desc */
-	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
-	ioat_chan->used_desc.prev = NULL;
-
-	/* pull free_desc out of the circle so that every node is a hw
-	 * descriptor, but leave it pointing to the list
-	 */
-	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
-	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
-
-	/* circle link the hw descriptors */
-	desc = to_ioat_desc(ioat_chan->free_desc.next);
-	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
-	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
-		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
-	}
-}
-
-/**
- * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_desc_sw *desc;
-	u16 chanctrl;
-	u32 chanerr;
-	int i;
-	LIST_HEAD(tmp_list);
-
-	/* have we already been set up? */
-	if (!list_empty(&ioat_chan->free_desc))
-		return ioat_chan->desccount;
-
-	/* Setup register to interrupt and write completion status on error */
-	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
-		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
-		IOAT_CHANCTRL_ERR_COMPLETION_EN;
-	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/* Allocate descriptors */
-	for (i = 0; i < ioat_initial_desc_count; i++) {
-		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
-		if (!desc) {
-			dev_err(&ioat_chan->device->pdev->dev,
-				"Only %d initial descriptors\n", i);
-			break;
-		}
-		list_add_tail(&desc->node, &tmp_list);
-	}
-	spin_lock_bh(&ioat_chan->desc_lock);
-	ioat_chan->desccount = i;
-	list_splice(&tmp_list, &ioat_chan->free_desc);
-	if (ioat_chan->device->version != IOAT_VER_1_2)
-		ioat2_dma_massage_chan_desc(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	ioat_chan->completion_virt =
-		pci_pool_alloc(ioat_chan->device->completion_pool,
-			       GFP_KERNEL,
-			       &ioat_chan->completion_addr);
-	memset(ioat_chan->completion_virt, 0,
-	       sizeof(*ioat_chan->completion_virt));
-	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
-	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) ioat_chan->completion_addr) >> 32,
-	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	tasklet_enable(&ioat_chan->cleanup_task);
-	ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
-	return ioat_chan->desccount;
-}
-
-/**
- * ioat_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat_dma_free_chan_resources(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
-	struct ioat_desc_sw *desc, *_desc;
-	int in_use_descs = 0;
-
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (ioat_chan->desccount == 0)
-		return;
-
-	tasklet_disable(&ioat_chan->cleanup_task);
-	ioat_dma_memcpy_cleanup(ioat_chan);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       ioat_chan->reg_base
-			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-	mdelay(100);
-
-	spin_lock_bh(&ioat_chan->desc_lock);
-	switch (ioat_chan->device->version) {
-	case IOAT_VER_1_2:
-		list_for_each_entry_safe(desc, _desc,
-					 &ioat_chan->used_desc, node) {
-			in_use_descs++;
-			list_del(&desc->node);
-			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-				      desc->async_tx.phys);
-			kfree(desc);
-		}
-		list_for_each_entry_safe(desc, _desc,
-					 &ioat_chan->free_desc, node) {
-			list_del(&desc->node);
-			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-				      desc->async_tx.phys);
-			kfree(desc);
-		}
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		list_for_each_entry_safe(desc, _desc,
-					 ioat_chan->free_desc.next, node) {
-			list_del(&desc->node);
-			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-				      desc->async_tx.phys);
-			kfree(desc);
-		}
-		desc = to_ioat_desc(ioat_chan->free_desc.next);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->async_tx.phys);
-		kfree(desc);
-		INIT_LIST_HEAD(&ioat_chan->free_desc);
-		INIT_LIST_HEAD(&ioat_chan->used_desc);
-		break;
-	}
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	pci_pool_free(ioatdma_device->completion_pool,
-		      ioat_chan->completion_virt,
-		      ioat_chan->completion_addr);
-
-	/* one is ok since we left it on there on purpose */
-	if (in_use_descs > 1)
-		dev_err(&ioat_chan->device->pdev->dev,
-			"Freeing %d in use descriptors!\n",
-			in_use_descs - 1);
-
-	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
-	ioat_chan->pending = 0;
-	ioat_chan->dmacount = 0;
-	ioat_chan->desccount = 0;
-	ioat_chan->watchdog_completion = 0;
-	ioat_chan->last_compl_desc_addr_hw = 0;
-	ioat_chan->watchdog_tcp_cookie =
-		ioat_chan->watchdog_last_tcp_cookie = 0;
-}
-
-/**
- * ioat_dma_get_next_descriptor - return the next available descriptor
- * @ioat_chan: IOAT DMA channel handle
- *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held.  Allocates more descriptors if the channel
- * has run out.
- */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
-	struct ioat_desc_sw *new;
-
-	if (!list_empty(&ioat_chan->free_desc)) {
-		new = to_ioat_desc(ioat_chan->free_desc.next);
-		list_del(&new->node);
-	} else {
-		/* try to get another desc */
-		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-		if (!new) {
-			dev_err(&ioat_chan->device->pdev->dev,
-				"alloc failed\n");
-			return NULL;
-		}
-	}
-
-	prefetch(new->hw);
-	return new;
-}
-
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
-	struct ioat_desc_sw *new;
-
-	/*
-	 * used.prev points to where to start processing
-	 * used.next points to next free descriptor
-	 * if used.prev == NULL, there are none waiting to be processed
-	 * if used.next == used.prev.prev, there is only one free descriptor,
-	 *      and we need to use it to as a noop descriptor before
-	 *      linking in a new set of descriptors, since the device
-	 *      has probably already read the pointer to it
-	 */
-	if (ioat_chan->used_desc.prev &&
-	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
-
-		struct ioat_desc_sw *desc;
-		struct ioat_desc_sw *noop_desc;
-		int i;
-
-		/* set up the noop descriptor */
-		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
-		/* set size to non-zero value (channel returns error when size is 0) */
-		noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
-		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
-		noop_desc->hw->src_addr = 0;
-		noop_desc->hw->dst_addr = 0;
-
-		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
-		ioat_chan->pending++;
-		ioat_chan->dmacount++;
-
-		/* try to get a few more descriptors */
-		for (i = 16; i; i--) {
-			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-			if (!desc) {
-				dev_err(&ioat_chan->device->pdev->dev,
-					"alloc failed\n");
-				break;
-			}
-			list_add_tail(&desc->node, ioat_chan->used_desc.next);
-
-			desc->hw->next
-				= to_ioat_desc(desc->node.next)->async_tx.phys;
-			to_ioat_desc(desc->node.prev)->hw->next
-				= desc->async_tx.phys;
-			ioat_chan->desccount++;
-		}
-
-		ioat_chan->used_desc.next = noop_desc->node.next;
-	}
-	new = to_ioat_desc(ioat_chan->used_desc.next);
-	prefetch(new);
-	ioat_chan->used_desc.next = new->node.next;
-
-	if (ioat_chan->used_desc.prev == NULL)
-		ioat_chan->used_desc.prev = &new->node;
-
-	prefetch(new->hw);
-	return new;
-}
-
-static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
-						struct ioat_dma_chan *ioat_chan)
-{
-	if (!ioat_chan)
-		return NULL;
-
-	switch (ioat_chan->device->version) {
-	case IOAT_VER_1_2:
-		return ioat1_dma_get_next_descriptor(ioat_chan);
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		return ioat2_dma_get_next_descriptor(ioat_chan);
-	}
-	return NULL;
-}
-
-static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
-						struct dma_chan *chan,
-						dma_addr_t dma_dest,
-						dma_addr_t dma_src,
-						size_t len,
-						unsigned long flags)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_desc_sw *new;
-
-	spin_lock_bh(&ioat_chan->desc_lock);
-	new = ioat_dma_get_next_descriptor(ioat_chan);
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	if (new) {
-		new->len = len;
-		new->dst = dma_dest;
-		new->src = dma_src;
-		new->async_tx.flags = flags;
-		return &new->async_tx;
-	} else {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-		return NULL;
-	}
-}
-
-static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
-						struct dma_chan *chan,
-						dma_addr_t dma_dest,
-						dma_addr_t dma_src,
-						size_t len,
-						unsigned long flags)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_desc_sw *new;
-
-	spin_lock_bh(&ioat_chan->desc_lock);
-	new = ioat2_dma_get_next_descriptor(ioat_chan);
-
-	/*
-	 * leave ioat_chan->desc_lock held in the ioat 2 path;
-	 * it will be released at the end of tx_submit
-	 */
-
-	if (new) {
-		new->len = len;
-		new->dst = dma_dest;
-		new->src = dma_src;
-		new->async_tx.flags = flags;
-		return &new->async_tx;
-	} else {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		dev_err(&ioat_chan->device->pdev->dev,
-			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-		return NULL;
-	}
-}
-
-static void ioat_dma_cleanup_tasklet(unsigned long data)
-{
-	struct ioat_dma_chan *chan = (void *)data;
-	ioat_dma_memcpy_cleanup(chan);
-	writew(IOAT_CHANCTRL_INT_DISABLE,
-	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void
-ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
-{
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			pci_unmap_single(ioat_chan->device->pdev,
-					 pci_unmap_addr(desc, dst),
-					 pci_unmap_len(desc, len),
-					 PCI_DMA_FROMDEVICE);
-		else
-			pci_unmap_page(ioat_chan->device->pdev,
-				       pci_unmap_addr(desc, dst),
-				       pci_unmap_len(desc, len),
-				       PCI_DMA_FROMDEVICE);
-	}
-
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			pci_unmap_single(ioat_chan->device->pdev,
-					 pci_unmap_addr(desc, src),
-					 pci_unmap_len(desc, len),
-					 PCI_DMA_TODEVICE);
-		else
-			pci_unmap_page(ioat_chan->device->pdev,
-				       pci_unmap_addr(desc, src),
-				       pci_unmap_len(desc, len),
-				       PCI_DMA_TODEVICE);
-	}
-}
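
For context, the DMA_COMPL_*_UNMAP_SINGLE branches above are driven by flags the client sets at prep time (the self-test later in this file does exactly this). A hedged caller-side sketch with placeholder names, not an excerpt from this driver:

#include <linux/dmaengine.h>

/* Sketch: buffers mapped with dma_map_single() are flagged as "single"
 * mappings so the cleanup path above picks pci_unmap_single() rather
 * than pci_unmap_page(). chan/dst/src/len are placeholders. */
static struct dma_async_tx_descriptor *
prep_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len)
{
	unsigned long flags = DMA_COMPL_SRC_UNMAP_SINGLE |
			      DMA_COMPL_DEST_UNMAP_SINGLE;

	return chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						    flags);
}
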
-
-/**
- * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- */
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
-{
-	unsigned long phys_complete;
-	struct ioat_desc_sw *desc, *_desc;
-	dma_cookie_t cookie = 0;
-	unsigned long desc_phys;
-	struct ioat_desc_sw *latest_desc;
-
-	prefetch(ioat_chan->completion_virt);
-
-	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
-		return;
-
-	/* The completion writeback can happen at any time,
-	   so reads by the driver need to be atomic operations.
-	   The descriptor physical addresses are limited to 32 bits
-	   when the CPU can only do a 32-bit mov. */
-
-#if (BITS_PER_LONG == 64)
-	phys_complete =
-		ioat_chan->completion_virt->full
-		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
-	phys_complete =
-		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
-	if ((ioat_chan->completion_virt->full
-		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
-				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"Channel halted, chanerr = %x\n",
-			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
-
-		/* TODO do something to salvage the situation */
-	}
-
-	if (phys_complete == ioat_chan->last_completion) {
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		/*
-		 * perhaps we're stuck so hard that the watchdog can't go off?
-		 * try to catch it after 2 seconds
-		 */
-		if (ioat_chan->device->version != IOAT_VER_3_0) {
-			if (time_after(jiffies,
-				       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
-				ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
-				ioat_chan->last_completion_time = jiffies;
-			}
-		}
-		return;
-	}
-	ioat_chan->last_completion_time = jiffies;
-
-	cookie = 0;
-	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
-	}
-
-	switch (ioat_chan->device->version) {
-	case IOAT_VER_1_2:
-		list_for_each_entry_safe(desc, _desc,
-					 &ioat_chan->used_desc, node) {
-
-			/*
-			 * Incoming DMA requests may use multiple descriptors,
-			 * due to exceeding xfercap, perhaps. If so, only the
-			 * last one will have a cookie, and require unmapping.
-			 */
-			if (desc->async_tx.cookie) {
-				cookie = desc->async_tx.cookie;
-				ioat_dma_unmap(ioat_chan, desc);
-				if (desc->async_tx.callback) {
-					desc->async_tx.callback(desc->async_tx.callback_param);
-					desc->async_tx.callback = NULL;
-				}
-			}
-
-			if (desc->async_tx.phys != phys_complete) {
-				/*
-				 * a completed entry, but not the last, so clean
-				 * up if the client is done with the descriptor
-				 */
-				if (async_tx_test_ack(&desc->async_tx)) {
-					list_move_tail(&desc->node,
-						       &ioat_chan->free_desc);
-				} else
-					desc->async_tx.cookie = 0;
-			} else {
-				/*
-				 * last used desc. Do not remove, so we can
-				 * append from it, but don't look at it next
-				 * time, either
-				 */
-				desc->async_tx.cookie = 0;
-
-				/* TODO check status bits? */
-				break;
-			}
-		}
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		/* has some other thread already cleaned up? */
-		if (ioat_chan->used_desc.prev == NULL)
-			break;
-
-		/* work backwards to find latest finished desc */
-		desc = to_ioat_desc(ioat_chan->used_desc.next);
-		latest_desc = NULL;
-		do {
-			desc = to_ioat_desc(desc->node.prev);
-			desc_phys = (unsigned long)desc->async_tx.phys
-				       & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-			if (desc_phys == phys_complete) {
-				latest_desc = desc;
-				break;
-			}
-		} while (&desc->node != ioat_chan->used_desc.prev);
-
-		if (latest_desc != NULL) {
-
-			/* work forwards to clear finished descriptors */
-			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
-			     &desc->node != latest_desc->node.next &&
-			     &desc->node != ioat_chan->used_desc.next;
-			     desc = to_ioat_desc(desc->node.next)) {
-				if (desc->async_tx.cookie) {
-					cookie = desc->async_tx.cookie;
-					desc->async_tx.cookie = 0;
-					ioat_dma_unmap(ioat_chan, desc);
-					if (desc->async_tx.callback) {
-						desc->async_tx.callback(desc->async_tx.callback_param);
-						desc->async_tx.callback = NULL;
-					}
-				}
-			}
-
-			/* move used.prev up beyond those that are finished */
-			if (&desc->node == ioat_chan->used_desc.next)
-				ioat_chan->used_desc.prev = NULL;
-			else
-				ioat_chan->used_desc.prev = &desc->node;
-		}
-		break;
-	}
-
-	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	ioat_chan->last_completion = phys_complete;
-	if (cookie != 0)
-		ioat_chan->completed_cookie = cookie;
-
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
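
The BITS_PER_LONG split above exists because the hardware updates the 64-bit completion word asynchronously, and a 32-bit CPU can only read half of it atomically, so that path keeps just the low address bits. A small user-space sketch of the two extractions; the 64-bit mask is my recollection of IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR and should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>

#define COMPLETED_ADDR_MASK	(~0x3fULL)	/* assumed 64-bit mask */
#define LOW_COMPLETION_MASK	0xffffffc0U	/* IOAT_LOW_COMPLETION_MASK */

int main(void)
{
	/* stand-in for the DMA-coherent completion writeback word */
	uint64_t completion = 0x12345678abcdef40ULL;

	unsigned long long phys64 = completion & COMPLETED_ADDR_MASK;
	unsigned long long phys32 = (uint32_t)completion & LOW_COMPLETION_MASK;

	printf("64-bit read: %#llx\n32-bit read: %#llx\n", phys64, phys32);
	return 0;
}
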
-
-/**
- * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
- * @chan: IOAT DMA channel handle
- * @cookie: DMA transaction identifier
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
- */
-static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
-					    dma_cookie_t cookie,
-					    dma_cookie_t *done,
-					    dma_cookie_t *used)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	enum dma_status ret;
-
-	last_used = chan->cookie;
-	last_complete = ioat_chan->completed_cookie;
-	ioat_chan->watchdog_tcp_cookie = cookie;
-
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-	if (ret == DMA_SUCCESS)
-		return ret;
-
-	ioat_dma_memcpy_cleanup(ioat_chan);
-
-	last_used = chan->cookie;
-	last_complete = ioat_chan->completed_cookie;
-
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
-
-	return dma_async_is_complete(cookie, last_complete, last_used);
-}
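
Clients normally reach this method through the generic dmaengine wrappers rather than calling it directly. A hedged sketch of that polling pattern using the wrappers of this era, assuming chan and cookie came from a prior prep/tx_submit sequence like the self-test below:

#include <linux/delay.h>
#include <linux/dmaengine.h>

/* Sketch, not driver code: flush the pending queue, then poll the
 * generic wrapper (which lands in the method above) until the cookie
 * reports completion. */
static void wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	dma_async_issue_pending(chan);
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
			!= DMA_SUCCESS)
		udelay(1);	/* busy-wait; real callers may sleep */
}
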
-
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
-{
-	struct ioat_desc_sw *desc;
-
-	spin_lock_bh(&ioat_chan->desc_lock);
-
-	desc = ioat_dma_get_next_descriptor(ioat_chan);
-
-	if (!desc) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"Unable to start null desc - get next desc failed\n");
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		return;
-	}
-
-	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
-				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
-				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	/* set size to non-zero value (channel returns error when size is 0) */
-	desc->hw->size = NULL_DESC_BUFFER_SIZE;
-	desc->hw->src_addr = 0;
-	desc->hw->dst_addr = 0;
-	async_tx_ack(&desc->async_tx);
-	switch (ioat_chan->device->version) {
-	case IOAT_VER_1_2:
-		desc->hw->next = 0;
-		list_add_tail(&desc->node, &ioat_chan->used_desc);
-
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
-		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
-		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
-		ioat_chan->dmacount++;
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-		break;
-	}
-	spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
-/*
- * Perform an IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
-{
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
-}
-
-/**
- * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-static int ioat_dma_self_test(struct ioatdma_device *device)
-{
-	int i;
-	u8 *src;
-	u8 *dest;
-	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	unsigned long flags;
-
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!src)
-		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!dest) {
-		kfree(src);
-		return -ENOMEM;
-	}
-
-	/* Fill in src buffer */
-	for (i = 0; i < IOAT_TEST_SIZE; i++)
-		src[i] = (u8)i;
-
-	/* Start copy, using first DMA channel */
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
-	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
-		dev_err(&device->pdev->dev,
-			"selftest cannot allocate chan resource\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-				 DMA_TO_DEVICE);
-	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-				  DMA_FROM_DEVICE);
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
-	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, flags);
-	if (!tx) {
-		dev_err(&device->pdev->dev,
-			"Self-test prep failed, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(&device->pdev->dev,
-			"Self-test setup failed, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-	device->common.device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
-					!= DMA_SUCCESS) {
-		dev_err(&device->pdev->dev,
-			"Self-test copy timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-		dev_err(&device->pdev->dev,
-			"Self-test copy failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-free_resources:
-	device->common.device_free_chan_resources(dma_chan);
-out:
-	kfree(src);
-	kfree(dest);
-	return err;
-}
-
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
-		    sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
-		 "set ioat interrupt style: msix (default), "
-		 "msix-single-vector, msi, intx");
-
-/**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
- */
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
-{
-	struct ioat_dma_chan *ioat_chan;
-	int err, i, j, msixcnt;
-	u8 intrctrl = 0;
-
-	if (!strcmp(ioat_interrupt_style, "msix"))
-		goto msix;
-	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
-		goto msix_single_vector;
-	if (!strcmp(ioat_interrupt_style, "msi"))
-		goto msi;
-	if (!strcmp(ioat_interrupt_style, "intx"))
-		goto intx;
-	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
-		ioat_interrupt_style);
-	goto err_no_irq;
-
-msix:
-	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = device->common.chancnt;
-	for (i = 0; i < msixcnt; i++)
-		device->msix_entries[i].entry = i;
-
-	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
-	if (err < 0)
-		goto msi;
-	if (err > 0)
-		goto msix_single_vector;
-
-	for (i = 0; i < msixcnt; i++) {
-		ioat_chan = ioat_lookup_chan_by_index(device, i);
-		err = request_irq(device->msix_entries[i].vector,
-				  ioat_dma_do_interrupt_msix,
-				  0, "ioat-msix", ioat_chan);
-		if (err) {
-			for (j = 0; j < i; j++) {
-				ioat_chan =
-					ioat_lookup_chan_by_index(device, j);
-				free_irq(device->msix_entries[j].vector,
-					 ioat_chan);
-			}
-			goto msix_single_vector;
-		}
-	}
-	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = msix_multi_vector;
-	goto done;
-
-msix_single_vector:
-	device->msix_entries[0].entry = 0;
-	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
-	if (err)
-		goto msi;
-
-	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
-			  0, "ioat-msix", device);
-	if (err) {
-		pci_disable_msix(device->pdev);
-		goto msi;
-	}
-	device->irq_mode = msix_single_vector;
-	goto done;
-
-msi:
-	err = pci_enable_msi(device->pdev);
-	if (err)
-		goto intx;
-
-	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-			  0, "ioat-msi", device);
-	if (err) {
-		pci_disable_msi(device->pdev);
-		goto intx;
-	}
-	/*
-	 * CB 1.2 devices need a bit set in configuration space to enable MSI
-	 */
-	if (device->version == IOAT_VER_1_2) {
-		u32 dmactrl;
-		pci_read_config_dword(device->pdev,
-				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-		pci_write_config_dword(device->pdev,
-				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-	}
-	device->irq_mode = msi;
-	goto done;
-
-intx:
-	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-			  IRQF_SHARED, "ioat-intx", device);
-	if (err)
-		goto err_no_irq;
-	device->irq_mode = intx;
-
-done:
-	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	return 0;
-
-err_no_irq:
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	dev_err(&device->pdev->dev, "no usable interrupts\n");
-	device->irq_mode = none;
-	return -1;
-}
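
The label chain above degrades gracefully: per-channel MSI-X, then single-vector MSI-X, then MSI, then legacy INTx, failing only when none can be acquired. The same idiom reduced to a user-space skeleton with made-up setup functions:

#include <stdio.h>

/* Illustrative fallback chain, mirroring the msix -> msix_single_vector
 * -> msi -> intx labels above: try each setup routine in order of
 * preference and stop at the first that succeeds. */
static int try_msix(void) { return -1; }	/* pretend MSI-X is absent */
static int try_msi(void)  { return -1; }	/* pretend MSI is absent */
static int try_intx(void) { return 0; }		/* legacy IRQ works */

int main(void)
{
	int (*setup[])(void) = { try_msix, try_msi, try_intx };
	const char *name[] = { "msix", "msi", "intx" };
	int i;

	for (i = 0; i < 3; i++) {
		if (setup[i]() == 0) {
			printf("using %s interrupts\n", name[i]);
			return 0;
		}
	}
	fprintf(stderr, "no usable interrupts\n");
	return 1;
}
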
-
-/**
- * ioat_dma_remove_interrupts - remove whatever interrupts were set
- * @device: ioat device
- */
-static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
-{
-	struct ioat_dma_chan *ioat_chan;
-	int i;
-
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-
-	switch (device->irq_mode) {
-	case msix_multi_vector:
-		for (i = 0; i < device->common.chancnt; i++) {
-			ioat_chan = ioat_lookup_chan_by_index(device, i);
-			free_irq(device->msix_entries[i].vector, ioat_chan);
-		}
-		pci_disable_msix(device->pdev);
-		break;
-	case msix_single_vector:
-		free_irq(device->msix_entries[0].vector, device);
-		pci_disable_msix(device->pdev);
-		break;
-	case msi:
-		free_irq(device->pdev->irq, device);
-		pci_disable_msi(device->pdev);
-		break;
-	case intx:
-		free_irq(device->pdev->irq, device);
-		break;
-	case none:
-		dev_warn(&device->pdev->dev,
-			 "call to %s without interrupts setup\n", __func__);
-	}
-	device->irq_mode = none;
-}
-
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
-				      void __iomem *iobase)
-{
-	int err;
-	struct ioatdma_device *device;
-
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
-		err = -ENOMEM;
-		goto err_kzalloc;
-	}
-	device->pdev = pdev;
-	device->reg_base = iobase;
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-
-	/* DMA coherent memory pool for DMA descriptor allocations */
-	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					   sizeof(struct ioat_dma_descriptor),
-					   64, 0);
-	if (!device->dma_pool) {
-		err = -ENOMEM;
-		goto err_dma_pool;
-	}
-
-	device->completion_pool = pci_pool_create("completion_pool", pdev,
-						  sizeof(u64), SMP_CACHE_BYTES,
-						  SMP_CACHE_BYTES);
-	if (!device->completion_pool) {
-		err = -ENOMEM;
-		goto err_completion_pool;
-	}
-
-	INIT_LIST_HEAD(&device->common.channels);
-	ioat_dma_enumerate_channels(device);
-
-	device->common.device_alloc_chan_resources =
-						ioat_dma_alloc_chan_resources;
-	device->common.device_free_chan_resources =
-						ioat_dma_free_chan_resources;
-	device->common.dev = &pdev->dev;
-
-	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
-	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	switch (device->version) {
-	case IOAT_VER_1_2:
-		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-		device->common.device_issue_pending =
-						ioat1_dma_memcpy_issue_pending;
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-		device->common.device_issue_pending =
-						ioat2_dma_memcpy_issue_pending;
-		break;
-	}
-
-	dev_info(&device->pdev->dev,
-		"Intel(R) I/OAT DMA Engine found,"
-		" %d channels, device version 0x%02x, driver version %s\n",
-		device->common.chancnt, device->version, IOAT_DMA_VERSION);
-
-	if (!device->common.chancnt) {
-		dev_err(&device->pdev->dev,
-			"Intel(R) I/OAT DMA Engine problem found: "
-			"zero channels detected\n");
-		goto err_setup_interrupts;
-	}
-
-	err = ioat_dma_setup_interrupts(device);
-	if (err)
-		goto err_setup_interrupts;
-
-	err = ioat_dma_self_test(device);
-	if (err)
-		goto err_self_test;
-
-	ioat_set_tcp_copy_break(device);
-
-	dma_async_device_register(&device->common);
-
-	if (device->version != IOAT_VER_3_0) {
-		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
-		schedule_delayed_work(&device->work,
-				      WATCHDOG_DELAY);
-	}
-
-	return device;
-
-err_self_test:
-	ioat_dma_remove_interrupts(device);
-err_setup_interrupts:
-	pci_pool_destroy(device->completion_pool);
-err_completion_pool:
-	pci_pool_destroy(device->dma_pool);
-err_dma_pool:
-	kfree(device);
-err_kzalloc:
-	dev_err(&pdev->dev,
-		"Intel(R) I/OAT DMA Engine initialization failed\n");
-	return NULL;
-}
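
ioat_dma_probe() above uses the standard kernel unwind idiom: each failure path jumps to a label that releases exactly what was acquired before it, in reverse order. Stripped to its skeleton, with user-space stand-ins and illustrative names:

#include <stdlib.h>

/* Skeleton of the goto-unwind idiom in ioat_dma_probe(): teardown runs
 * in strict reverse order of acquisition, and each label undoes only
 * what was already set up when the jump happened. */
static int probe(void)
{
	void *pool_a, *pool_b;

	pool_a = malloc(64);	/* stands in for the descriptor pool */
	if (!pool_a)
		goto err_a;
	pool_b = malloc(64);	/* stands in for the completion pool */
	if (!pool_b)
		goto err_b;
	/* ... further setup would continue the same pattern ... */
	return 0;		/* success: resources stay live until remove */

err_b:
	free(pool_a);
err_a:
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}
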
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
-	struct dma_chan *chan, *_chan;
-	struct ioat_dma_chan *ioat_chan;
-
-	if (device->version != IOAT_VER_3_0)
-		cancel_delayed_work(&device->work);
-
-	ioat_dma_remove_interrupts(device);
-
-	dma_async_device_unregister(&device->common);
-
-	pci_pool_destroy(device->dma_pool);
-	pci_pool_destroy(device->completion_pool);
-
-	iounmap(device->reg_base);
-	pci_release_regions(device->pdev);
-	pci_disable_device(device->pdev);
-
-	list_for_each_entry_safe(chan, _chan,
-				 &device->common.channels, device_node) {
-		ioat_chan = to_ioat_chan(chan);
-		list_del(&chan->device_node);
-		kfree(ioat_chan);
-	}
-	kfree(device);
-}
-

+ 0 - 165
drivers/dma/ioatdma.h

@@ -1,165 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_H
-#define IOATDMA_H
-
-#include <linux/dmaengine.h>
-#include "ioatdma_hw.h"
-#include <linux/init.h>
-#include <linux/dmapool.h>
-#include <linux/cache.h>
-#include <linux/pci_ids.h>
-#include <net/tcp.h>
-
-#define IOAT_DMA_VERSION  "3.64"
-
-enum ioat_interrupt {
-	none = 0,
-	msix_multi_vector = 1,
-	msix_single_vector = 2,
-	msi = 3,
-	intx = 4,
-};
-
-#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
-#define IOAT_DMA_DCA_ANY_CPU		~0
-#define IOAT_WATCHDOG_PERIOD		(2 * HZ)
-
-
-/**
- * struct ioatdma_device - internal representation of an IOAT device
- * @pdev: PCI-Express device
- * @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
- * @completion_pool: for allocating completion writeback areas
- * @common: embedded struct dma_device
- * @version: version of ioatdma device
- * @irq_mode: which style irq to use
- * @work: delayed work for the channel watchdog
- * @msix_entries: irq handlers
- * @idx: per channel data
- */
-
-struct ioatdma_device {
-	struct pci_dev *pdev;
-	void __iomem *reg_base;
-	struct pci_pool *dma_pool;
-	struct pci_pool *completion_pool;
-	struct dma_device common;
-	u8 version;
-	enum ioat_interrupt irq_mode;
-	struct delayed_work work;
-	struct msix_entry msix_entries[4];
-	struct ioat_dma_chan *idx[4];
-};
-
-/**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-
-	void __iomem *reg_base;
-
-	dma_cookie_t completed_cookie;
-	unsigned long last_completion;
-	unsigned long last_completion_time;
-
-	size_t xfercap;	/* XFERCAP register value expanded out */
-
-	spinlock_t cleanup_lock;
-	spinlock_t desc_lock;
-	struct list_head free_desc;
-	struct list_head used_desc;
-	unsigned long watchdog_completion;
-	int watchdog_tcp_cookie;
-	u32 watchdog_last_tcp_cookie;
-	struct delayed_work work;
-
-	int pending;
-	int dmacount;
-	int desccount;
-
-	struct ioatdma_device *device;
-	struct dma_chan common;
-
-	dma_addr_t completion_addr;
-	union {
-		u64 full; /* HW completion writeback */
-		struct {
-			u32 low;
-			u32 high;
-		};
-	} *completion_virt;
-	unsigned long last_compl_desc_addr_hw;
-	struct tasklet_struct cleanup_task;
-};
-
-/* wrapper around hardware descriptor format + additional software fields */
-
-/**
- * struct ioat_desc_sw - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor
- * @node: this descriptor will either be on the free list,
- *     or attached to a transaction list (async_tx.tx_list)
- * @tx_cnt: number of descriptors required to complete the transaction
- * @len: transaction byte count
- * @src: source DMA address
- * @dst: destination DMA address
- * @async_tx: the generic software descriptor for all engines
- */
-struct ioat_desc_sw {
-	struct ioat_dma_descriptor *hw;
-	struct list_head node;
-	int tx_cnt;
-	size_t len;
-	dma_addr_t src;
-	dma_addr_t dst;
-	struct dma_async_tx_descriptor async_tx;
-};
-
-static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
-{
-	#ifdef CONFIG_NET_DMA
-	switch (dev->version) {
-	case IOAT_VER_1_2:
-		sysctl_tcp_dma_copybreak = 4096;
-		break;
-	case IOAT_VER_2_0:
-		sysctl_tcp_dma_copybreak = 2048;
-		break;
-	case IOAT_VER_3_0:
-		sysctl_tcp_dma_copybreak = 262144;
-		break;
-	}
-	#endif
-}
-
-#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
-				      void __iomem *iobase);
-void ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-#else
-#define ioat_dma_probe(pdev, iobase)    NULL
-#define ioat_dma_remove(device)         do { } while (0)
-#define ioat_dca_init(pdev, iobase)	NULL
-#define ioat2_dca_init(pdev, iobase)	NULL
-#define ioat3_dca_init(pdev, iobase)	NULL
-#endif
-
-#endif /* IOATDMA_H */

+ 10 - 0
drivers/dma/iovlock.c

@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
 					iov_byte_offset,
 					kdata,
 					copy);
+			/* poll for a descriptor slot */
+			if (unlikely(dma_cookie < 0)) {
+				dma_async_issue_pending(chan);
+				continue;
+			}
 
 			len -= copy;
 			iov[iovec_idx].iov_len -= copy;
@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
 					page,
 					offset,
 					copy);
+			/* poll for a descriptor slot */
+			if (unlikely(dma_cookie < 0)) {
+				dma_async_issue_pending(chan);
+				continue;
+			}
 
 			len -= copy;
 			iov[iovec_idx].iov_len -= copy;
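The added retry treats a negative cookie as "no descriptor available": dma_async_issue_pending() flushes the queued work so the engine can drain and the same copy can be retried. A hedged sketch of that loop using the buf-to-buf helper from this era's API; the wrapper name is a placeholder:

#include <linux/dmaengine.h>

/* Sketch of the retry added above: a negative cookie from the async
 * memcpy helpers means no descriptor was available, so kick the engine
 * with dma_async_issue_pending() and try the same copy again. */
static dma_cookie_t copy_with_retry(struct dma_chan *chan, void *dst,
				    void *src, size_t len)
{
	dma_cookie_t cookie;

	do {
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
		if (cookie < 0)
			dma_async_issue_pending(chan);
	} while (cookie < 0);

	return cookie;
}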

+ 10 - 10
drivers/idle/i7300_idle.c

@@ -29,8 +29,8 @@
 
 #include <asm/idle.h>
 
-#include "../dma/ioatdma_hw.h"
-#include "../dma/ioatdma_registers.h"
+#include "../dma/ioat/hw.h"
+#include "../dma/ioat/registers.h"
 
 #define I7300_IDLE_DRIVER_VERSION	"1.55"
 #define I7300_PRINT			"i7300_idle:"
@@ -126,9 +126,9 @@ static void i7300_idle_ioat_stop(void)
 		udelay(10);
 
 		sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-			IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+			IOAT_CHANSTS_STATUS;
 
-		if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
+		if (sts != IOAT_CHANSTS_ACTIVE)
 			break;
 
 	}
@@ -160,9 +160,9 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
 	udelay(1000);
 
 	chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-			IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+			IOAT_CHANSTS_STATUS;
 
-	if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
+	if (chan_sts != IOAT_CHANSTS_DONE) {
 		/* Not complete, reset the channel */
 		writeb(IOAT_CHANCMD_RESET,
 		       ioat_chanbase + IOAT1_CHANCMD_OFFSET);
@@ -288,9 +288,9 @@ static void __exit i7300_idle_ioat_exit(void)
 		       ioat_chanbase + IOAT1_CHANCMD_OFFSET);
 
 		chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-			IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+			IOAT_CHANSTS_STATUS;
 
-		if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+		if (chan_sts != IOAT_CHANSTS_ACTIVE) {
 			writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
 			break;
 		}
@@ -298,14 +298,14 @@ static void __exit i7300_idle_ioat_exit(void)
 	}
 
 	chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
-			IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+			IOAT_CHANSTS_STATUS;
 
 	/*
 	 * We tried to reset multiple times. If the IO A/T channel is still
 	 * active, flag an error and return without cleanup. A memory leak is
 	 * better than random corruption in that extreme error situation.
 	 */
-	if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+	if (chan_sts == IOAT_CHANSTS_ACTIVE) {
 		printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
 			" Not freeing resources\n");
 		return;