/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
 
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
 
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
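
/*
 * Illustrative sketch (not part of the original source): a dmaengine client
 * of this era batches copies so that the per-channel pending count can cross
 * ioat_pending_level before the doorbell is rung; the wrapper names below
 * are assumed from the contemporary dmaengine API and shown only as comments.
 *
 *	for (i = 0; i < nr_copies; i++)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst[i], src[i], len);
 *	dma_async_memcpy_issue_pending(chan);	// flush anything still queued
 */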
 
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}
 
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
 
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);
 
/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		ioat_chan->desccount = 0;
		if (ioat_chan->device->version != IOAT_VER_1_2) {
			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
					| IOAT_DMA_DCA_ANY_CPU,
				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
		}
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}
 
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	tx_to_ioat_desc(tx)->src = addr;
}

static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	tx_to_ioat_desc(tx)->dst = addr;
}
 
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
 
static inline void __ioat2_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
 
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	int orig_ack;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_ack = first->async_tx.ack;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		new->async_tx.ack = 1;

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;
		hw->next = 0;

		/* chain together the physical address list for the HW */
		wmb();
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback into the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.ack = orig_ack; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	__list_splice(&new_chain, ioat_chan->used_desc.prev);

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
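
/*
 * Worked example (descriptive comment, not from the original source): with a
 * hardware transfer cap (xfercap) of, say, 1 MB, a single 2.5 MB copy
 * submitted through ioat1_tx_submit() is split into three hardware
 * descriptors of 1 MB, 1 MB and 0.5 MB.  Only the last descriptor carries
 * the cookie, the callback and the original src/dst/len, which is exactly
 * what ioat_dma_memcpy_cleanup() relies on later when it unmaps and
 * completes the transaction.
 */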
 
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	int orig_ack;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_ack = first->async_tx.ack;
	new = first;

	/*
	 * ioat_chan->desc_lock is still in force in version 2 path
	 * it gets unlocked at end of this function
	 */
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		new->async_tx.ack = 1;

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;

		desc_count++;
	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback into the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.ack = orig_ack; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
 
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
		break;
	}
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
 
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "initial descriptors per channel (default: 256)");
 
/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc, *_desc;

	/* setup used_desc */
	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
	ioat_chan->used_desc.prev = NULL;

	/* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

	/* circle link the hw descriptors */
	desc = to_ioat_desc(ioat_chan->free_desc.next);
	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	}
}
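
/*
 * Resulting layout (descriptive sketch, not from the original source): once
 * massaged, the descriptors form a ring in which every node's hw->next holds
 * the physical address of the following node and the last node wraps back to
 * the first, so the hardware can walk the ring indefinitely.  free_desc is
 * left as an ordinary list head hanging off the ring, while used_desc.next
 * and used_desc.prev track the software's produce and consume positions.
 */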
 
/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return ioat_chan->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->desccount = i;
	list_splice(&tmp_list, &ioat_chan->free_desc);
	if (ioat_chan->device->version != IOAT_VER_1_2)
		ioat2_dma_massage_chan_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
	return ioat_chan->desccount;
}
 
/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {
			in_use_descs++;
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->free_desc, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		break;
	case IOAT_VER_2_0:
		list_for_each_entry_safe(desc, _desc,
					 ioat_chan->free_desc.next, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
	ioat_chan->dmacount = 0;
}
 
/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(&ioat_chan->device->pdev->dev,
				"alloc failed\n");
			return NULL;
		}
	}

	prefetch(new->hw);
	return new;
}
 
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 *      and we need to use it as a noop descriptor before
	 *      linking in a new set of descriptors, since the device
	 *      has probably already read the pointer to it
	 */
	if (ioat_chan->used_desc.prev &&
	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

		struct ioat_desc_sw *desc;
		struct ioat_desc_sw *noop_desc;
		int i;

		/* set up the noop descriptor */
		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
		noop_desc->hw->size = 0;
		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
		noop_desc->hw->src_addr = 0;
		noop_desc->hw->dst_addr = 0;

		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
		ioat_chan->pending++;
		ioat_chan->dmacount++;

		/* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(&ioat_chan->device->pdev->dev,
					"alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->async_tx.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->async_tx.phys;
			ioat_chan->desccount++;
		}

		ioat_chan->used_desc.next = noop_desc->node.next;
	}
	new = to_ioat_desc(ioat_chan->used_desc.next);
	prefetch(new);
	ioat_chan->used_desc.next = new->node.next;

	if (ioat_chan->used_desc.prev == NULL)
		ioat_chan->used_desc.prev = &new->node;

	prefetch(new->hw);
	return new;
}
 
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}
 
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (new) {
		new->len = len;
		return &new->async_tx;
	} else
		return NULL;
}
 
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat2_dma_get_next_descriptor(ioat_chan);

	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it will get unlocked at end of tx_submit
	 */

	if (new) {
		new->len = len;
		return &new->async_tx;
	} else
		return NULL;
}
 
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
 
/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	unsigned long desc_phys;
	struct ioat_desc_sw *latest_desc;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {

			/*
			 * Incoming DMA requests may use multiple descriptors,
			 * due to exceeding xfercap, perhaps. If so, only the
			 * last one will have a cookie, and require unmapping.
			 */
			if (desc->async_tx.cookie) {
				cookie = desc->async_tx.cookie;

				/*
				 * yes we are unmapping both _page and _single
				 * alloc'd regions with unmap_page. Is this
				 * *really* that bad?
				 */
				pci_unmap_page(ioat_chan->device->pdev,
						pci_unmap_addr(desc, dst),
						pci_unmap_len(desc, len),
						PCI_DMA_FROMDEVICE);
				pci_unmap_page(ioat_chan->device->pdev,
						pci_unmap_addr(desc, src),
						pci_unmap_len(desc, len),
						PCI_DMA_TODEVICE);

				if (desc->async_tx.callback) {
					desc->async_tx.callback(desc->async_tx.callback_param);
					desc->async_tx.callback = NULL;
				}
			}

			if (desc->async_tx.phys != phys_complete) {
				/*
				 * a completed entry, but not the last, so clean
				 * up if the client is done with the descriptor
				 */
				if (desc->async_tx.ack) {
					list_del(&desc->node);
					list_add_tail(&desc->node,
						      &ioat_chan->free_desc);
				} else
					desc->async_tx.cookie = 0;
			} else {
				/*
				 * last used desc. Do not remove, so we can
				 * append from it, but don't look at it next
				 * time, either
				 */
				desc->async_tx.cookie = 0;

				/* TODO check status bits? */
				break;
			}
		}
		break;
	case IOAT_VER_2_0:
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;

		/* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
			desc_phys = (unsigned long)desc->async_tx.phys
				       & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);

		if (latest_desc != NULL) {

			/* work forwards to clear finished descriptors */
			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
			     &desc->node != latest_desc->node.next &&
			     &desc->node != ioat_chan->used_desc.next;
			     desc = to_ioat_desc(desc->node.next)) {
				if (desc->async_tx.cookie) {
					cookie = desc->async_tx.cookie;
					desc->async_tx.cookie = 0;

					pci_unmap_page(ioat_chan->device->pdev,
						      pci_unmap_addr(desc, dst),
						      pci_unmap_len(desc, len),
						      PCI_DMA_FROMDEVICE);
					pci_unmap_page(ioat_chan->device->pdev,
						      pci_unmap_addr(desc, src),
						      pci_unmap_len(desc, len),
						      PCI_DMA_TODEVICE);

					if (desc->async_tx.callback) {
						desc->async_tx.callback(desc->async_tx.callback_param);
						desc->async_tx.callback = NULL;
					}
				}
			}

			/* move used.prev up beyond those that are finished */
			if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
		}
		break;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
 
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}
 
/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
 
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	desc->hw->size = 0;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	desc->async_tx.ack = 1;
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc->hw->next = 0;
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);
}
 
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
		dma_async_param);
}
 
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = (void *)0x8086;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	device->common.device_issue_pending(dma_chan);
	msleep(1);

	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	device->common.device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
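
/*
 * Note (descriptive comment): the self-test above doubles as a minimal usage
 * example of the exported operations -- device_prep_dma_memcpy(),
 * tx_set_src()/tx_set_dest(), tx_submit(), device_issue_pending() and
 * device_is_tx_complete() -- in the order a dmaengine client would call them.
 */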
 
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
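
/*
 * Example (illustrative, assuming the driver is built as the "ioatdma"
 * module): the interrupt style can be overridden at load time with
 *	modprobe ioatdma ioat_interrupt_style=msi
 * or on the kernel command line as ioatdma.ioat_interrupt_style=msi.
 */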
 
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}
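
/*
 * Note on the fallback chain above (descriptive comment): a failed MSI-X
 * allocation degrades to a single MSI-X vector, then to MSI, then to legacy
 * INTx, and only when all of those fail is interrupt generation disabled
 * entirely via err_no_irq.
 */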
 
/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}
 
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.dev = &pdev->dev;

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	switch (device->version) {
	case IOAT_VER_1_2:
		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat1_dma_memcpy_issue_pending;
		break;
	case IOAT_VER_2_0:
		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat2_dma_memcpy_issue_pending;
		break;
	}

	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}
 
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}