[linux-2.6] drivers/dma/ioat_dma.c
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2007 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include "ioatdma.h"
36 #include "ioatdma_registers.h"
37 #include "ioatdma_hw.h"
38
39 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
40 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
41 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43
44 static int ioat_pending_level = 4;
45 module_param(ioat_pending_level, int, 0644);
46 MODULE_PARM_DESC(ioat_pending_level,
47                  "high-water mark for pushing ioat descriptors (default: 4)");
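/*
 * Illustrative usage (assuming the driver is loaded as the "ioatdma"
 * module; the value 8 is only an example): the high-water mark can be
 * set at load time, e.g. "modprobe ioatdma ioat_pending_level=8", or
 * adjusted at runtime through the 0644 sysfs parameter
 * /sys/module/ioatdma/parameters/ioat_pending_level.
 */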
48
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
52
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
57
58 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
59                                                 struct ioatdma_device *device,
60                                                 int index)
61 {
62         return device->idx[index];
63 }
64
65 /**
66  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
67  * @irq: interrupt id
68  * @data: interrupt data
69  */
70 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
71 {
72         struct ioatdma_device *instance = data;
73         struct ioat_dma_chan *ioat_chan;
74         unsigned long attnstatus;
75         int bit;
76         u8 intrctrl;
77
78         intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
79
80         if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
81                 return IRQ_NONE;
82
83         if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
84                 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
85                 return IRQ_NONE;
86         }
87
88         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
89         for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
90                 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
91                 tasklet_schedule(&ioat_chan->cleanup_task);
92         }
93
94         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
95         return IRQ_HANDLED;
96 }
97
98 /**
99  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
100  * @irq: interrupt id
101  * @data: interrupt data
102  */
103 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
104 {
105         struct ioat_dma_chan *ioat_chan = data;
106
107         tasklet_schedule(&ioat_chan->cleanup_task);
108
109         return IRQ_HANDLED;
110 }
111
112 static void ioat_dma_cleanup_tasklet(unsigned long data);
113
114 /**
115  * ioat_dma_enumerate_channels - find and initialize the device's channels
116  * @device: the device to be enumerated
117  */
118 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
119 {
120         u8 xfercap_scale;
121         u32 xfercap;
122         int i;
123         struct ioat_dma_chan *ioat_chan;
124
125         device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
127         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
128
129         for (i = 0; i < device->common.chancnt; i++) {
130                 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
131                 if (!ioat_chan) {
132                         device->common.chancnt = i;
133                         break;
134                 }
135
136                 ioat_chan->device = device;
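                /* device-global registers occupy the first 0x80 bytes of
                 * reg_base; each channel then gets its own 0x80-byte
                 * register window after that
                 */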
137                 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138                 ioat_chan->xfercap = xfercap;
139                 ioat_chan->desccount = 0;
140                 if (ioat_chan->device->version != IOAT_VER_1_2) {
141                         writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142                                         | IOAT_DMA_DCA_ANY_CPU,
143                                 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
144                 }
145                 spin_lock_init(&ioat_chan->cleanup_lock);
146                 spin_lock_init(&ioat_chan->desc_lock);
147                 INIT_LIST_HEAD(&ioat_chan->free_desc);
148                 INIT_LIST_HEAD(&ioat_chan->used_desc);
149                 /* This should be made common somewhere in dmaengine.c */
150                 ioat_chan->common.device = &device->common;
151                 list_add_tail(&ioat_chan->common.device_node,
152                               &device->common.channels);
153                 device->idx[i] = ioat_chan;
154                 tasklet_init(&ioat_chan->cleanup_task,
155                              ioat_dma_cleanup_tasklet,
156                              (unsigned long) ioat_chan);
157                 tasklet_disable(&ioat_chan->cleanup_task);
158         }
159         return device->common.chancnt;
160 }
161
162 static void ioat_set_src(dma_addr_t addr,
163                          struct dma_async_tx_descriptor *tx,
164                          int index)
165 {
166         tx_to_ioat_desc(tx)->src = addr;
167 }
168
169 static void ioat_set_dest(dma_addr_t addr,
170                           struct dma_async_tx_descriptor *tx,
171                           int index)
172 {
173         tx_to_ioat_desc(tx)->dst = addr;
174 }
175
176 static inline void __ioat1_dma_memcpy_issue_pending(
177                                                struct ioat_dma_chan *ioat_chan);
178 static inline void __ioat2_dma_memcpy_issue_pending(
179                                                struct ioat_dma_chan *ioat_chan);
180
181 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
182 {
183         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
184         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
185         struct ioat_desc_sw *prev, *new;
186         struct ioat_dma_descriptor *hw;
187         dma_cookie_t cookie;
188         LIST_HEAD(new_chain);
189         u32 copy;
190         size_t len;
191         dma_addr_t src, dst;
192         int orig_ack;
193         unsigned int desc_count = 0;
194
195         /* src, dst and len are stored in the initial descriptor */
196         len = first->len;
197         src = first->src;
198         dst = first->dst;
199         orig_ack = first->async_tx.ack;
200         new = first;
201
202         spin_lock_bh(&ioat_chan->desc_lock);
203         prev = to_ioat_desc(ioat_chan->used_desc.prev);
204         prefetch(prev->hw);
205         do {
206                 copy = min((u32) len, ioat_chan->xfercap);
207
208                 new->async_tx.ack = 1;
209
210                 hw = new->hw;
211                 hw->size = copy;
212                 hw->ctl = 0;
213                 hw->src_addr = src;
214                 hw->dst_addr = dst;
215                 hw->next = 0;
216
217                 /* chain together the physical address list for the HW */
218                 wmb();
219                 prev->hw->next = (u64) new->async_tx.phys;
220
221                 len -= copy;
222                 dst += copy;
223                 src += copy;
224
225                 list_add_tail(&new->node, &new_chain);
226                 desc_count++;
227                 prev = new;
228         } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
229
230         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
231         if (new->async_tx.callback) {
232                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
233                 if (first != new) {
234                         /* move callback into the last desc */
235                         new->async_tx.callback = first->async_tx.callback;
236                         new->async_tx.callback_param
237                                         = first->async_tx.callback_param;
238                         first->async_tx.callback = NULL;
239                         first->async_tx.callback_param = NULL;
240                 }
241         }
242
243         new->tx_cnt = desc_count;
244         new->async_tx.ack = orig_ack; /* client is in control of this ack */
245
246         /* store the original values for use in later cleanup */
247         if (new != first) {
248                 new->src = first->src;
249                 new->dst = first->dst;
250                 new->len = first->len;
251         }
252
253         /* cookie incr and addition to used_list must be atomic */
254         cookie = ioat_chan->common.cookie;
255         cookie++;
256         if (cookie < 0)
257                 cookie = 1;
258         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
259
260         /* write address into NextDescriptor field of last desc in chain */
261         to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
262                                                         first->async_tx.phys;
263         __list_splice(&new_chain, ioat_chan->used_desc.prev);
264
265         ioat_chan->dmacount += desc_count;
266         ioat_chan->pending += desc_count;
267         if (ioat_chan->pending >= ioat_pending_level)
268                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
269         spin_unlock_bh(&ioat_chan->desc_lock);
270
271         return cookie;
272 }
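/*
 * Illustrative sketch (hypothetical numbers): with xfercap = 4096, a
 * single 10000-byte memcpy submitted through ioat1_tx_submit() above is
 * split by the copy = min(len, xfercap) loop into a chain of three
 * hardware descriptors (4096 + 4096 + 1808 bytes).  Only the last
 * descriptor keeps the cookie, the client's callback and the original
 * src/dst/len, which is what ioat_dma_memcpy_cleanup() relies on when it
 * unmaps and completes the transaction.
 */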
273
274 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
275 {
276         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
277         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
278         struct ioat_desc_sw *new;
279         struct ioat_dma_descriptor *hw;
280         dma_cookie_t cookie;
281         u32 copy;
282         size_t len;
283         dma_addr_t src, dst;
284         int orig_ack;
285         unsigned int desc_count = 0;
286
287         /* src, dst and len are stored in the initial descriptor */
288         len = first->len;
289         src = first->src;
290         dst = first->dst;
291         orig_ack = first->async_tx.ack;
292         new = first;
293
294         /* ioat_chan->desc_lock is still in force in version 2 path */
295
296         do {
297                 copy = min((u32) len, ioat_chan->xfercap);
298
299                 new->async_tx.ack = 1;
300
301                 hw = new->hw;
302                 hw->size = copy;
303                 hw->ctl = 0;
304                 hw->src_addr = src;
305                 hw->dst_addr = dst;
306
307                 len -= copy;
308                 dst += copy;
309                 src += copy;
310                 desc_count++;
311         } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
312
313         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
314         if (new->async_tx.callback) {
315                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
316                 if (first != new) {
317                         /* move callback into the last desc */
318                         new->async_tx.callback = first->async_tx.callback;
319                         new->async_tx.callback_param
320                                         = first->async_tx.callback_param;
321                         first->async_tx.callback = NULL;
322                         first->async_tx.callback_param = NULL;
323                 }
324         }
325
326         new->tx_cnt = desc_count;
327         new->async_tx.ack = orig_ack; /* client is in control of this ack */
328
329         /* store the original values for use in later cleanup */
330         if (new != first) {
331                 new->src = first->src;
332                 new->dst = first->dst;
333                 new->len = first->len;
334         }
335
336         /* cookie incr and addition to used_list must be atomic */
337         cookie = ioat_chan->common.cookie;
338         cookie++;
339         if (cookie < 0)
340                 cookie = 1;
341         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
342
343         ioat_chan->dmacount += desc_count;
344         ioat_chan->pending += desc_count;
345         if (ioat_chan->pending >= ioat_pending_level)
346                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
347         spin_unlock_bh(&ioat_chan->desc_lock);
348
349         return cookie;
350 }
351
352 /**
353  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
354  * @ioat_chan: the channel supplying the memory pool for the descriptors
355  * @flags: allocation flags
356  */
357 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
358                                         struct ioat_dma_chan *ioat_chan,
359                                         gfp_t flags)
360 {
361         struct ioat_dma_descriptor *desc;
362         struct ioat_desc_sw *desc_sw;
363         struct ioatdma_device *ioatdma_device;
364         dma_addr_t phys;
365
366         ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
367         desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
368         if (unlikely(!desc))
369                 return NULL;
370
371         desc_sw = kzalloc(sizeof(*desc_sw), flags);
372         if (unlikely(!desc_sw)) {
373                 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
374                 return NULL;
375         }
376
377         memset(desc, 0, sizeof(*desc));
378         dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
379         desc_sw->async_tx.tx_set_src = ioat_set_src;
380         desc_sw->async_tx.tx_set_dest = ioat_set_dest;
381         switch (ioat_chan->device->version) {
382         case IOAT_VER_1_2:
383                 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
384                 break;
385         case IOAT_VER_2_0:
386                 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
387                 break;
388         }
389         INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
390
391         desc_sw->hw = desc;
392         desc_sw->async_tx.phys = phys;
393
394         return desc_sw;
395 }
396
397 static int ioat_initial_desc_count = 256;
398 module_param(ioat_initial_desc_count, int, 0644);
399 MODULE_PARM_DESC(ioat_initial_desc_count,
400                  "initial descriptors per channel (default: 256)");
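/*
 * This only sizes the initial per-channel pool; if a channel runs dry,
 * ioat_dma_get_next_descriptor() below tops it up with GFP_ATOMIC
 * allocations.
 */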
401
402 /**
403  * ioat2_dma_massage_chan_desc - link the descriptors into a circle
404  * @ioat_chan: the channel to be massaged
405  */
406 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
407 {
408         struct ioat_desc_sw *desc, *_desc;
409
410         /* setup used_desc */
411         ioat_chan->used_desc.next = ioat_chan->free_desc.next;
412         ioat_chan->used_desc.prev = NULL;
413
414         /* pull free_desc out of the circle so that every node is a hw
415          * descriptor, but leave it pointing to the list
416          */
417         ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
418         ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
419
420         /* circle link the hw descriptors */
421         desc = to_ioat_desc(ioat_chan->free_desc.next);
422         desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
423         list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
424                 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
425         }
426 }
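/*
 * Sketch of the resulting layout (three descriptors shown for
 * illustration): free_desc still points into the ring but is no longer a
 * member of it, and each hw descriptor's ->next holds the physical
 * address of the next descriptor:
 *
 *	free_desc --> D0 <-> D1 <-> D2 <-> D0 ...      (sw list, circular)
 *	D0->hw->next = D1.phys, D1->hw->next = D2.phys,
 *	D2->hw->next = D0.phys                          (hw chain, circular)
 */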
427
428 /**
429  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
430  * @chan: the channel to be filled out
431  */
432 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
433 {
434         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
435         struct ioat_desc_sw *desc = NULL;
436         u16 chanctrl;
437         u32 chanerr;
438         int i;
439         LIST_HEAD(tmp_list);
440
441         /* have we already been set up? */
442         if (!list_empty(&ioat_chan->free_desc))
443                 return ioat_chan->desccount;
444
445         /* Setup register to interrupt and write completion status on error */
446         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
447                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
448                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
449         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
450
451         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
452         if (chanerr) {
453                 dev_err(&ioat_chan->device->pdev->dev,
454                         "CHANERR = %x, clearing\n", chanerr);
455                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
456         }
457
458         /* Allocate descriptors */
459         for (i = 0; i < ioat_initial_desc_count; i++) {
460                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
461                 if (!desc) {
462                         dev_err(&ioat_chan->device->pdev->dev,
463                                 "Only %d initial descriptors\n", i);
464                         break;
465                 }
466                 list_add_tail(&desc->node, &tmp_list);
467         }
468         spin_lock_bh(&ioat_chan->desc_lock);
469         ioat_chan->desccount = i;
470         list_splice(&tmp_list, &ioat_chan->free_desc);
471         if (ioat_chan->device->version != IOAT_VER_1_2)
472                 ioat2_dma_massage_chan_desc(ioat_chan);
473         spin_unlock_bh(&ioat_chan->desc_lock);
474
475         /* allocate a completion writeback area */
476         /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
477         ioat_chan->completion_virt =
478                 pci_pool_alloc(ioat_chan->device->completion_pool,
479                                GFP_KERNEL,
480                                &ioat_chan->completion_addr);
481         memset(ioat_chan->completion_virt, 0,
482                sizeof(*ioat_chan->completion_virt));
483         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
484                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
485         writel(((u64) ioat_chan->completion_addr) >> 32,
486                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
487
488         tasklet_enable(&ioat_chan->cleanup_task);
489         ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
490         return ioat_chan->desccount;
491 }
492
493 /**
494  * ioat_dma_free_chan_resources - release all the descriptors
495  * @chan: the channel to be cleaned
496  */
497 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
498 {
499         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
500         struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
501         struct ioat_desc_sw *desc, *_desc;
502         int in_use_descs = 0;
503
504         tasklet_disable(&ioat_chan->cleanup_task);
505         ioat_dma_memcpy_cleanup(ioat_chan);
506
507         /* Delay 100ms after reset to allow internal DMA logic to quiesce
508          * before removing DMA descriptor resources.
509          */
510         writeb(IOAT_CHANCMD_RESET,
511                ioat_chan->reg_base
512                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
513         mdelay(100);
514
515         spin_lock_bh(&ioat_chan->desc_lock);
516         switch (ioat_chan->device->version) {
517         case IOAT_VER_1_2:
518                 list_for_each_entry_safe(desc, _desc,
519                                          &ioat_chan->used_desc, node) {
520                         in_use_descs++;
521                         list_del(&desc->node);
522                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
523                                       desc->async_tx.phys);
524                         kfree(desc);
525                 }
526                 list_for_each_entry_safe(desc, _desc,
527                                          &ioat_chan->free_desc, node) {
528                         list_del(&desc->node);
529                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
530                                       desc->async_tx.phys);
531                         kfree(desc);
532                 }
533                 break;
534         case IOAT_VER_2_0:
535                 list_for_each_entry_safe(desc, _desc,
536                                          ioat_chan->free_desc.next, node) {
537                         list_del(&desc->node);
538                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
539                                       desc->async_tx.phys);
540                         kfree(desc);
541                 }
542                 desc = to_ioat_desc(ioat_chan->free_desc.next);
543                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
544                               desc->async_tx.phys);
545                 kfree(desc);
546                 INIT_LIST_HEAD(&ioat_chan->free_desc);
547                 INIT_LIST_HEAD(&ioat_chan->used_desc);
548                 break;
549         }
550         spin_unlock_bh(&ioat_chan->desc_lock);
551
552         pci_pool_free(ioatdma_device->completion_pool,
553                       ioat_chan->completion_virt,
554                       ioat_chan->completion_addr);
555
556         /* one is OK since we left it there on purpose */
557         if (in_use_descs > 1)
558                 dev_err(&ioat_chan->device->pdev->dev,
559                         "Freeing %d in use descriptors!\n",
560                         in_use_descs - 1);
561
562         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
563         ioat_chan->pending = 0;
564         ioat_chan->dmacount = 0;
565 }
566
567 /**
568  * ioat1_dma_get_next_descriptor - return the next available descriptor
569  * @ioat_chan: IOAT DMA channel handle
570  *
571  * Gets the next descriptor from the chain, and must be called with the
572  * channel's desc_lock held.  Allocates more descriptors if the channel
573  * has run out.
574  */
575 static struct ioat_desc_sw *
576 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
577 {
578         struct ioat_desc_sw *new = NULL;
579
580         if (!list_empty(&ioat_chan->free_desc)) {
581                 new = to_ioat_desc(ioat_chan->free_desc.next);
582                 list_del(&new->node);
583         } else {
584                 /* try to get another desc */
585                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
586                 /* will this ever happen? */
587                 /* TODO add upper limit on these */
588                 BUG_ON(!new);
589         }
590
591         prefetch(new->hw);
592         return new;
593 }
594
595 static struct ioat_desc_sw *
596 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
597 {
598         struct ioat_desc_sw *new = NULL;
599
600         /*
601          * used.prev points to where to start processing
602          * used.next points to next free descriptor
603          * if used.prev == NULL, there are none waiting to be processed
604          * if used.next == used.prev.prev, there is only one free descriptor,
605          *      and we need to use it as a noop descriptor before
606          *      linking in a new set of descriptors, since the device
607          *      has probably already read the pointer to it
608          */
609         if (ioat_chan->used_desc.prev &&
610             ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
611
612                 struct ioat_desc_sw *desc = NULL;
613                 struct ioat_desc_sw *noop_desc = NULL;
614                 int i;
615
616                 /* set up the noop descriptor */
617                 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
618                 noop_desc->hw->size = 0;
619                 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
620                 noop_desc->hw->src_addr = 0;
621                 noop_desc->hw->dst_addr = 0;
622
623                 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
624                 ioat_chan->pending++;
625                 ioat_chan->dmacount++;
626
627                 /* get a few more descriptors */
628                 for (i = 16; i; i--) {
629                         desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
630                         BUG_ON(!desc);
631                         list_add_tail(&desc->node, ioat_chan->used_desc.next);
632
633                         desc->hw->next
634                                 = to_ioat_desc(desc->node.next)->async_tx.phys;
635                         to_ioat_desc(desc->node.prev)->hw->next
636                                 = desc->async_tx.phys;
637                         ioat_chan->desccount++;
638                 }
639
640                 ioat_chan->used_desc.next = noop_desc->node.next;
641         }
642         new = to_ioat_desc(ioat_chan->used_desc.next);
643         prefetch(new);
644         ioat_chan->used_desc.next = new->node.next;
645
646         if (ioat_chan->used_desc.prev == NULL)
647                 ioat_chan->used_desc.prev = &new->node;
648
649         prefetch(new->hw);
650         return new;
651 }
652
653 static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
654                                                 struct ioat_dma_chan *ioat_chan)
655 {
656         if (!ioat_chan)
657                 return NULL;
658
659         switch (ioat_chan->device->version) {
660         case IOAT_VER_1_2:
661                 return ioat1_dma_get_next_descriptor(ioat_chan);
662                 break;
663         case IOAT_VER_2_0:
664                 return ioat2_dma_get_next_descriptor(ioat_chan);
665                 break;
666         }
667         return NULL;
668 }
669
670 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
671                                                 struct dma_chan *chan,
672                                                 size_t len,
673                                                 int int_en)
674 {
675         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
676         struct ioat_desc_sw *new;
677
678         spin_lock_bh(&ioat_chan->desc_lock);
679         new = ioat_dma_get_next_descriptor(ioat_chan);
680         if (new)
                    new->len = len;
681         spin_unlock_bh(&ioat_chan->desc_lock);
682
683         return new ? &new->async_tx : NULL;
684 }
685
686 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
687                                                 struct dma_chan *chan,
688                                                 size_t len,
689                                                 int int_en)
690 {
691         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
692         struct ioat_desc_sw *new;
693
694         spin_lock_bh(&ioat_chan->desc_lock);
695         new = ioat2_dma_get_next_descriptor(ioat_chan);
696         if (new)
                    new->len = len;
697
698         /* leave ioat_chan->desc_lock set in version 2 path */
699         return new ? &new->async_tx : NULL;
700 }
701
702
703 /**
704  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
705  *                                 descriptors to hw
706  * @chan: DMA channel handle
707  */
708 static inline void __ioat1_dma_memcpy_issue_pending(
709                                                 struct ioat_dma_chan *ioat_chan)
710 {
711         ioat_chan->pending = 0;
712         writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
713 }
714
715 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
716 {
717         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
718
719         if (ioat_chan->pending != 0) {
720                 spin_lock_bh(&ioat_chan->desc_lock);
721                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
722                 spin_unlock_bh(&ioat_chan->desc_lock);
723         }
724 }
725
726 static inline void __ioat2_dma_memcpy_issue_pending(
727                                                 struct ioat_dma_chan *ioat_chan)
728 {
729         ioat_chan->pending = 0;
730         writew(ioat_chan->dmacount,
731                ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
732 }
733
734 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
735 {
736         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
737
738         if (ioat_chan->pending != 0) {
739                 spin_lock_bh(&ioat_chan->desc_lock);
740                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
741                 spin_unlock_bh(&ioat_chan->desc_lock);
742         }
743 }
744
745 static void ioat_dma_cleanup_tasklet(unsigned long data)
746 {
747         struct ioat_dma_chan *chan = (void *)data;
748         ioat_dma_memcpy_cleanup(chan);
749         writew(IOAT_CHANCTRL_INT_DISABLE,
750                chan->reg_base + IOAT_CHANCTRL_OFFSET);
751 }
752
753 /**
754  * ioat_dma_memcpy_cleanup - clean up finished descriptors
755  * @chan: ioat channel to be cleaned up
756  */
757 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
758 {
759         unsigned long phys_complete;
760         struct ioat_desc_sw *desc, *_desc;
761         dma_cookie_t cookie = 0;
762         unsigned long desc_phys;
763         struct ioat_desc_sw *latest_desc;
764
765         prefetch(ioat_chan->completion_virt);
766
767         if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
768                 return;
769
770         /* The completion writeback can happen at any time,
771            so reads by the driver need to be atomic operations.
772            The descriptor physical addresses are limited to 32 bits
773            when the CPU can only do a 32-bit mov. */
774
775 #if (BITS_PER_LONG == 64)
776         phys_complete =
777                 ioat_chan->completion_virt->full
778                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
779 #else
780         phys_complete =
781                 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
782 #endif
783
784         if ((ioat_chan->completion_virt->full
785                 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
786                                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
787                 dev_err(&ioat_chan->device->pdev->dev,
788                         "Channel halted, chanerr = %x\n",
789                         readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
790
791                 /* TODO do something to salvage the situation */
792         }
793
794         if (phys_complete == ioat_chan->last_completion) {
795                 spin_unlock_bh(&ioat_chan->cleanup_lock);
796                 return;
797         }
798
799         cookie = 0;
800         spin_lock_bh(&ioat_chan->desc_lock);
801         switch (ioat_chan->device->version) {
802         case IOAT_VER_1_2:
803                 list_for_each_entry_safe(desc, _desc,
804                                          &ioat_chan->used_desc, node) {
805
806                         /*
807                          * Incoming DMA requests may use multiple descriptors,
808                          * due to exceeding xfercap, perhaps. If so, only the
809                          * last one will have a cookie, and require unmapping.
810                          */
811                         if (desc->async_tx.cookie) {
812                                 cookie = desc->async_tx.cookie;
813
814                                 /*
815                                  * yes we are unmapping both _page and _single
816                                  * alloc'd regions with unmap_page. Is this
817                                  * *really* that bad?
818                                  */
819                                 pci_unmap_page(ioat_chan->device->pdev,
820                                                 pci_unmap_addr(desc, dst),
821                                                 pci_unmap_len(desc, len),
822                                                 PCI_DMA_FROMDEVICE);
823                                 pci_unmap_page(ioat_chan->device->pdev,
824                                                 pci_unmap_addr(desc, src),
825                                                 pci_unmap_len(desc, len),
826                                                 PCI_DMA_TODEVICE);
827
828                                 if (desc->async_tx.callback) {
829                                         desc->async_tx.callback(desc->async_tx.callback_param);
830                                         desc->async_tx.callback = NULL;
831                                 }
832                         }
833
834                         if (desc->async_tx.phys != phys_complete) {
835                                 /*
836                                  * a completed entry, but not the last, so clean
837                                  * up if the client is done with the descriptor
838                                  */
839                                 if (desc->async_tx.ack) {
840                                         list_del(&desc->node);
841                                         list_add_tail(&desc->node,
842                                                       &ioat_chan->free_desc);
843                                 } else
844                                         desc->async_tx.cookie = 0;
845                         } else {
846                                 /*
847                                  * last used desc. Do not remove, so we can
848                                  * append from it, but don't look at it next
849                                  * time, either
850                                  */
851                                 desc->async_tx.cookie = 0;
852
853                                 /* TODO check status bits? */
854                                 break;
855                         }
856                 }
857                 break;
858         case IOAT_VER_2_0:
859                 /* has some other thread already cleaned up? */
860                 if (ioat_chan->used_desc.prev == NULL)
861                         break;
862
863                 /* work backwards to find latest finished desc */
864                 desc = to_ioat_desc(ioat_chan->used_desc.next);
865                 latest_desc = NULL;
866                 do {
867                         desc = to_ioat_desc(desc->node.prev);
868                         desc_phys = (unsigned long)desc->async_tx.phys
869                                        & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
870                         if (desc_phys == phys_complete) {
871                                 latest_desc = desc;
872                                 break;
873                         }
874                 } while (&desc->node != ioat_chan->used_desc.prev);
875
876                 if (latest_desc != NULL) {
877
878                         /* work forwards to clear finished descriptors */
879                         for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
880                              &desc->node != latest_desc->node.next &&
881                              &desc->node != ioat_chan->used_desc.next;
882                              desc = to_ioat_desc(desc->node.next)) {
883                                 if (desc->async_tx.cookie) {
884                                         cookie = desc->async_tx.cookie;
885                                         desc->async_tx.cookie = 0;
886
887                                         pci_unmap_page(ioat_chan->device->pdev,
888                                                       pci_unmap_addr(desc, dst),
889                                                       pci_unmap_len(desc, len),
890                                                       PCI_DMA_FROMDEVICE);
891                                         pci_unmap_page(ioat_chan->device->pdev,
892                                                       pci_unmap_addr(desc, src),
893                                                       pci_unmap_len(desc, len),
894                                                       PCI_DMA_TODEVICE);
895
896                                         if (desc->async_tx.callback) {
897                                                 desc->async_tx.callback(desc->async_tx.callback_param);
898                                                 desc->async_tx.callback = NULL;
899                                         }
900                                 }
901                         }
902
903                         /* move used.prev up beyond those that are finished */
904                         if (&desc->node == ioat_chan->used_desc.next)
905                                 ioat_chan->used_desc.prev = NULL;
906                         else
907                                 ioat_chan->used_desc.prev = &desc->node;
908                 }
909                 break;
910         }
911
912         spin_unlock_bh(&ioat_chan->desc_lock);
913
914         ioat_chan->last_completion = phys_complete;
915         if (cookie != 0)
916                 ioat_chan->completed_cookie = cookie;
917
918         spin_unlock_bh(&ioat_chan->cleanup_lock);
919 }
920
921 static void ioat_dma_dependency_added(struct dma_chan *chan)
922 {
923         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
924         spin_lock_bh(&ioat_chan->desc_lock);
925         if (ioat_chan->pending == 0) {
926                 spin_unlock_bh(&ioat_chan->desc_lock);
927                 ioat_dma_memcpy_cleanup(ioat_chan);
928         } else
929                 spin_unlock_bh(&ioat_chan->desc_lock);
930 }
931
932 /**
933  * ioat_dma_is_complete - poll the status of an I/OAT DMA transaction
934  * @chan: IOAT DMA channel handle
935  * @cookie: DMA transaction identifier
936  * @done: if not %NULL, updated with last completed transaction
937  * @used: if not %NULL, updated with last used transaction
938  */
939 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
940                                             dma_cookie_t cookie,
941                                             dma_cookie_t *done,
942                                             dma_cookie_t *used)
943 {
944         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
945         dma_cookie_t last_used;
946         dma_cookie_t last_complete;
947         enum dma_status ret;
948
949         last_used = chan->cookie;
950         last_complete = ioat_chan->completed_cookie;
951
952         if (done)
953                 *done = last_complete;
954         if (used)
955                 *used = last_used;
956
957         ret = dma_async_is_complete(cookie, last_complete, last_used);
958         if (ret == DMA_SUCCESS)
959                 return ret;
960
961         ioat_dma_memcpy_cleanup(ioat_chan);
962
963         last_used = chan->cookie;
964         last_complete = ioat_chan->completed_cookie;
965
966         if (done)
967                 *done = last_complete;
968         if (used)
969                 *used = last_used;
970
971         return dma_async_is_complete(cookie, last_complete, last_used);
972 }
973
974 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
975 {
976         struct ioat_desc_sw *desc;
977
978         spin_lock_bh(&ioat_chan->desc_lock);
979
980         desc = ioat_dma_get_next_descriptor(ioat_chan);
981         desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
982                                 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
983                                 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
984         desc->hw->size = 0;
985         desc->hw->src_addr = 0;
986         desc->hw->dst_addr = 0;
987         desc->async_tx.ack = 1;
988         switch (ioat_chan->device->version) {
989         case IOAT_VER_1_2:
990                 desc->hw->next = 0;
991                 list_add_tail(&desc->node, &ioat_chan->used_desc);
992
993                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
994                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
995                 writel(((u64) desc->async_tx.phys) >> 32,
996                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
997
998                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
999                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
1000                 break;
1001         case IOAT_VER_2_0:
1002                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1003                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1004                 writel(((u64) desc->async_tx.phys) >> 32,
1005                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1006
1007                 ioat_chan->dmacount++;
1008                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1009                 break;
1010         }
1011         spin_unlock_bh(&ioat_chan->desc_lock);
1012 }
1013
1014 /*
1015  * Perform an I/OAT transaction to verify the HW works.
1016  */
1017 #define IOAT_TEST_SIZE 2000
1018
1019 static void ioat_dma_test_callback(void *dma_async_param)
1020 {
1021         printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
1022                         dma_async_param);
1023 }
1024
1025 /**
1026  * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
1027  * @device: device to be tested
1028  */
1029 static int ioat_dma_self_test(struct ioatdma_device *device)
1030 {
1031         int i;
1032         u8 *src;
1033         u8 *dest;
1034         struct dma_chan *dma_chan;
1035         struct dma_async_tx_descriptor *tx = NULL;
1036         dma_addr_t addr;
1037         dma_cookie_t cookie;
1038         int err = 0;
1039
1040         src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1041         if (!src)
1042                 return -ENOMEM;
1043         dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1044         if (!dest) {
1045                 kfree(src);
1046                 return -ENOMEM;
1047         }
1048
1049         /* Fill in src buffer */
1050         for (i = 0; i < IOAT_TEST_SIZE; i++)
1051                 src[i] = (u8)i;
1052
1053         /* Start copy, using first DMA channel */
1054         dma_chan = container_of(device->common.channels.next,
1055                                 struct dma_chan,
1056                                 device_node);
1057         if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
1058                 dev_err(&device->pdev->dev,
1059                         "selftest cannot allocate chan resource\n");
1060                 err = -ENODEV;
1061                 goto out;
1062         }
1063
1064         tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
1065         if (!tx) {
1066                 dev_err(&device->pdev->dev,
1067                         "Self-test prep failed, disabling\n");
1068                 err = -ENODEV;
1069                 goto free_resources;
1070         }
1071
1072         async_tx_ack(tx);
1073         addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
1074                               DMA_TO_DEVICE);
1075         tx->tx_set_src(addr, tx, 0);
1076         addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
1077                               DMA_FROM_DEVICE);
1078         tx->tx_set_dest(addr, tx, 0);
1079         tx->callback = ioat_dma_test_callback;
1080         tx->callback_param = (void *)0x8086;
1081         cookie = tx->tx_submit(tx);
1082         if (cookie < 0) {
1083                 dev_err(&device->pdev->dev,
1084                         "Self-test setup failed, disabling\n");
1085                 err = -ENODEV;
1086                 goto free_resources;
1087         }
1088         device->common.device_issue_pending(dma_chan);
1089         msleep(1);
1090
1091         if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1092                                         != DMA_SUCCESS) {
1093                 dev_err(&device->pdev->dev,
1094                         "Self-test copy timed out, disabling\n");
1095                 err = -ENODEV;
1096                 goto free_resources;
1097         }
1098         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1099                 dev_err(&device->pdev->dev,
1100                         "Self-test copy failed compare, disabling\n");
1101                 err = -ENODEV;
1102                 goto free_resources;
1103         }
1104
1105 free_resources:
1106         device->common.device_free_chan_resources(dma_chan);
1107 out:
1108         kfree(src);
1109         kfree(dest);
1110         return err;
1111 }
1112
1113 static char ioat_interrupt_style[32] = "msix";
1114 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1115                     sizeof(ioat_interrupt_style), 0644);
1116 MODULE_PARM_DESC(ioat_interrupt_style,
1117                  "set ioat interrupt style: msix (default), "
1118                  "msix-single-vector, msi, intx");
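/*
 * Illustrative usage (assuming the "ioatdma" module name): e.g.
 * "modprobe ioatdma ioat_interrupt_style=msi".  When the requested style
 * cannot be enabled, ioat_dma_setup_interrupts() below falls back toward
 * less capable modes, ending with legacy intx.
 */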
1119
1120 /**
1121  * ioat_dma_setup_interrupts - setup interrupt handler
1122  * @device: ioat device
1123  */
1124 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1125 {
1126         struct ioat_dma_chan *ioat_chan;
1127         int err, i, j, msixcnt;
1128         u8 intrctrl = 0;
1129
1130         if (!strcmp(ioat_interrupt_style, "msix"))
1131                 goto msix;
1132         if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1133                 goto msix_single_vector;
1134         if (!strcmp(ioat_interrupt_style, "msi"))
1135                 goto msi;
1136         if (!strcmp(ioat_interrupt_style, "intx"))
1137                 goto intx;
1138         dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
1139                 ioat_interrupt_style);
1140         goto err_no_irq;
1141
1142 msix:
1143         /* The number of MSI-X vectors should equal the number of channels */
1144         msixcnt = device->common.chancnt;
1145         for (i = 0; i < msixcnt; i++)
1146                 device->msix_entries[i].entry = i;
1147
1148         err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
1149         if (err < 0)
1150                 goto msi;
1151         if (err > 0)
1152                 goto msix_single_vector;
1153
1154         for (i = 0; i < msixcnt; i++) {
1155                 ioat_chan = ioat_lookup_chan_by_index(device, i);
1156                 err = request_irq(device->msix_entries[i].vector,
1157                                   ioat_dma_do_interrupt_msix,
1158                                   0, "ioat-msix", ioat_chan);
1159                 if (err) {
1160                         for (j = 0; j < i; j++) {
1161                                 ioat_chan =
1162                                         ioat_lookup_chan_by_index(device, j);
1163                                 free_irq(device->msix_entries[j].vector,
1164                                          ioat_chan);
1165                         }
1166                         goto msix_single_vector;
1167                 }
1168         }
1169         intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1170         device->irq_mode = msix_multi_vector;
1171         goto done;
1172
1173 msix_single_vector:
1174         device->msix_entries[0].entry = 0;
1175         err = pci_enable_msix(device->pdev, device->msix_entries, 1);
1176         if (err)
1177                 goto msi;
1178
1179         err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
1180                           0, "ioat-msix", device);
1181         if (err) {
1182                 pci_disable_msix(device->pdev);
1183                 goto msi;
1184         }
1185         device->irq_mode = msix_single_vector;
1186         goto done;
1187
1188 msi:
1189         err = pci_enable_msi(device->pdev);
1190         if (err)
1191                 goto intx;
1192
1193         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1194                           0, "ioat-msi", device);
1195         if (err) {
1196                 pci_disable_msi(device->pdev);
1197                 goto intx;
1198         }
1199         /*
1200          * CB 1.2 devices need a bit set in configuration space to enable MSI
1201          */
1202         if (device->version == IOAT_VER_1_2) {
1203                 u32 dmactrl;
1204                 pci_read_config_dword(device->pdev,
1205                                       IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1206                 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1207                 pci_write_config_dword(device->pdev,
1208                                        IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1209         }
1210         device->irq_mode = msi;
1211         goto done;
1212
1213 intx:
1214         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1215                           IRQF_SHARED, "ioat-intx", device);
1216         if (err)
1217                 goto err_no_irq;
1218         device->irq_mode = intx;
1219
1220 done:
1221         intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1222         writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1223         return 0;
1224
1225 err_no_irq:
1226         /* Disable all interrupt generation */
1227         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1228         dev_err(&device->pdev->dev, "no usable interrupts\n");
1229         device->irq_mode = none;
1230         return -1;
1231 }
1232
1233 /**
1234  * ioat_dma_remove_interrupts - remove whatever interrupts were set
1235  * @device: ioat device
1236  */
1237 static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
1238 {
1239         struct ioat_dma_chan *ioat_chan;
1240         int i;
1241
1242         /* Disable all interrupt generation */
1243         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1244
1245         switch (device->irq_mode) {
1246         case msix_multi_vector:
1247                 for (i = 0; i < device->common.chancnt; i++) {
1248                         ioat_chan = ioat_lookup_chan_by_index(device, i);
1249                         free_irq(device->msix_entries[i].vector, ioat_chan);
1250                 }
1251                 pci_disable_msix(device->pdev);
1252                 break;
1253         case msix_single_vector:
1254                 free_irq(device->msix_entries[0].vector, device);
1255                 pci_disable_msix(device->pdev);
1256                 break;
1257         case msi:
1258                 free_irq(device->pdev->irq, device);
1259                 pci_disable_msi(device->pdev);
1260                 break;
1261         case intx:
1262                 free_irq(device->pdev->irq, device);
1263                 break;
1264         case none:
1265                 dev_warn(&device->pdev->dev,
1266                          "call to %s without interrupts setup\n", __func__);
1267         }
1268         device->irq_mode = none;
1269 }
1270
1271 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1272                                       void __iomem *iobase)
1273 {
1274         int err;
1275         struct ioatdma_device *device;
1276
1277         device = kzalloc(sizeof(*device), GFP_KERNEL);
1278         if (!device) {
1279                 err = -ENOMEM;
1280                 goto err_kzalloc;
1281         }
1282         device->pdev = pdev;
1283         device->reg_base = iobase;
1284         device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1285
1286         /* DMA coherent memory pool for DMA descriptor allocations */
1287         device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1288                                            sizeof(struct ioat_dma_descriptor),
1289                                            64, 0);
1290         if (!device->dma_pool) {
1291                 err = -ENOMEM;
1292                 goto err_dma_pool;
1293         }
1294
1295         device->completion_pool = pci_pool_create("completion_pool", pdev,
1296                                                   sizeof(u64), SMP_CACHE_BYTES,
1297                                                   SMP_CACHE_BYTES);
1298         if (!device->completion_pool) {
1299                 err = -ENOMEM;
1300                 goto err_completion_pool;
1301         }
1302
1303         INIT_LIST_HEAD(&device->common.channels);
1304         ioat_dma_enumerate_channels(device);
1305
1306         device->common.device_alloc_chan_resources =
1307                                                 ioat_dma_alloc_chan_resources;
1308         device->common.device_free_chan_resources =
1309                                                 ioat_dma_free_chan_resources;
1310         device->common.dev = &pdev->dev;
1311
1312         dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
1313         device->common.device_is_tx_complete = ioat_dma_is_complete;
1314         device->common.device_dependency_added = ioat_dma_dependency_added;
1315         switch (device->version) {
1316         case IOAT_VER_1_2:
1317                 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1318                 device->common.device_issue_pending =
1319                                                 ioat1_dma_memcpy_issue_pending;
1320                 break;
1321         case IOAT_VER_2_0:
1322                 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1323                 device->common.device_issue_pending =
1324                                                 ioat2_dma_memcpy_issue_pending;
1325                 break;
1326         }
1327
1328         dev_err(&device->pdev->dev,
1329                 "Intel(R) I/OAT DMA Engine found,"
1330                 " %d channels, device version 0x%02x, driver version %s\n",
1331                 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1332
1333         err = ioat_dma_setup_interrupts(device);
1334         if (err)
1335                 goto err_setup_interrupts;
1336
1337         err = ioat_dma_self_test(device);
1338         if (err)
1339                 goto err_self_test;
1340
1341         dma_async_device_register(&device->common);
1342
1343         return device;
1344
1345 err_self_test:
1346         ioat_dma_remove_interrupts(device);
1347 err_setup_interrupts:
1348         pci_pool_destroy(device->completion_pool);
1349 err_completion_pool:
1350         pci_pool_destroy(device->dma_pool);
1351 err_dma_pool:
1352         kfree(device);
1353 err_kzalloc:
1354         dev_err(&pdev->dev,
1355                 "Intel(R) I/OAT DMA Engine initialization failed\n");
1356         return NULL;
1357 }
1358
1359 void ioat_dma_remove(struct ioatdma_device *device)
1360 {
1361         struct dma_chan *chan, *_chan;
1362         struct ioat_dma_chan *ioat_chan;
1363
1364         ioat_dma_remove_interrupts(device);
1365
1366         dma_async_device_unregister(&device->common);
1367
1368         pci_pool_destroy(device->dma_pool);
1369         pci_pool_destroy(device->completion_pool);
1370
1371         iounmap(device->reg_base);
1372         pci_release_regions(device->pdev);
1373         pci_disable_device(device->pdev);
1374
1375         list_for_each_entry_safe(chan, _chan,
1376                                  &device->common.channels, device_node) {
1377                 ioat_chan = to_ioat_chan(chan);
1378                 list_del(&chan->device_node);
1379                 kfree(ioat_chan);
1380         }
1381         kfree(device);
1382 }
1383