drivers/dma/ioat_dma.c
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2007 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27
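/*
 * Copies are requested through the generic dmaengine API rather than by
 * calling into this file directly: device_alloc_chan_resources,
 * device_prep_dma_memcpy, tx_set_src/tx_set_dest, tx_submit,
 * device_issue_pending and device_is_tx_complete.  ioat_dma_self_test()
 * below exercises that sequence end to end.
 */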
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include "ioatdma.h"
36 #include "ioatdma_registers.h"
37 #include "ioatdma_hw.h"
38
39 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
40 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
41 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43
44 static int ioat_pending_level = 4;
45 module_param(ioat_pending_level, int, 0644);
46 MODULE_PARM_DESC(ioat_pending_level,
47                  "high-water mark for pushing ioat descriptors (default: 4)");
48
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
52
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
57
58 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
59                                                 struct ioatdma_device *device,
60                                                 int index)
61 {
62         return device->idx[index];
63 }
64
65 /**
66  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
67  * @irq: interrupt id
68  * @data: interrupt data
69  */
70 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
71 {
72         struct ioatdma_device *instance = data;
73         struct ioat_dma_chan *ioat_chan;
74         unsigned long attnstatus;
75         int bit;
76         u8 intrctrl;
77
78         intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
79
80         if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
81                 return IRQ_NONE;
82
83         if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
84                 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
85                 return IRQ_NONE;
86         }
87
88         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
89         for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
90                 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
91                 tasklet_schedule(&ioat_chan->cleanup_task);
92         }
93
94         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
95         return IRQ_HANDLED;
96 }
97
98 /**
99  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
100  * @irq: interrupt id
101  * @data: interrupt data
102  */
103 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
104 {
105         struct ioat_dma_chan *ioat_chan = data;
106
107         tasklet_schedule(&ioat_chan->cleanup_task);
108
109         return IRQ_HANDLED;
110 }
111
112 static void ioat_dma_cleanup_tasklet(unsigned long data);
113
114 /**
115  * ioat_dma_enumerate_channels - find and initialize the device's channels
116  * @device: the device to be enumerated
117  */
118 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
119 {
120         u8 xfercap_scale;
121         u32 xfercap;
122         int i;
123         struct ioat_dma_chan *ioat_chan;
124
125         device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
127         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
128
129         for (i = 0; i < device->common.chancnt; i++) {
130                 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
131                 if (!ioat_chan) {
132                         device->common.chancnt = i;
133                         break;
134                 }
135
136                 ioat_chan->device = device;
137                 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138                 ioat_chan->xfercap = xfercap;
139                 ioat_chan->desccount = 0;
140                 if (ioat_chan->device->version != IOAT_VER_1_2) {
141                         writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142                                         | IOAT_DMA_DCA_ANY_CPU,
143                                 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
144                 }
145                 spin_lock_init(&ioat_chan->cleanup_lock);
146                 spin_lock_init(&ioat_chan->desc_lock);
147                 INIT_LIST_HEAD(&ioat_chan->free_desc);
148                 INIT_LIST_HEAD(&ioat_chan->used_desc);
149                 /* This should be made common somewhere in dmaengine.c */
150                 ioat_chan->common.device = &device->common;
151                 list_add_tail(&ioat_chan->common.device_node,
152                               &device->common.channels);
153                 device->idx[i] = ioat_chan;
154                 tasklet_init(&ioat_chan->cleanup_task,
155                              ioat_dma_cleanup_tasklet,
156                              (unsigned long) ioat_chan);
157                 tasklet_disable(&ioat_chan->cleanup_task);
158         }
159         return device->common.chancnt;
160 }
161
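/*
 * ioat_set_src and ioat_set_dest only record the DMA addresses chosen by
 * the client in the software descriptor; the addresses are copied into the
 * hardware descriptors later, in the tx_submit path.
 */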
162 static void ioat_set_src(dma_addr_t addr,
163                          struct dma_async_tx_descriptor *tx,
164                          int index)
165 {
166         tx_to_ioat_desc(tx)->src = addr;
167 }
168
169 static void ioat_set_dest(dma_addr_t addr,
170                           struct dma_async_tx_descriptor *tx,
171                           int index)
172 {
173         tx_to_ioat_desc(tx)->dst = addr;
174 }
175
176 /**
177  * ioat1_dma_memcpy_issue_pending - notify the hardware of descriptors
178  *                                  appended to the chain
179  * @chan: DMA channel handle
180  */
181 static inline void __ioat1_dma_memcpy_issue_pending(
182                                                 struct ioat_dma_chan *ioat_chan)
183 {
184         ioat_chan->pending = 0;
185         writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
186 }
187
188 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
189 {
190         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
191
192         if (ioat_chan->pending != 0) {
193                 spin_lock_bh(&ioat_chan->desc_lock);
194                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
195                 spin_unlock_bh(&ioat_chan->desc_lock);
196         }
197 }
198
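/*
 * Version 2 channels are kicked by writing the running descriptor count to
 * the DMACOUNT register rather than by issuing an append command; callers
 * must hold desc_lock.
 */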
199 static inline void __ioat2_dma_memcpy_issue_pending(
200                                                 struct ioat_dma_chan *ioat_chan)
201 {
202         ioat_chan->pending = 0;
203         writew(ioat_chan->dmacount,
204                ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
205 }
206
207 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
208 {
209         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
210
211         if (ioat_chan->pending != 0) {
212                 spin_lock_bh(&ioat_chan->desc_lock);
213                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
214                 spin_unlock_bh(&ioat_chan->desc_lock);
215         }
216 }
217
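/**
 * ioat1_tx_submit - attach a prepared transaction to the version 1 chain
 * @tx: descriptor returned by ioat1_dma_prep_memcpy
 *
 * Splits the copy into hardware descriptors of at most xfercap bytes,
 * links them onto the channel's used_desc list, assigns the next cookie
 * and, once ioat_pending_level descriptors are pending, tells the
 * hardware to append them.
 */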
218 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
219 {
220         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
221         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
222         struct ioat_desc_sw *prev, *new;
223         struct ioat_dma_descriptor *hw;
224         dma_cookie_t cookie;
225         LIST_HEAD(new_chain);
226         u32 copy;
227         size_t len;
228         dma_addr_t src, dst;
229         int orig_ack;
230         unsigned int desc_count = 0;
231
232         /* src, dst and len are stored in the initial descriptor */
233         len = first->len;
234         src = first->src;
235         dst = first->dst;
236         orig_ack = first->async_tx.ack;
237         new = first;
238
239         spin_lock_bh(&ioat_chan->desc_lock);
240         prev = to_ioat_desc(ioat_chan->used_desc.prev);
241         prefetch(prev->hw);
242         do {
243                 copy = min_t(size_t, len, ioat_chan->xfercap);
244
245                 new->async_tx.ack = 1;
246
247                 hw = new->hw;
248                 hw->size = copy;
249                 hw->ctl = 0;
250                 hw->src_addr = src;
251                 hw->dst_addr = dst;
252                 hw->next = 0;
253
254                 /* chain together the physical address list for the HW */
255                 wmb();
256                 prev->hw->next = (u64) new->async_tx.phys;
257
258                 len -= copy;
259                 dst += copy;
260                 src += copy;
261
262                 list_add_tail(&new->node, &new_chain);
263                 desc_count++;
264                 prev = new;
265         } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
266
267         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
268         if (new->async_tx.callback) {
269                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
270                 if (first != new) {
271                         /* move callback into the last desc */
272                         new->async_tx.callback = first->async_tx.callback;
273                         new->async_tx.callback_param
274                                         = first->async_tx.callback_param;
275                         first->async_tx.callback = NULL;
276                         first->async_tx.callback_param = NULL;
277                 }
278         }
279
280         new->tx_cnt = desc_count;
281         new->async_tx.ack = orig_ack; /* client is in control of this ack */
282
283         /* store the original values for use in later cleanup */
284         if (new != first) {
285                 new->src = first->src;
286                 new->dst = first->dst;
287                 new->len = first->len;
288         }
289
290         /* cookie incr and addition to used_list must be atomic */
291         cookie = ioat_chan->common.cookie;
292         cookie++;
293         if (cookie < 0)
294                 cookie = 1;
295         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
296
297         /* write address into NextDescriptor field of last desc in chain */
298         to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
299                                                         first->async_tx.phys;
300         __list_splice(&new_chain, ioat_chan->used_desc.prev);
301
302         ioat_chan->dmacount += desc_count;
303         ioat_chan->pending += desc_count;
304         if (ioat_chan->pending >= ioat_pending_level)
305                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
306         spin_unlock_bh(&ioat_chan->desc_lock);
307
308         return cookie;
309 }
310
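/**
 * ioat2_tx_submit - attach a prepared transaction to the version 2 ring
 * @tx: descriptor returned by ioat2_dma_prep_memcpy
 *
 * Fills in ring descriptors of at most xfercap bytes each, assigns the
 * next cookie and, once ioat_pending_level descriptors are pending,
 * updates the hardware's descriptor count.  Runs with desc_lock already
 * held by ioat2_dma_prep_memcpy and releases it before returning.
 */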
311 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
312 {
313         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
314         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
315         struct ioat_desc_sw *new;
316         struct ioat_dma_descriptor *hw;
317         dma_cookie_t cookie;
318         u32 copy;
319         size_t len;
320         dma_addr_t src, dst;
321         int orig_ack;
322         unsigned int desc_count = 0;
323
324         /* src, dst and len are stored in the initial descriptor */
325         len = first->len;
326         src = first->src;
327         dst = first->dst;
328         orig_ack = first->async_tx.ack;
329         new = first;
330
331         /*
332          * ioat_chan->desc_lock is still held in the version 2 path; it was
333          * taken in ioat2_dma_prep_memcpy and is released at the end of this function
334          */
335         do {
336                 copy = min_t(size_t, len, ioat_chan->xfercap);
337
338                 new->async_tx.ack = 1;
339
340                 hw = new->hw;
341                 hw->size = copy;
342                 hw->ctl = 0;
343                 hw->src_addr = src;
344                 hw->dst_addr = dst;
345
346                 len -= copy;
347                 dst += copy;
348                 src += copy;
349                 desc_count++;
350         } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
351
352         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
353         if (new->async_tx.callback) {
354                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
355                 if (first != new) {
356                         /* move callback into to last desc */
357                         new->async_tx.callback = first->async_tx.callback;
358                         new->async_tx.callback_param
359                                         = first->async_tx.callback_param;
360                         first->async_tx.callback = NULL;
361                         first->async_tx.callback_param = NULL;
362                 }
363         }
364
365         new->tx_cnt = desc_count;
366         new->async_tx.ack = orig_ack; /* client is in control of this ack */
367
368         /* store the original values for use in later cleanup */
369         if (new != first) {
370                 new->src = first->src;
371                 new->dst = first->dst;
372                 new->len = first->len;
373         }
374
375         /* cookie incr and addition to used_list must be atomic */
376         cookie = ioat_chan->common.cookie;
377         cookie++;
378         if (cookie < 0)
379                 cookie = 1;
380         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
381
382         ioat_chan->dmacount += desc_count;
383         ioat_chan->pending += desc_count;
384         if (ioat_chan->pending >= ioat_pending_level)
385                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
386         spin_unlock_bh(&ioat_chan->desc_lock);
387
388         return cookie;
389 }
390
391 /**
392  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
393  * @ioat_chan: the channel supplying the memory pool for the descriptors
394  * @flags: allocation flags
395  */
396 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
397                                         struct ioat_dma_chan *ioat_chan,
398                                         gfp_t flags)
399 {
400         struct ioat_dma_descriptor *desc;
401         struct ioat_desc_sw *desc_sw;
402         struct ioatdma_device *ioatdma_device;
403         dma_addr_t phys;
404
405         ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
406         desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
407         if (unlikely(!desc))
408                 return NULL;
409
410         desc_sw = kzalloc(sizeof(*desc_sw), flags);
411         if (unlikely(!desc_sw)) {
412                 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
413                 return NULL;
414         }
415
416         memset(desc, 0, sizeof(*desc));
417         dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
418         desc_sw->async_tx.tx_set_src = ioat_set_src;
419         desc_sw->async_tx.tx_set_dest = ioat_set_dest;
420         switch (ioat_chan->device->version) {
421         case IOAT_VER_1_2:
422                 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
423                 break;
424         case IOAT_VER_2_0:
425                 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
426                 break;
427         }
428         INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
429
430         desc_sw->hw = desc;
431         desc_sw->async_tx.phys = phys;
432
433         return desc_sw;
434 }
435
436 static int ioat_initial_desc_count = 256;
437 module_param(ioat_initial_desc_count, int, 0644);
438 MODULE_PARM_DESC(ioat_initial_desc_count,
439                  "initial descriptors per channel (default: 256)");
440
441 /**
442  * ioat2_dma_massage_chan_desc - link the descriptors into a circle
443  * @ioat_chan: the channel to be massaged
444  */
445 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
446 {
447         struct ioat_desc_sw *desc, *_desc;
448
449         /* setup used_desc */
450         ioat_chan->used_desc.next = ioat_chan->free_desc.next;
451         ioat_chan->used_desc.prev = NULL;
452
453         /* pull free_desc out of the circle so that every node is a hw
454          * descriptor, but leave it pointing to the list
455          */
456         ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
457         ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
458
459         /* circle link the hw descriptors */
460         desc = to_ioat_desc(ioat_chan->free_desc.next);
461         desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
462         list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
463                 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
464         }
465 }
466
467 /**
468  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
469  * @chan: the channel to be filled out
470  */
471 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
472 {
473         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
474         struct ioat_desc_sw *desc;
475         u16 chanctrl;
476         u32 chanerr;
477         int i;
478         LIST_HEAD(tmp_list);
479
480         /* have we already been set up? */
481         if (!list_empty(&ioat_chan->free_desc))
482                 return ioat_chan->desccount;
483
484         /* Setup register to interrupt and write completion status on error */
485         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
486                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
487                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
488         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
489
490         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
491         if (chanerr) {
492                 dev_err(&ioat_chan->device->pdev->dev,
493                         "CHANERR = %x, clearing\n", chanerr);
494                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
495         }
496
497         /* Allocate descriptors */
498         for (i = 0; i < ioat_initial_desc_count; i++) {
499                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
500                 if (!desc) {
501                         dev_err(&ioat_chan->device->pdev->dev,
502                                 "Only %d initial descriptors\n", i);
503                         break;
504                 }
505                 list_add_tail(&desc->node, &tmp_list);
506         }
507         spin_lock_bh(&ioat_chan->desc_lock);
508         ioat_chan->desccount = i;
509         list_splice(&tmp_list, &ioat_chan->free_desc);
510         if (ioat_chan->device->version != IOAT_VER_1_2)
511                 ioat2_dma_massage_chan_desc(ioat_chan);
512         spin_unlock_bh(&ioat_chan->desc_lock);
513
514         /* allocate a completion writeback area */
515         /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
516         ioat_chan->completion_virt =
517                 pci_pool_alloc(ioat_chan->device->completion_pool,
518                                GFP_KERNEL,
519                                &ioat_chan->completion_addr);
520         memset(ioat_chan->completion_virt, 0,
521                sizeof(*ioat_chan->completion_virt));
522         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
523                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
524         writel(((u64) ioat_chan->completion_addr) >> 32,
525                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
526
527         tasklet_enable(&ioat_chan->cleanup_task);
528         ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
529         return ioat_chan->desccount;
530 }
531
532 /**
533  * ioat_dma_free_chan_resources - release all the descriptors
534  * @chan: the channel to be cleaned
535  */
536 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
537 {
538         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
539         struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
540         struct ioat_desc_sw *desc, *_desc;
541         int in_use_descs = 0;
542
543         tasklet_disable(&ioat_chan->cleanup_task);
544         ioat_dma_memcpy_cleanup(ioat_chan);
545
546         /* Delay 100ms after reset to allow internal DMA logic to quiesce
547          * before removing DMA descriptor resources.
548          */
549         writeb(IOAT_CHANCMD_RESET,
550                ioat_chan->reg_base
551                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
552         mdelay(100);
553
554         spin_lock_bh(&ioat_chan->desc_lock);
555         switch (ioat_chan->device->version) {
556         case IOAT_VER_1_2:
557                 list_for_each_entry_safe(desc, _desc,
558                                          &ioat_chan->used_desc, node) {
559                         in_use_descs++;
560                         list_del(&desc->node);
561                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
562                                       desc->async_tx.phys);
563                         kfree(desc);
564                 }
565                 list_for_each_entry_safe(desc, _desc,
566                                          &ioat_chan->free_desc, node) {
567                         list_del(&desc->node);
568                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
569                                       desc->async_tx.phys);
570                         kfree(desc);
571                 }
572                 break;
573         case IOAT_VER_2_0:
574                 list_for_each_entry_safe(desc, _desc,
575                                          ioat_chan->free_desc.next, node) {
576                         list_del(&desc->node);
577                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
578                                       desc->async_tx.phys);
579                         kfree(desc);
580                 }
581                 desc = to_ioat_desc(ioat_chan->free_desc.next);
582                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
583                               desc->async_tx.phys);
584                 kfree(desc);
585                 INIT_LIST_HEAD(&ioat_chan->free_desc);
586                 INIT_LIST_HEAD(&ioat_chan->used_desc);
587                 break;
588         }
589         spin_unlock_bh(&ioat_chan->desc_lock);
590
591         pci_pool_free(ioatdma_device->completion_pool,
592                       ioat_chan->completion_virt,
593                       ioat_chan->completion_addr);
594
595         /* one is ok since we left it on the channel on purpose */
596         if (in_use_descs > 1)
597                 dev_err(&ioat_chan->device->pdev->dev,
598                         "Freeing %d in use descriptors!\n",
599                         in_use_descs - 1);
600
601         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
602         ioat_chan->pending = 0;
603         ioat_chan->dmacount = 0;
604 }
605
606 /**
607  * ioat1_dma_get_next_descriptor - return the next available descriptor
608  * @ioat_chan: IOAT DMA channel handle
609  *
610  * Gets the next descriptor from the chain, and must be called with the
611  * channel's desc_lock held.  Allocates more descriptors if the channel
612  * has run out.
613  */
614 static struct ioat_desc_sw *
615 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
616 {
617         struct ioat_desc_sw *new;
618
619         if (!list_empty(&ioat_chan->free_desc)) {
620                 new = to_ioat_desc(ioat_chan->free_desc.next);
621                 list_del(&new->node);
622         } else {
623                 /* try to get another desc */
624                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
625                 if (!new) {
626                         dev_err(&ioat_chan->device->pdev->dev,
627                                 "alloc failed\n");
628                         return NULL;
629                 }
630         }
631
632         prefetch(new->hw);
633         return new;
634 }
635
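/*
 * Version 2 counterpart of ioat1_dma_get_next_descriptor: hand out the next
 * descriptor in the ring.  When only one free slot is left it is turned into
 * a noop descriptor and the ring is grown, since the hardware may already
 * have read the pointer to it.  Called with desc_lock held.
 */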
636 static struct ioat_desc_sw *
637 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
638 {
639         struct ioat_desc_sw *new;
640
641         /*
642          * used.prev points to where to start processing
643          * used.next points to next free descriptor
644          * if used.prev == NULL, there are none waiting to be processed
645          * if used.next == used.prev.prev, there is only one free descriptor,
646          *      and we need to use it as a noop descriptor before
647          *      linking in a new set of descriptors, since the device
648          *      has probably already read the pointer to it
649          */
650         if (ioat_chan->used_desc.prev &&
651             ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
652
653                 struct ioat_desc_sw *desc;
654                 struct ioat_desc_sw *noop_desc;
655                 int i;
656
657                 /* set up the noop descriptor */
658                 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
659                 noop_desc->hw->size = 0;
660                 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
661                 noop_desc->hw->src_addr = 0;
662                 noop_desc->hw->dst_addr = 0;
663
664                 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
665                 ioat_chan->pending++;
666                 ioat_chan->dmacount++;
667
668                 /* try to get a few more descriptors */
669                 for (i = 16; i; i--) {
670                         desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
671                         if (!desc) {
672                                 dev_err(&ioat_chan->device->pdev->dev,
673                                         "alloc failed\n");
674                                 break;
675                         }
676                         list_add_tail(&desc->node, ioat_chan->used_desc.next);
677
678                         desc->hw->next
679                                 = to_ioat_desc(desc->node.next)->async_tx.phys;
680                         to_ioat_desc(desc->node.prev)->hw->next
681                                 = desc->async_tx.phys;
682                         ioat_chan->desccount++;
683                 }
684
685                 ioat_chan->used_desc.next = noop_desc->node.next;
686         }
687         new = to_ioat_desc(ioat_chan->used_desc.next);
688         prefetch(new);
689         ioat_chan->used_desc.next = new->node.next;
690
691         if (ioat_chan->used_desc.prev == NULL)
692                 ioat_chan->used_desc.prev = &new->node;
693
694         prefetch(new->hw);
695         return new;
696 }
697
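/* dispatch to the version-specific get_next_descriptor implementation */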
698 static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
699                                                 struct ioat_dma_chan *ioat_chan)
700 {
701         if (!ioat_chan)
702                 return NULL;
703
704         switch (ioat_chan->device->version) {
705         case IOAT_VER_1_2:
706                 return ioat1_dma_get_next_descriptor(ioat_chan);
707                 break;
708         case IOAT_VER_2_0:
709                 return ioat2_dma_get_next_descriptor(ioat_chan);
710                 break;
711         }
712         return NULL;
713 }
714
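/*
 * The prep_memcpy routines only reserve a software descriptor and record
 * the requested length; the source and destination addresses arrive later
 * via tx_set_src/tx_set_dest, and the hardware chain is built in tx_submit.
 */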
715 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
716                                                 struct dma_chan *chan,
717                                                 size_t len,
718                                                 int int_en)
719 {
720         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
721         struct ioat_desc_sw *new;
722
723         spin_lock_bh(&ioat_chan->desc_lock);
724         new = ioat_dma_get_next_descriptor(ioat_chan);
725         spin_unlock_bh(&ioat_chan->desc_lock);
726
727         if (new) {
728                 new->len = len;
729                 return &new->async_tx;
730         } else
731                 return NULL;
732 }
733
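/*
 * Same as ioat1_dma_prep_memcpy, except that desc_lock is deliberately
 * left held until ioat2_tx_submit releases it.
 */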
734 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
735                                                 struct dma_chan *chan,
736                                                 size_t len,
737                                                 int int_en)
738 {
739         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
740         struct ioat_desc_sw *new;
741
742         spin_lock_bh(&ioat_chan->desc_lock);
743         new = ioat2_dma_get_next_descriptor(ioat_chan);
744
745         /*
746          * leave ioat_chan->desc_lock held in the ioat 2 path;
747          * it will get unlocked at the end of ioat2_tx_submit
748          */
749
750         if (new) {
751                 new->len = len;
752                 return &new->async_tx;
753         } else
754                 return NULL;
755 }
756
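/* per-channel tasklet, scheduled from the interrupt handlers above */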
757 static void ioat_dma_cleanup_tasklet(unsigned long data)
758 {
759         struct ioat_dma_chan *chan = (void *)data;
760         ioat_dma_memcpy_cleanup(chan);
761         writew(IOAT_CHANCTRL_INT_DISABLE,
762                chan->reg_base + IOAT_CHANCTRL_OFFSET);
763 }
764
765 /**
766  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
767  * @chan: ioat channel to be cleaned up
768  */
769 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
770 {
771         unsigned long phys_complete;
772         struct ioat_desc_sw *desc, *_desc;
773         dma_cookie_t cookie = 0;
774         unsigned long desc_phys;
775         struct ioat_desc_sw *latest_desc;
776
777         prefetch(ioat_chan->completion_virt);
778
779         if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
780                 return;
781
782         /* The completion writeback can happen at any time, so reads
783            by the driver need to be atomic operations.  The descriptor
784            physical addresses are limited to 32 bits when the CPU can
785            only do a 32-bit mov. */
786
787 #if (BITS_PER_LONG == 64)
788         phys_complete =
789                 ioat_chan->completion_virt->full
790                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
791 #else
792         phys_complete =
793                 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
794 #endif
795
796         if ((ioat_chan->completion_virt->full
797                 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
798                                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
799                 dev_err(&ioat_chan->device->pdev->dev,
800                         "Channel halted, chanerr = %x\n",
801                         readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
802
803                 /* TODO do something to salvage the situation */
804         }
805
806         if (phys_complete == ioat_chan->last_completion) {
807                 spin_unlock_bh(&ioat_chan->cleanup_lock);
808                 return;
809         }
810
811         cookie = 0;
812         spin_lock_bh(&ioat_chan->desc_lock);
813         switch (ioat_chan->device->version) {
814         case IOAT_VER_1_2:
815                 list_for_each_entry_safe(desc, _desc,
816                                          &ioat_chan->used_desc, node) {
817
818                         /*
819                          * Incoming DMA requests may use multiple descriptors,
820                          * due to exceeding xfercap, perhaps. If so, only the
821                          * last one will have a cookie, and require unmapping.
822                          */
823                         if (desc->async_tx.cookie) {
824                                 cookie = desc->async_tx.cookie;
825
826                                 /*
827                                  * yes we are unmapping both _page and _single
828                                  * alloc'd regions with unmap_page. Is this
829                                  * *really* that bad?
830                                  */
831                                 pci_unmap_page(ioat_chan->device->pdev,
832                                                 pci_unmap_addr(desc, dst),
833                                                 pci_unmap_len(desc, len),
834                                                 PCI_DMA_FROMDEVICE);
835                                 pci_unmap_page(ioat_chan->device->pdev,
836                                                 pci_unmap_addr(desc, src),
837                                                 pci_unmap_len(desc, len),
838                                                 PCI_DMA_TODEVICE);
839
840                                 if (desc->async_tx.callback) {
841                                         desc->async_tx.callback(desc->async_tx.callback_param);
842                                         desc->async_tx.callback = NULL;
843                                 }
844                         }
845
846                         if (desc->async_tx.phys != phys_complete) {
847                                 /*
848                                  * a completed entry, but not the last, so clean
849                                  * up if the client is done with the descriptor
850                                  */
851                                 if (desc->async_tx.ack) {
852                                         list_del(&desc->node);
853                                         list_add_tail(&desc->node,
854                                                       &ioat_chan->free_desc);
855                                 } else
856                                         desc->async_tx.cookie = 0;
857                         } else {
858                                 /*
859                                  * last used desc. Do not remove, so we can
860                                  * append from it, but don't look at it next
861                                  * time, either
862                                  */
863                                 desc->async_tx.cookie = 0;
864
865                                 /* TODO check status bits? */
866                                 break;
867                         }
868                 }
869                 break;
870         case IOAT_VER_2_0:
871                 /* has some other thread already cleaned up? */
872                 if (ioat_chan->used_desc.prev == NULL)
873                         break;
874
875                 /* work backwards to find latest finished desc */
876                 desc = to_ioat_desc(ioat_chan->used_desc.next);
877                 latest_desc = NULL;
878                 do {
879                         desc = to_ioat_desc(desc->node.prev);
880                         desc_phys = (unsigned long)desc->async_tx.phys
881                                        & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
882                         if (desc_phys == phys_complete) {
883                                 latest_desc = desc;
884                                 break;
885                         }
886                 } while (&desc->node != ioat_chan->used_desc.prev);
887
888                 if (latest_desc != NULL) {
889
890                         /* work forwards to clear finished descriptors */
891                         for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
892                              &desc->node != latest_desc->node.next &&
893                              &desc->node != ioat_chan->used_desc.next;
894                              desc = to_ioat_desc(desc->node.next)) {
895                                 if (desc->async_tx.cookie) {
896                                         cookie = desc->async_tx.cookie;
897                                         desc->async_tx.cookie = 0;
898
899                                         pci_unmap_page(ioat_chan->device->pdev,
900                                                       pci_unmap_addr(desc, dst),
901                                                       pci_unmap_len(desc, len),
902                                                       PCI_DMA_FROMDEVICE);
903                                         pci_unmap_page(ioat_chan->device->pdev,
904                                                       pci_unmap_addr(desc, src),
905                                                       pci_unmap_len(desc, len),
906                                                       PCI_DMA_TODEVICE);
907
908                                         if (desc->async_tx.callback) {
909                                                 desc->async_tx.callback(desc->async_tx.callback_param);
910                                                 desc->async_tx.callback = NULL;
911                                         }
912                                 }
913                         }
914
915                         /* move used.prev up beyond those that are finished */
916                         if (&desc->node == ioat_chan->used_desc.next)
917                                 ioat_chan->used_desc.prev = NULL;
918                         else
919                                 ioat_chan->used_desc.prev = &desc->node;
920                 }
921                 break;
922         }
923
924         spin_unlock_bh(&ioat_chan->desc_lock);
925
926         ioat_chan->last_completion = phys_complete;
927         if (cookie != 0)
928                 ioat_chan->completed_cookie = cookie;
929
930         spin_unlock_bh(&ioat_chan->cleanup_lock);
931 }
932
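/*
 * dmaengine dependency hook: if nothing is pending on this channel, run
 * cleanup immediately so that dependent operations can make progress.
 */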
933 static void ioat_dma_dependency_added(struct dma_chan *chan)
934 {
935         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
936         spin_lock_bh(&ioat_chan->desc_lock);
937         if (ioat_chan->pending == 0) {
938                 spin_unlock_bh(&ioat_chan->desc_lock);
939                 ioat_dma_memcpy_cleanup(ioat_chan);
940         } else
941                 spin_unlock_bh(&ioat_chan->desc_lock);
942 }
943
944 /**
945  * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
946  * @chan: IOAT DMA channel handle
947  * @cookie: DMA transaction identifier
948  * @done: if not %NULL, updated with last completed transaction
949  * @used: if not %NULL, updated with last used transaction
950  */
951 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
952                                             dma_cookie_t cookie,
953                                             dma_cookie_t *done,
954                                             dma_cookie_t *used)
955 {
956         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
957         dma_cookie_t last_used;
958         dma_cookie_t last_complete;
959         enum dma_status ret;
960
961         last_used = chan->cookie;
962         last_complete = ioat_chan->completed_cookie;
963
964         if (done)
965                 *done = last_complete;
966         if (used)
967                 *used = last_used;
968
969         ret = dma_async_is_complete(cookie, last_complete, last_used);
970         if (ret == DMA_SUCCESS)
971                 return ret;
972
973         ioat_dma_memcpy_cleanup(ioat_chan);
974
975         last_used = chan->cookie;
976         last_complete = ioat_chan->completed_cookie;
977
978         if (done)
979                 *done = last_complete;
980         if (used)
981                 *used = last_used;
982
983         return dma_async_is_complete(cookie, last_complete, last_used);
984 }
985
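/*
 * Write a NULL descriptor to the channel and start it, handing the freshly
 * allocated descriptor chain over to the hardware.
 */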
986 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
987 {
988         struct ioat_desc_sw *desc;
989
990         spin_lock_bh(&ioat_chan->desc_lock);
991
992         desc = ioat_dma_get_next_descriptor(ioat_chan);
993         desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
994                                 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
995                                 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
996         desc->hw->size = 0;
997         desc->hw->src_addr = 0;
998         desc->hw->dst_addr = 0;
999         desc->async_tx.ack = 1;
1000         switch (ioat_chan->device->version) {
1001         case IOAT_VER_1_2:
1002                 desc->hw->next = 0;
1003                 list_add_tail(&desc->node, &ioat_chan->used_desc);
1004
1005                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1006                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
1007                 writel(((u64) desc->async_tx.phys) >> 32,
1008                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
1009
1010                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
1011                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
1012                 break;
1013         case IOAT_VER_2_0:
1014                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1015                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1016                 writel(((u64) desc->async_tx.phys) >> 32,
1017                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1018
1019                 ioat_chan->dmacount++;
1020                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1021                 break;
1022         }
1023         spin_unlock_bh(&ioat_chan->desc_lock);
1024 }
1025
1026 /*
1027  * Perform an IOAT transaction to verify the HW works.
1028  */
1029 #define IOAT_TEST_SIZE 2000
1030
1031 static void ioat_dma_test_callback(void *dma_async_param)
1032 {
1033         printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
1034                 dma_async_param);
1035 }
1036
1037 /**
1038  * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
1039  * @device: device to be tested
1040  */
1041 static int ioat_dma_self_test(struct ioatdma_device *device)
1042 {
1043         int i;
1044         u8 *src;
1045         u8 *dest;
1046         struct dma_chan *dma_chan;
1047         struct dma_async_tx_descriptor *tx;
1048         dma_addr_t addr;
1049         dma_cookie_t cookie;
1050         int err = 0;
1051
1052         src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1053         if (!src)
1054                 return -ENOMEM;
1055         dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1056         if (!dest) {
1057                 kfree(src);
1058                 return -ENOMEM;
1059         }
1060
1061         /* Fill in src buffer */
1062         for (i = 0; i < IOAT_TEST_SIZE; i++)
1063                 src[i] = (u8)i;
1064
1065         /* Start copy, using first DMA channel */
1066         dma_chan = container_of(device->common.channels.next,
1067                                 struct dma_chan,
1068                                 device_node);
1069         if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
1070                 dev_err(&device->pdev->dev,
1071                         "selftest cannot allocate chan resource\n");
1072                 err = -ENODEV;
1073                 goto out;
1074         }
1075
1076         tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
1077         if (!tx) {
1078                 dev_err(&device->pdev->dev,
1079                         "Self-test prep failed, disabling\n");
1080                 err = -ENODEV;
1081                 goto free_resources;
1082         }
1083
1084         async_tx_ack(tx);
1085         addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
1086                               DMA_TO_DEVICE);
1087         tx->tx_set_src(addr, tx, 0);
1088         addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
1089                               DMA_FROM_DEVICE);
1090         tx->tx_set_dest(addr, tx, 0);
1091         tx->callback = ioat_dma_test_callback;
1092         tx->callback_param = (void *)0x8086;
1093         cookie = tx->tx_submit(tx);
1094         if (cookie < 0) {
1095                 dev_err(&device->pdev->dev,
1096                         "Self-test setup failed, disabling\n");
1097                 err = -ENODEV;
1098                 goto free_resources;
1099         }
1100         device->common.device_issue_pending(dma_chan);
1101         msleep(1);
1102
1103         if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1104                                         != DMA_SUCCESS) {
1105                 dev_err(&device->pdev->dev,
1106                         "Self-test copy timed out, disabling\n");
1107                 err = -ENODEV;
1108                 goto free_resources;
1109         }
1110         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1111                 dev_err(&device->pdev->dev,
1112                         "Self-test copy failed compare, disabling\n");
1113                 err = -ENODEV;
1114                 goto free_resources;
1115         }
1116
1117 free_resources:
1118         device->common.device_free_chan_resources(dma_chan);
1119 out:
1120         kfree(src);
1121         kfree(dest);
1122         return err;
1123 }
1124
1125 static char ioat_interrupt_style[32] = "msix";
1126 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1127                     sizeof(ioat_interrupt_style), 0644);
1128 MODULE_PARM_DESC(ioat_interrupt_style,
1129                  "set ioat interrupt style: msix (default), "
1130                  "msix-single-vector, msi, intx");
1131
1132 /**
1133  * ioat_dma_setup_interrupts - setup interrupt handler
1134  * @device: ioat device
1135  */
1136 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1137 {
1138         struct ioat_dma_chan *ioat_chan;
1139         int err, i, j, msixcnt;
1140         u8 intrctrl = 0;
1141
1142         if (!strcmp(ioat_interrupt_style, "msix"))
1143                 goto msix;
1144         if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1145                 goto msix_single_vector;
1146         if (!strcmp(ioat_interrupt_style, "msi"))
1147                 goto msi;
1148         if (!strcmp(ioat_interrupt_style, "intx"))
1149                 goto intx;
1150         dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
1151                 ioat_interrupt_style);
1152         goto err_no_irq;
1153
1154 msix:
1155         /* The number of MSI-X vectors should equal the number of channels */
1156         msixcnt = device->common.chancnt;
1157         for (i = 0; i < msixcnt; i++)
1158                 device->msix_entries[i].entry = i;
1159
1160         err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
1161         if (err < 0)
1162                 goto msi;
1163         if (err > 0)
1164                 goto msix_single_vector;
1165
1166         for (i = 0; i < msixcnt; i++) {
1167                 ioat_chan = ioat_lookup_chan_by_index(device, i);
1168                 err = request_irq(device->msix_entries[i].vector,
1169                                   ioat_dma_do_interrupt_msix,
1170                                   0, "ioat-msix", ioat_chan);
1171                 if (err) {
1172                         for (j = 0; j < i; j++) {
1173                                 ioat_chan =
1174                                         ioat_lookup_chan_by_index(device, j);
1175                                 free_irq(device->msix_entries[j].vector,
1176                                          ioat_chan);
1177                         }
1178                         goto msix_single_vector;
1179                 }
1180         }
1181         intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1182         device->irq_mode = msix_multi_vector;
1183         goto done;
1184
1185 msix_single_vector:
1186         device->msix_entries[0].entry = 0;
1187         err = pci_enable_msix(device->pdev, device->msix_entries, 1);
1188         if (err)
1189                 goto msi;
1190
1191         err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
1192                           0, "ioat-msix", device);
1193         if (err) {
1194                 pci_disable_msix(device->pdev);
1195                 goto msi;
1196         }
1197         device->irq_mode = msix_single_vector;
1198         goto done;
1199
1200 msi:
1201         err = pci_enable_msi(device->pdev);
1202         if (err)
1203                 goto intx;
1204
1205         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1206                           0, "ioat-msi", device);
1207         if (err) {
1208                 pci_disable_msi(device->pdev);
1209                 goto intx;
1210         }
1211         /*
1212          * CB 1.2 devices need a bit set in configuration space to enable MSI
1213          */
1214         if (device->version == IOAT_VER_1_2) {
1215                 u32 dmactrl;
1216                 pci_read_config_dword(device->pdev,
1217                                       IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1218                 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1219                 pci_write_config_dword(device->pdev,
1220                                        IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1221         }
1222         device->irq_mode = msi;
1223         goto done;
1224
1225 intx:
1226         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1227                           IRQF_SHARED, "ioat-intx", device);
1228         if (err)
1229                 goto err_no_irq;
1230         device->irq_mode = intx;
1231
1232 done:
1233         intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1234         writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1235         return 0;
1236
1237 err_no_irq:
1238         /* Disable all interrupt generation */
1239         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1240         dev_err(&device->pdev->dev, "no usable interrupts\n");
1241         device->irq_mode = none;
1242         return -1;
1243 }
1244
1245 /**
1246  * ioat_dma_remove_interrupts - remove whatever interrupts were set
1247  * @device: ioat device
1248  */
1249 static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
1250 {
1251         struct ioat_dma_chan *ioat_chan;
1252         int i;
1253
1254         /* Disable all interrupt generation */
1255         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1256
1257         switch (device->irq_mode) {
1258         case msix_multi_vector:
1259                 for (i = 0; i < device->common.chancnt; i++) {
1260                         ioat_chan = ioat_lookup_chan_by_index(device, i);
1261                         free_irq(device->msix_entries[i].vector, ioat_chan);
1262                 }
1263                 pci_disable_msix(device->pdev);
1264                 break;
1265         case msix_single_vector:
1266                 free_irq(device->msix_entries[0].vector, device);
1267                 pci_disable_msix(device->pdev);
1268                 break;
1269         case msi:
1270                 free_irq(device->pdev->irq, device);
1271                 pci_disable_msi(device->pdev);
1272                 break;
1273         case intx:
1274                 free_irq(device->pdev->irq, device);
1275                 break;
1276         case none:
1277                 dev_warn(&device->pdev->dev,
1278                          "call to %s without interrupts setup\n", __func__);
1279         }
1280         device->irq_mode = none;
1281 }
1282
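/**
 * ioat_dma_probe - set up a newly discovered I/OAT device
 * @pdev: the device's PCI handle
 * @iobase: mapped MMIO register space
 *
 * Allocates the ioatdma_device, creates the descriptor and completion DMA
 * pools, enumerates the channels, sets up interrupts, runs a self-test copy
 * and registers the device with the dmaengine core.  Returns NULL on error.
 */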
1283 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1284                                       void __iomem *iobase)
1285 {
1286         int err;
1287         struct ioatdma_device *device;
1288
1289         device = kzalloc(sizeof(*device), GFP_KERNEL);
1290         if (!device) {
1291                 err = -ENOMEM;
1292                 goto err_kzalloc;
1293         }
1294         device->pdev = pdev;
1295         device->reg_base = iobase;
1296         device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1297
1298         /* DMA coherent memory pool for DMA descriptor allocations */
1299         device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1300                                            sizeof(struct ioat_dma_descriptor),
1301                                            64, 0);
1302         if (!device->dma_pool) {
1303                 err = -ENOMEM;
1304                 goto err_dma_pool;
1305         }
1306
1307         device->completion_pool = pci_pool_create("completion_pool", pdev,
1308                                                   sizeof(u64), SMP_CACHE_BYTES,
1309                                                   SMP_CACHE_BYTES);
1310         if (!device->completion_pool) {
1311                 err = -ENOMEM;
1312                 goto err_completion_pool;
1313         }
1314
1315         INIT_LIST_HEAD(&device->common.channels);
1316         ioat_dma_enumerate_channels(device);
1317
1318         device->common.device_alloc_chan_resources =
1319                                                 ioat_dma_alloc_chan_resources;
1320         device->common.device_free_chan_resources =
1321                                                 ioat_dma_free_chan_resources;
1322         device->common.dev = &pdev->dev;
1323
1324         dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
1325         device->common.device_is_tx_complete = ioat_dma_is_complete;
1326         device->common.device_dependency_added = ioat_dma_dependency_added;
1327         switch (device->version) {
1328         case IOAT_VER_1_2:
1329                 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1330                 device->common.device_issue_pending =
1331                                                 ioat1_dma_memcpy_issue_pending;
1332                 break;
1333         case IOAT_VER_2_0:
1334                 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1335                 device->common.device_issue_pending =
1336                                                 ioat2_dma_memcpy_issue_pending;
1337                 break;
1338         }
1339
1340         dev_err(&device->pdev->dev,
1341                 "Intel(R) I/OAT DMA Engine found,"
1342                 " %d channels, device version 0x%02x, driver version %s\n",
1343                 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1344
1345         err = ioat_dma_setup_interrupts(device);
1346         if (err)
1347                 goto err_setup_interrupts;
1348
1349         err = ioat_dma_self_test(device);
1350         if (err)
1351                 goto err_self_test;
1352
1353         dma_async_device_register(&device->common);
1354
1355         return device;
1356
1357 err_self_test:
1358         ioat_dma_remove_interrupts(device);
1359 err_setup_interrupts:
1360         pci_pool_destroy(device->completion_pool);
1361 err_completion_pool:
1362         pci_pool_destroy(device->dma_pool);
1363 err_dma_pool:
1364         kfree(device);
1365 err_kzalloc:
1366         dev_err(&pdev->dev,
1367                 "Intel(R) I/OAT DMA Engine initialization failed\n");
1368         return NULL;
1369 }
1370
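/*
 * Undo everything ioat_dma_probe() set up: interrupts, the dmaengine
 * registration, the DMA pools, the MMIO mapping and the per-channel state.
 */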
1371 void ioat_dma_remove(struct ioatdma_device *device)
1372 {
1373         struct dma_chan *chan, *_chan;
1374         struct ioat_dma_chan *ioat_chan;
1375
1376         ioat_dma_remove_interrupts(device);
1377
1378         dma_async_device_unregister(&device->common);
1379
1380         pci_pool_destroy(device->dma_pool);
1381         pci_pool_destroy(device->completion_pool);
1382
1383         iounmap(device->reg_base);
1384         pci_release_regions(device->pdev);
1385         pci_disable_device(device->pdev);
1386
1387         list_for_each_entry_safe(chan, _chan,
1388                                  &device->common.channels, device_node) {
1389                 ioat_chan = to_ioat_chan(chan);
1390                 list_del(&chan->device_node);
1391                 kfree(ioat_chan);
1392         }
1393         kfree(device);
1394 }
1395