drivers/dma/ioat_dma.c
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2007 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include "ioatdma.h"
36 #include "ioatdma_registers.h"
37 #include "ioatdma_hw.h"
38
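/* number of software descriptors preallocated per channel at setup time */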
39 #define INITIAL_IOAT_DESC_COUNT 128
40
41 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
42 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
43 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
44 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
45
46 /* internal functions */
47 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
48 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
49 static struct ioat_desc_sw *
50 ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
51
52 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
53                                                 struct ioatdma_device *device,
54                                                 int index)
55 {
56         return device->idx[index];
57 }
58
59 /**
60  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
61  * @irq: interrupt id
62  * @data: interrupt data
63  */
64 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
65 {
66         struct ioatdma_device *instance = data;
67         struct ioat_dma_chan *ioat_chan;
68         unsigned long attnstatus;
69         int bit;
70         u8 intrctrl;
71
72         intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
73
74         if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
75                 return IRQ_NONE;
76
77         if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
78                 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
79                 return IRQ_NONE;
80         }
81
82         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
83         for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
84                 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
85                 tasklet_schedule(&ioat_chan->cleanup_task);
86         }
87
88         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
89         return IRQ_HANDLED;
90 }
91
92 /**
93  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
94  * @irq: interrupt id
95  * @data: interrupt data
96  */
97 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
98 {
99         struct ioat_dma_chan *ioat_chan = data;
100
101         tasklet_schedule(&ioat_chan->cleanup_task);
102
103         return IRQ_HANDLED;
104 }
105
106 static void ioat_dma_cleanup_tasklet(unsigned long data);
107
108 /**
109  * ioat_dma_enumerate_channels - find and initialize the device's channels
110  * @device: the device to be enumerated
111  */
112 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
113 {
114         u8 xfercap_scale;
115         u32 xfercap;
116         int i;
117         struct ioat_dma_chan *ioat_chan;
118
119         device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
120         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
121         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
122
123         for (i = 0; i < device->common.chancnt; i++) {
124                 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
125                 if (!ioat_chan) {
126                         device->common.chancnt = i;
127                         break;
128                 }
129
130                 ioat_chan->device = device;
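                /* channel registers sit at 0x80-byte offsets above the device registers */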
131                 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
132                 ioat_chan->xfercap = xfercap;
133                 spin_lock_init(&ioat_chan->cleanup_lock);
134                 spin_lock_init(&ioat_chan->desc_lock);
135                 INIT_LIST_HEAD(&ioat_chan->free_desc);
136                 INIT_LIST_HEAD(&ioat_chan->used_desc);
137                 /* This should be made common somewhere in dmaengine.c */
138                 ioat_chan->common.device = &device->common;
139                 list_add_tail(&ioat_chan->common.device_node,
140                               &device->common.channels);
141                 device->idx[i] = ioat_chan;
142                 tasklet_init(&ioat_chan->cleanup_task,
143                              ioat_dma_cleanup_tasklet,
144                              (unsigned long) ioat_chan);
145                 tasklet_disable(&ioat_chan->cleanup_task);
146         }
147         return device->common.chancnt;
148 }
149
150 static void ioat_set_src(dma_addr_t addr,
151                          struct dma_async_tx_descriptor *tx,
152                          int index)
153 {
154         tx_to_ioat_desc(tx)->src = addr;
155 }
156
157 static void ioat_set_dest(dma_addr_t addr,
158                           struct dma_async_tx_descriptor *tx,
159                           int index)
160 {
161         tx_to_ioat_desc(tx)->dst = addr;
162 }
163
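/*
 * ioat_tx_submit builds the hardware chain for a prepared transaction:
 * the copy is split into pieces of at most xfercap bytes, each piece
 * gets its own hardware descriptor linked through hw->next, the cookie
 * and callback are moved to the last descriptor, and the new chain is
 * spliced onto used_desc.  CHANCMD_APPEND is only written once at least
 * four descriptors are pending; issue_pending flushes any remainder.
 */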
164 static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
165 {
166         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
167         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
168         struct ioat_desc_sw *prev, *new;
169         struct ioat_dma_descriptor *hw;
170         int append = 0;
171         dma_cookie_t cookie;
172         LIST_HEAD(new_chain);
173         u32 copy;
174         size_t len;
175         dma_addr_t src, dst;
176         int orig_ack;
177         unsigned int desc_count = 0;
178
179         /* src, dest, and len are stored in the initial descriptor */
180         len = first->len;
181         src = first->src;
182         dst = first->dst;
183         orig_ack = first->async_tx.ack;
184         new = first;
185
186         spin_lock_bh(&ioat_chan->desc_lock);
187         prev = to_ioat_desc(ioat_chan->used_desc.prev);
188         prefetch(prev->hw);
189         do {
190                 copy = min((u32) len, ioat_chan->xfercap);
191
192                 new->async_tx.ack = 1;
193
194                 hw = new->hw;
195                 hw->size = copy;
196                 hw->ctl = 0;
197                 hw->src_addr = src;
198                 hw->dst_addr = dst;
199                 hw->next = 0;
200
201                 /* chain together the physical address list for the HW */
202                 wmb();
203                 prev->hw->next = (u64) new->async_tx.phys;
204
205                 len -= copy;
206                 dst += copy;
207                 src += copy;
208
209                 list_add_tail(&new->node, &new_chain);
210                 desc_count++;
211                 prev = new;
212         } while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));
213
214         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
215         if (new->async_tx.callback) {
216                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
217                 if (first != new) {
218                         /* move callback into the last desc */
219                         new->async_tx.callback = first->async_tx.callback;
220                         new->async_tx.callback_param
221                                         = first->async_tx.callback_param;
222                         first->async_tx.callback = NULL;
223                         first->async_tx.callback_param = NULL;
224                 }
225         }
226
227         new->tx_cnt = desc_count;
228         new->async_tx.ack = orig_ack; /* client is in control of this ack */
229
230         /* store the original values for use in later cleanup */
231         if (new != first) {
232                 new->src = first->src;
233                 new->dst = first->dst;
234                 new->len = first->len;
235         }
236
237         /* cookie incr and addition to used_list must be atomic */
238         cookie = ioat_chan->common.cookie;
239         cookie++;
240         if (cookie < 0)
241                 cookie = 1;
242         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
243
244         /* write address into NextDescriptor field of last desc in chain */
245         to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
246                                                         first->async_tx.phys;
247         __list_splice(&new_chain, ioat_chan->used_desc.prev);
248
249         ioat_chan->pending += desc_count;
250         if (ioat_chan->pending >= 4) {
251                 append = 1;
252                 ioat_chan->pending = 0;
253         }
254         spin_unlock_bh(&ioat_chan->desc_lock);
255
256         if (append)
257                 writeb(IOAT_CHANCMD_APPEND,
258                         ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
259
260         return cookie;
261 }
262
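/*
 * Each software descriptor pairs a hardware descriptor allocated from the
 * device's DMA-coherent pci_pool with the list node and async_tx state the
 * driver uses for bookkeeping.
 */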
263 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
264                                         struct ioat_dma_chan *ioat_chan,
265                                         gfp_t flags)
266 {
267         struct ioat_dma_descriptor *desc;
268         struct ioat_desc_sw *desc_sw;
269         struct ioatdma_device *ioatdma_device;
270         dma_addr_t phys;
271
272         ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
273         desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
274         if (unlikely(!desc))
275                 return NULL;
276
277         desc_sw = kzalloc(sizeof(*desc_sw), flags);
278         if (unlikely(!desc_sw)) {
279                 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
280                 return NULL;
281         }
282
283         memset(desc, 0, sizeof(*desc));
284         dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
285         desc_sw->async_tx.tx_set_src = ioat_set_src;
286         desc_sw->async_tx.tx_set_dest = ioat_set_dest;
287         desc_sw->async_tx.tx_submit = ioat_tx_submit;
288         INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
289         desc_sw->hw = desc;
290         desc_sw->async_tx.phys = phys;
291
292         return desc_sw;
293 }
294
295 /* returns the actual number of allocated descriptors */
296 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
297 {
298         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
299         struct ioat_desc_sw *desc = NULL;
300         u16 chanctrl;
301         u32 chanerr;
302         int i;
303         LIST_HEAD(tmp_list);
304
305         /* have we already been set up? */
306         if (!list_empty(&ioat_chan->free_desc))
307                 return INITIAL_IOAT_DESC_COUNT;
308
309         /* Set the channel control register to interrupt and write completion status on error */
310         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
311                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
312                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
313         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
314
315         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
316         if (chanerr) {
317                 dev_err(&ioat_chan->device->pdev->dev,
318                         "CHANERR = %x, clearing\n", chanerr);
319                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
320         }
321
322         /* Allocate descriptors */
323         for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
324                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
325                 if (!desc) {
326                         dev_err(&ioat_chan->device->pdev->dev,
327                                 "Only %d initial descriptors\n", i);
328                         break;
329                 }
330                 list_add_tail(&desc->node, &tmp_list);
331         }
332         spin_lock_bh(&ioat_chan->desc_lock);
333         list_splice(&tmp_list, &ioat_chan->free_desc);
334         spin_unlock_bh(&ioat_chan->desc_lock);
335
336         /* allocate a completion writeback area */
337         /* do two 32-bit MMIO writes since a single 64-bit write doesn't work */
338         ioat_chan->completion_virt =
339                 pci_pool_alloc(ioat_chan->device->completion_pool,
340                                GFP_KERNEL,
341                                &ioat_chan->completion_addr);
342         memset(ioat_chan->completion_virt, 0,
343                sizeof(*ioat_chan->completion_virt));
344         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
345                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
346         writel(((u64) ioat_chan->completion_addr) >> 32,
347                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
348
349         tasklet_enable(&ioat_chan->cleanup_task);
350         ioat_dma_start_null_desc(ioat_chan);
351         return i;
352 }
353
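/*
 * Teardown mirrors allocation: disable the cleanup tasklet, reap finished
 * work, reset the channel (waiting 100ms for the DMA logic to quiesce),
 * then return all descriptors and the completion area to their pools.
 */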
354 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
355 {
356         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
357         struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
358         struct ioat_desc_sw *desc, *_desc;
359         int in_use_descs = 0;
360
361         tasklet_disable(&ioat_chan->cleanup_task);
362         ioat_dma_memcpy_cleanup(ioat_chan);
363
364         /* Delay 100ms after reset to allow internal DMA logic to quiesce
365          * before removing DMA descriptor resources.
366          */
367         writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
368         mdelay(100);
369
370         spin_lock_bh(&ioat_chan->desc_lock);
371         list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
372                 in_use_descs++;
373                 list_del(&desc->node);
374                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
375                               desc->async_tx.phys);
376                 kfree(desc);
377         }
378         list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
379                 list_del(&desc->node);
380                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
381                               desc->async_tx.phys);
382                 kfree(desc);
383         }
384         spin_unlock_bh(&ioat_chan->desc_lock);
385
386         pci_pool_free(ioatdma_device->completion_pool,
387                       ioat_chan->completion_virt,
388                       ioat_chan->completion_addr);
389
390         /* one is ok since cleanup purposely leaves the last used desc on the chain */
391         if (in_use_descs > 1)
392                 dev_err(&ioat_chan->device->pdev->dev,
393                         "Freeing %d in use descriptors!\n",
394                         in_use_descs - 1);
395
396         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
397         ioat_chan->pending = 0;
398 }
399
400 /**
401  * ioat_dma_get_next_descriptor - return the next available descriptor
402  * @ioat_chan: IOAT DMA channel handle
403  *
404  * Gets the next descriptor from the chain, and must be called with the
405  * channel's desc_lock held.  Allocates more descriptors if the channel
406  * has run out.
407  */
408 static struct ioat_desc_sw *
409 ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
410 {
411         struct ioat_desc_sw *new = NULL;
412
413         if (!list_empty(&ioat_chan->free_desc)) {
414                 new = to_ioat_desc(ioat_chan->free_desc.next);
415                 list_del(&new->node);
416         } else {
417                 /* try to get another desc */
418                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
419                 /* will this ever happen? */
420                 /* TODO add upper limit on these */
421                 BUG_ON(!new);
422         }
423
424         prefetch(new->hw);
425         return new;
426 }
427
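/*
 * prep_memcpy only reserves the first software descriptor and records the
 * requested length; source and destination are filled in later through
 * tx_set_src/tx_set_dest, and the hardware chain itself is built when the
 * client calls tx_submit.
 */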
428 static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
429                                                 struct dma_chan *chan,
430                                                 size_t len,
431                                                 int int_en)
432 {
433         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
434         struct ioat_desc_sw *new;
435
436         spin_lock_bh(&ioat_chan->desc_lock);
437         new = ioat_dma_get_next_descriptor(ioat_chan);
438         new->len = len;
439         spin_unlock_bh(&ioat_chan->desc_lock);
440
441         return new ? &new->async_tx : NULL;
442 }
443
444 /**
445  * ioat_dma_memcpy_issue_pending - push appended descriptors the hw may
446  *                                 not have picked up yet
447  * @chan: DMA channel handle
448  */
449 static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
450 {
451         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
452
453         if (ioat_chan->pending != 0) {
454                 ioat_chan->pending = 0;
455                 writeb(IOAT_CHANCMD_APPEND,
456                        ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
457         }
458 }
459
460 static void ioat_dma_cleanup_tasklet(unsigned long data)
461 {
462         struct ioat_dma_chan *chan = (void *)data;
463         ioat_dma_memcpy_cleanup(chan);
464         writew(IOAT_CHANCTRL_INT_DISABLE,
465                chan->reg_base + IOAT_CHANCTRL_OFFSET);
466 }
467
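/*
 * ioat_dma_memcpy_cleanup reads the completion writeback area to find the
 * last descriptor the hardware finished, then walks used_desc up to that
 * point: buffers are unmapped, callbacks run, and descriptors the client
 * has acked are recycled onto free_desc.  The last completed descriptor is
 * left in place so new work can be appended after it.
 */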
468 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
469 {
470         unsigned long phys_complete;
471         struct ioat_desc_sw *desc, *_desc;
472         dma_cookie_t cookie = 0;
473
474         prefetch(ioat_chan->completion_virt);
475
476         if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
477                 return;
478
479         /* The completion writeback can happen at any time,
480          * so reads by the driver need to be atomic operations.
481          * The descriptor physical addresses are limited to 32 bits
482          * when the CPU can only do a 32-bit mov. */
483
484 #if (BITS_PER_LONG == 64)
485         phys_complete =
486                 ioat_chan->completion_virt->full
487                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
488 #else
489         phys_complete =
490                 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
491 #endif
492
493         if ((ioat_chan->completion_virt->full
494                 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
495                                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
496                 dev_err(&ioat_chan->device->pdev->dev,
497                         "Channel halted, chanerr = %x\n",
498                         readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
499
500                 /* TODO do something to salvage the situation */
501         }
502
503         if (phys_complete == ioat_chan->last_completion) {
504                 spin_unlock_bh(&ioat_chan->cleanup_lock);
505                 return;
506         }
507
508         cookie = 0;
509         spin_lock_bh(&ioat_chan->desc_lock);
510         list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
511
512                 /*
513                  * Incoming DMA requests may use multiple descriptors when
514                  * the copy exceeds xfercap.  If so, only the last descriptor
515                  * carries the cookie and requires unmapping.
516                  */
517                 if (desc->async_tx.cookie) {
518                         cookie = desc->async_tx.cookie;
519
520                         /*
521                          * yes we are unmapping both _page and _single alloc'd
522                          * regions with unmap_page. Is this *really* that bad?
523                          */
524                         pci_unmap_page(ioat_chan->device->pdev,
525                                         pci_unmap_addr(desc, dst),
526                                         pci_unmap_len(desc, len),
527                                         PCI_DMA_FROMDEVICE);
528                         pci_unmap_page(ioat_chan->device->pdev,
529                                         pci_unmap_addr(desc, src),
530                                         pci_unmap_len(desc, len),
531                                         PCI_DMA_TODEVICE);
532                         if (desc->async_tx.callback) {
533                                 desc->async_tx.callback(
534                                                 desc->async_tx.callback_param);
535                                 desc->async_tx.callback = NULL;
536                         }
537                 }
538
539                 if (desc->async_tx.phys != phys_complete) {
540                         /*
541                          * a completed entry, but not the last, so cleanup
542                          * if the client is done with the descriptor
543                          */
544                         if (desc->async_tx.ack) {
545                                 list_del(&desc->node);
546                                 list_add_tail(&desc->node,
547                                               &ioat_chan->free_desc);
548                         } else
549                                 desc->async_tx.cookie = 0;
550                 } else {
551                         /*
552                          * last used desc. Do not remove, so we can append from
553                          * it, but don't look at it next time, either
554                          */
555                         desc->async_tx.cookie = 0;
556
557                         /* TODO check status bits? */
558                         break;
559                 }
560         }
561
562         spin_unlock_bh(&ioat_chan->desc_lock);
563
564         ioat_chan->last_completion = phys_complete;
565         if (cookie != 0)
566                 ioat_chan->completed_cookie = cookie;
567
568         spin_unlock_bh(&ioat_chan->cleanup_lock);
569 }
570
571 static void ioat_dma_dependency_added(struct dma_chan *chan)
572 {
573         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
574         spin_lock_bh(&ioat_chan->desc_lock);
575         if (ioat_chan->pending == 0) {
576                 spin_unlock_bh(&ioat_chan->desc_lock);
577                 ioat_dma_memcpy_cleanup(ioat_chan);
578         } else
579                 spin_unlock_bh(&ioat_chan->desc_lock);
580 }
581
582 /**
583  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
584  * @chan: IOAT DMA channel handle
585  * @cookie: DMA transaction identifier
586  * @done: if not %NULL, updated with last completed transaction
587  * @used: if not %NULL, updated with last used transaction
588  */
589 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
590                                             dma_cookie_t cookie,
591                                             dma_cookie_t *done,
592                                             dma_cookie_t *used)
593 {
594         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
595         dma_cookie_t last_used;
596         dma_cookie_t last_complete;
597         enum dma_status ret;
598
599         last_used = chan->cookie;
600         last_complete = ioat_chan->completed_cookie;
601
602         if (done)
603                 *done = last_complete;
604         if (used)
605                 *used = last_used;
606
607         ret = dma_async_is_complete(cookie, last_complete, last_used);
608         if (ret == DMA_SUCCESS)
609                 return ret;
610
611         ioat_dma_memcpy_cleanup(ioat_chan);
612
613         last_used = chan->cookie;
614         last_complete = ioat_chan->completed_cookie;
615
616         if (done)
617                 *done = last_complete;
618         if (used)
619                 *used = last_used;
620
621         return dma_async_is_complete(cookie, last_complete, last_used);
622 }
623
624 /* PCI API */
625
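/*
 * The channel is started by handing it a "null" descriptor: NUL control
 * bit set, zero size and addresses, completion-status update enabled.
 * CHAINADDR is pointed at it and CHANCMD_START is issued; real transfers
 * are then chained behind it and kicked with CHANCMD_APPEND.
 */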
626 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
627 {
628         struct ioat_desc_sw *desc;
629
630         spin_lock_bh(&ioat_chan->desc_lock);
631
632         desc = ioat_dma_get_next_descriptor(ioat_chan);
633         desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
634                                 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
635                                 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
636         desc->hw->next = 0;
637         desc->hw->size = 0;
638         desc->hw->src_addr = 0;
639         desc->hw->dst_addr = 0;
640         desc->async_tx.ack = 1;
641
642         list_add_tail(&desc->node, &ioat_chan->used_desc);
643         spin_unlock_bh(&ioat_chan->desc_lock);
644
645         writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
646                ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
647         writel(((u64) desc->async_tx.phys) >> 32,
648                ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
649
650         writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
651 }
652
653 /*
654  * Perform an I/OAT transaction to verify the HW works.
655  */
656 #define IOAT_TEST_SIZE 2000
657
658 static void ioat_dma_test_callback(void *dma_async_param)
659 {
660         printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
661                         dma_async_param);
662 }
663
664 /**
665  * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
666  * @device: device to be tested
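 *
 * Allocates a source and a destination buffer, fills the source with a
 * counting pattern, runs a single memcpy through the first DMA channel,
 * and verifies the result after giving the engine 1ms to complete.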
667  */
668 static int ioat_dma_self_test(struct ioatdma_device *device)
669 {
670         int i;
671         u8 *src;
672         u8 *dest;
673         struct dma_chan *dma_chan;
674         struct dma_async_tx_descriptor *tx = NULL;
675         dma_addr_t addr;
676         dma_cookie_t cookie;
677         int err = 0;
678
679         src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
680         if (!src)
681                 return -ENOMEM;
682         dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
683         if (!dest) {
684                 kfree(src);
685                 return -ENOMEM;
686         }
687
688         /* Fill in src buffer */
689         for (i = 0; i < IOAT_TEST_SIZE; i++)
690                 src[i] = (u8)i;
691
692         /* Start copy, using first DMA channel */
693         dma_chan = container_of(device->common.channels.next,
694                                 struct dma_chan,
695                                 device_node);
696         if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
697                 dev_err(&device->pdev->dev,
698                         "selftest cannot allocate chan resource\n");
699                 err = -ENODEV;
700                 goto out;
701         }
702
703         tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
704         if (!tx) {
705                 dev_err(&device->pdev->dev,
706                         "Self-test prep failed, disabling\n");
707                 err = -ENODEV;
708                 goto free_resources;
709         }
710
711         async_tx_ack(tx);
712         addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
713                         DMA_TO_DEVICE);
714         ioat_set_src(addr, tx, 0);
715         addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
716                         DMA_FROM_DEVICE);
717         ioat_set_dest(addr, tx, 0);
718         tx->callback = ioat_dma_test_callback;
719         tx->callback_param = (void *)0x8086;
720         cookie = ioat_tx_submit(tx);
721         if (cookie < 0) {
722                 dev_err(&device->pdev->dev,
723                         "Self-test setup failed, disabling\n");
724                 err = -ENODEV;
725                 goto free_resources;
726         }
727         ioat_dma_memcpy_issue_pending(dma_chan);
728         msleep(1);
729
730         if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
731                 dev_err(&device->pdev->dev,
732                         "Self-test copy timed out, disabling\n");
733                 err = -ENODEV;
734                 goto free_resources;
735         }
736         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
737                 dev_err(&device->pdev->dev,
738                         "Self-test copy failed compare, disabling\n");
739                 err = -ENODEV;
740                 goto free_resources;
741         }
742
743 free_resources:
744         ioat_dma_free_chan_resources(dma_chan);
745 out:
746         kfree(src);
747         kfree(dest);
748         return err;
749 }
750
751 static char ioat_interrupt_style[32] = "msix";
752 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
753                     sizeof(ioat_interrupt_style), 0644);
754 MODULE_PARM_DESC(ioat_interrupt_style,
755                  "set ioat interrupt style: msix (default), "
756                  "msix-single-vector, msi, intx");
757
758 /**
759  * ioat_dma_setup_interrupts - setup interrupt handler
760  * @device: ioat device
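 *
 * Tries one MSI-X vector per channel first, then falls back to a single
 * MSI-X vector, then MSI, then legacy INTx; the starting point can be
 * selected with the ioat_interrupt_style module parameter.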
761  */
762 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
763 {
764         struct ioat_dma_chan *ioat_chan;
765         int err, i, j, msixcnt;
766         u8 intrctrl = 0;
767
768         if (!strcmp(ioat_interrupt_style, "msix"))
769                 goto msix;
770         if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
771                 goto msix_single_vector;
772         if (!strcmp(ioat_interrupt_style, "msi"))
773                 goto msi;
774         if (!strcmp(ioat_interrupt_style, "intx"))
775                 goto intx;
776         dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
777                 ioat_interrupt_style);
778         goto err_no_irq;
779
780 msix:
781         /* The number of MSI-X vectors should equal the number of channels */
782         msixcnt = device->common.chancnt;
783         for (i = 0; i < msixcnt; i++)
784                 device->msix_entries[i].entry = i;
785
786         err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
787         if (err < 0)
788                 goto msi;
789         if (err > 0)
790                 goto msix_single_vector;
791
792         for (i = 0; i < msixcnt; i++) {
793                 ioat_chan = ioat_lookup_chan_by_index(device, i);
794                 err = request_irq(device->msix_entries[i].vector,
795                                   ioat_dma_do_interrupt_msix,
796                                   0, "ioat-msix", ioat_chan);
797                 if (err) {
798                         for (j = 0; j < i; j++) {
799                                 ioat_chan =
800                                         ioat_lookup_chan_by_index(device, j);
801                                 free_irq(device->msix_entries[j].vector,
802                                          ioat_chan);
803                         }
804                         goto msix_single_vector;
805                 }
806         }
807         intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
808         device->irq_mode = msix_multi_vector;
809         goto done;
810
811 msix_single_vector:
812         device->msix_entries[0].entry = 0;
813         err = pci_enable_msix(device->pdev, device->msix_entries, 1);
814         if (err)
815                 goto msi;
816
817         err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
818                           0, "ioat-msix", device);
819         if (err) {
820                 pci_disable_msix(device->pdev);
821                 goto msi;
822         }
823         device->irq_mode = msix_single_vector;
824         goto done;
825
826 msi:
827         err = pci_enable_msi(device->pdev);
828         if (err)
829                 goto intx;
830
831         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
832                           0, "ioat-msi", device);
833         if (err) {
834                 pci_disable_msi(device->pdev);
835                 goto intx;
836         }
837         /*
838          * CB 1.2 devices need a bit set in configuration space to enable MSI
839          */
840         if (device->version == IOAT_VER_1_2) {
841                 u32 dmactrl;
842                 pci_read_config_dword(device->pdev,
843                                       IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
844                 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
845                 pci_write_config_dword(device->pdev,
846                                        IOAT_PCI_DMACTRL_OFFSET, dmactrl);
847         }
848         device->irq_mode = msi;
849         goto done;
850
851 intx:
852         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
853                           IRQF_SHARED, "ioat-intx", device);
854         if (err)
855                 goto err_no_irq;
856         device->irq_mode = intx;
857
858 done:
859         intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
860         writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
861         return 0;
862
863 err_no_irq:
864         /* Disable all interrupt generation */
865         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
866         dev_err(&device->pdev->dev, "no usable interrupts\n");
867         device->irq_mode = none;
868         return -1;
869 }
870
871 /**
872  * ioat_dma_remove_interrupts - remove whatever interrupts were set
873  * @device: ioat device
874  */
875 static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
876 {
877         struct ioat_dma_chan *ioat_chan;
878         int i;
879
880         /* Disable all interrupt generation */
881         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
882
883         switch (device->irq_mode) {
884         case msix_multi_vector:
885                 for (i = 0; i < device->common.chancnt; i++) {
886                         ioat_chan = ioat_lookup_chan_by_index(device, i);
887                         free_irq(device->msix_entries[i].vector, ioat_chan);
888                 }
889                 pci_disable_msix(device->pdev);
890                 break;
891         case msix_single_vector:
892                 free_irq(device->msix_entries[0].vector, device);
893                 pci_disable_msix(device->pdev);
894                 break;
895         case msi:
896                 free_irq(device->pdev->irq, device);
897                 pci_disable_msi(device->pdev);
898                 break;
899         case intx:
900                 free_irq(device->pdev->irq, device);
901                 break;
902         case none:
903                 dev_warn(&device->pdev->dev,
904                          "call to %s without interrupts setup\n", __func__);
905         }
906         device->irq_mode = none;
907 }
908
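/*
 * ioat_dma_probe order: allocate the ioatdma_device, create the descriptor
 * and completion pci_pools, enumerate the channels, wire up the dmaengine
 * callbacks, set up interrupts, run the self-test, and finally register
 * with the dmaengine core.
 */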
909 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
910                                       void __iomem *iobase)
911 {
912         int err;
913         struct ioatdma_device *device;
914
915         device = kzalloc(sizeof(*device), GFP_KERNEL);
916         if (!device) {
917                 err = -ENOMEM;
918                 goto err_kzalloc;
919         }
920         device->pdev = pdev;
921         device->reg_base = iobase;
922         device->version = readb(device->reg_base + IOAT_VER_OFFSET);
923
924         /* DMA coherent memory pool for DMA descriptor allocations */
925         device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
926                                            sizeof(struct ioat_dma_descriptor),
927                                            64, 0);
928         if (!device->dma_pool) {
929                 err = -ENOMEM;
930                 goto err_dma_pool;
931         }
932
933         device->completion_pool = pci_pool_create("completion_pool", pdev,
934                                                   sizeof(u64), SMP_CACHE_BYTES,
935                                                   SMP_CACHE_BYTES);
936         if (!device->completion_pool) {
937                 err = -ENOMEM;
938                 goto err_completion_pool;
939         }
940
941         INIT_LIST_HEAD(&device->common.channels);
942         ioat_dma_enumerate_channels(device);
943
944         dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
945         device->common.device_alloc_chan_resources =
946                                                 ioat_dma_alloc_chan_resources;
947         device->common.device_free_chan_resources =
948                                                 ioat_dma_free_chan_resources;
949         device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
950         device->common.device_is_tx_complete = ioat_dma_is_complete;
951         device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
952         device->common.device_dependency_added = ioat_dma_dependency_added;
953         device->common.dev = &pdev->dev;
954         dev_err(&device->pdev->dev,
955                 "Intel(R) I/OAT DMA Engine found,"
956                 " %d channels, device version 0x%02x, driver version %s\n",
957                 device->common.chancnt, device->version, IOAT_DMA_VERSION);
958
959         err = ioat_dma_setup_interrupts(device);
960         if (err)
961                 goto err_setup_interrupts;
962
963         err = ioat_dma_self_test(device);
964         if (err)
965                 goto err_self_test;
966
967         dma_async_device_register(&device->common);
968
969         return device;
970
971 err_self_test:
972         ioat_dma_remove_interrupts(device);
973 err_setup_interrupts:
974         pci_pool_destroy(device->completion_pool);
975 err_completion_pool:
976         pci_pool_destroy(device->dma_pool);
977 err_dma_pool:
978         kfree(device);
979 err_kzalloc:
980         dev_err(&pdev->dev,
981                 "Intel(R) I/OAT DMA Engine initialization failed\n");
982         return NULL;
983 }
984
985 void ioat_dma_remove(struct ioatdma_device *device)
986 {
987         struct dma_chan *chan, *_chan;
988         struct ioat_dma_chan *ioat_chan;
989
990         ioat_dma_remove_interrupts(device);
991
992         dma_async_device_unregister(&device->common);
993
994         pci_pool_destroy(device->dma_pool);
995         pci_pool_destroy(device->completion_pool);
996
997         iounmap(device->reg_base);
998         pci_release_regions(device->pdev);
999         pci_disable_device(device->pdev);
1000
1001         list_for_each_entry_safe(chan, _chan,
1002                                  &device->common.channels, device_node) {
1003                 ioat_chan = to_ioat_chan(chan);
1004                 list_del(&chan->device_node);
1005                 kfree(ioat_chan);
1006         }
1007         kfree(device);
1008 }
1009