/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memory copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);
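
/*
 * enumerate_dma_channels - discover the channels behind this device
 *
 * The channel count and the per-descriptor transfer cap (xfercap) are read
 * from MMIO registers; each channel's register window sits at
 * reg_base + 0x80 * (i + 1), and every channel is linked onto the dmaengine
 * core's channel list.
 */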
static int enumerate_dma_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}
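
/*
 * A single copy request may be split over several hardware descriptors when
 * it exceeds the channel's xfercap.  The address setters below therefore walk
 * the whole descriptor group, advancing the bus address by xfercap per link.
 */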
static void
ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

static void
ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}
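
/*
 * ioat_tx_submit - hand a prepared descriptor group to the channel
 *
 * The group is linked into the hardware chain by patching the NextDescriptor
 * field of the current tail descriptor, then spliced onto used_desc.  The
 * APPEND doorbell is only rung once a few submissions have accumulated;
 * ioat_dma_memcpy_issue_pending() flushes anything still batched.
 */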
static dma_cookie_t
ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *group_start;
	dma_cookie_t cookie;
	int append = 0;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
	struct ioat_dma_chan *ioat_chan,
	gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
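
/*
 * Each channel is seeded with INITIAL_IOAT_DESC_COUNT descriptors up front;
 * if the free list ever runs dry, ioat_dma_prep_memcpy() allocates more on
 * demand with GFP_ATOMIC.
 */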
#define INITIAL_IOAT_DESC_COUNT 128

static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/*
	 * In-use bit automatically set by reading chanctrl
	 * If 0, we got it, if 1, someone else did
	 */
	chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
		return -EBUSY;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_CHANNEL_IN_USE |
		IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		printk(KERN_ERR "IOAT: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
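
/*
 * ioat_dma_free_chan_resources - tear a channel back down
 *
 * Reclaim whatever has completed, reset the channel, return every descriptor
 * to the DMA pool, release the completion writeback area, and finally clear
 * the in-use bit so another client may claim the channel.
 */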
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	u16 chanctrl;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
		       in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;

	/* Tell hw the chan is free */
	chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
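
/*
 * ioat_dma_prep_memcpy - build the descriptor group for a copy of 'len' bytes
 *
 * Copies longer than xfercap are split across several hardware descriptors.
 * Only the final descriptor carries the completion-status control bit and a
 * real cookie, so the whole group completes as one transaction.
 */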
static struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;
	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
		}

		copy = min((u32) len, ioat_chan->xfercap);
		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;
		prev = new;
		len -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);
	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push any batched, not-yet-appended descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}
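
/*
 * ioat_dma_memcpy_cleanup - reclaim finished descriptors
 *
 * The engine writes the bus address of the last completed descriptor into the
 * per-channel completion area.  Everything on used_desc up to that address can
 * be unmapped and, once the client has acked it, returned to free_desc.
 */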
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(chan->completion_virt);

	if (!spin_trylock(&chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
	chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		printk(KERN_ERR "IOAT: Channel halted, chanerr = %x\n",
		       readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == chan->last_completion) {
		spin_unlock(&chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/* yes we are unmapping both _page and _single alloc'd
			   regions with unmap_page. Is this *really* that bad?
			*/
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/* a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node, &chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/* last used desc. Do not remove, so we can append from
			   it, but don't look at it next time, either */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&chan->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock(&chan->cleanup_lock);
}
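
/*
 * dmaengine ->device_dependency_added hook: when nothing is pending on the
 * channel, use the notification as an opportunity to reclaim completed
 * descriptors.
 */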
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an I/OAT DMA transaction
 * @chan: I/OAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);
	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS,
		     PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name		= "ioatdma",
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_probe,
	.shutdown	= ioat_shutdown,
	.remove		= __devexit_p(ioat_remove),
};
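
/*
 * Interrupt handler: data-path completions are tracked by polling the
 * completion writeback area, so an interrupt here normally indicates a
 * channel error.  Read ATTNSTATUS to see which channels are asserting, log
 * it, and write INTRCTRL back to re-arm the interrupt.
 */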
static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);
	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
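
/*
 * ioat_start_null_desc - prime a channel with a no-op descriptor
 *
 * used_desc always keeps at least one descriptor so that later submissions
 * have a tail to append to; the NULL descriptor also gives CHAINADDR a valid
 * starting address before CHANCMD_START is issued.
 */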
static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);
	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an I/OAT transaction to verify the hardware works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
	int i, err = 0;
	u8 *src, *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src || !dest) {
		kfree(src);
		kfree(dest);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan, device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
	} else if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
	}

	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor), 64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0)
		device->msi = 1;
	else
		device->msi = 0;
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
			  device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;
	writeb(IOAT_INTRCTRL_MASTER_INT_EN, device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	enumerate_dma_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
	       device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);
	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:
	printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");
	return err;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;
	device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
}

static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}

module_exit(ioat_exit_module);