/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */
 
#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>

#include <asm/arch/adma.h>
 
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
        container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
        container_of(tx, struct iop_adma_desc_slot, async_tx)
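
/*
 * Note: the three wrappers above use container_of() to recover the
 * driver-private iop_adma_chan, iop_adma_device and iop_adma_desc_slot
 * structures from the generic dmaengine objects embedded inside them.
 */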
 
/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
        int stride = slot->slots_per_op;

        while (stride--) {
                slot->slots_per_op = 0;
                slot = list_entry(slot->slot_node.next,
                                struct iop_adma_desc_slot,
                                slot_node);
        }
}
 
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
        struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
        BUG_ON(desc->async_tx.cookie < 0);
        spin_lock_bh(&desc->async_tx.lock);
        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;
                desc->async_tx.cookie = 0;

                /* call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                if (desc->async_tx.callback)
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);

                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
                if (desc->group_head && desc->unmap_len) {
                        struct iop_adma_desc_slot *unmap = desc->group_head;
                        struct device *dev =
                                &iop_chan->device->pdev->dev;
                        u32 len = unmap->unmap_len;
                        u32 src_cnt = unmap->unmap_src_cnt;
                        dma_addr_t addr = iop_desc_get_dest_addr(unmap,
                                iop_chan);

                        dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
                        while (src_cnt--) {
                                addr = iop_desc_get_src_addr(unmap,
                                                        iop_chan,
                                                        src_cnt);
                                dma_unmap_page(dev, addr, len,
                                        DMA_TO_DEVICE);
                        }
                        desc->group_head = NULL;
                }
        }

        /* run dependent operations */
        async_tx_run_dependencies(&desc->async_tx);
        spin_unlock_bh(&desc->async_tx.lock);

        return cookie;
}
 
static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
        struct iop_adma_chan *iop_chan)
{
        /* the client is allowed to attach dependent operations
         * until 'ack' is set
         */
        if (!desc->async_tx.ack)
                return 0;

        /* leave the last descriptor in the chain
         * so we can append to it
         */
        if (desc->chain_node.next == &iop_chan->chain)
                return 1;

        dev_dbg(iop_chan->device->common.dev,
                "\tfree slot: %d slots_per_op: %d\n",
                desc->idx, desc->slots_per_op);

        list_del(&desc->chain_node);
        iop_adma_free_slots(desc);
        return 0;
}
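
/*
 * The helpers above implement descriptor retirement:
 * iop_adma_run_tx_complete_actions() fires the client callback, unmaps the
 * DMA buffers and kicks dependent transactions, iop_adma_clean_slot()
 * unlinks an acked descriptor from the channel chain, and
 * iop_adma_free_slots() marks its slots reusable.  __iop_adma_slot_cleanup()
 * below walks the chain and drives them for every completed descriptor.
 */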
 
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
        struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
        dma_cookie_t cookie = 0;
        u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
        int busy = iop_chan_is_busy(iop_chan);
        int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

        dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
        /* free completed slots from the chain starting with
         * the oldest descriptor
         */
        list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
                                        chain_node) {
                pr_debug("\tcookie: %d slot: %d busy: %d "
                        "this_desc: %#x next_desc: %#x ack: %d\n",
                        iter->async_tx.cookie, iter->idx, busy,
                        iter->async_tx.phys, iop_desc_get_next_desc(iter),
                        iter->async_tx.ack);
                prefetch(_iter);
                prefetch(&_iter->async_tx);

                /* do not advance past the current descriptor loaded into the
                 * hardware channel, subsequent descriptors are either in
                 * process or have not been submitted
                 */
                if (seen_current)
                        break;

                /* stop the search if we reach the current descriptor and the
                 * channel is busy, or if it appears that the current descriptor
                 * needs to be re-read (i.e. has been appended to)
                 */
                if (iter->async_tx.phys == current_desc) {
                        BUG_ON(seen_current++);
                        if (busy || iop_desc_get_next_desc(iter))
                                break;
                }

                /* detect the start of a group transaction */
                if (!slot_cnt && !slots_per_op) {
                        slot_cnt = iter->slot_cnt;
                        slots_per_op = iter->slots_per_op;
                        if (slot_cnt <= slots_per_op) {
                                slot_cnt = 0;
                                slots_per_op = 0;
                        }
                }

                if (slot_cnt) {
                        pr_debug("\tgroup++\n");
                        if (!grp_start)
                                grp_start = iter;
                        slot_cnt -= slots_per_op;
                }

                /* all the members of a group are complete */
                if (slots_per_op != 0 && slot_cnt == 0) {
                        struct iop_adma_desc_slot *grp_iter, *_grp_iter;
                        int end_of_chain = 0;
                        pr_debug("\tgroup end\n");

                        /* collect the total results */
                        if (grp_start->xor_check_result) {
                                u32 zero_sum_result = 0;
                                slot_cnt = grp_start->slot_cnt;
                                grp_iter = grp_start;

                                list_for_each_entry_from(grp_iter,
                                        &iop_chan->chain, chain_node) {
                                        zero_sum_result |=
                                            iop_desc_get_zero_result(grp_iter);
                                        pr_debug("\titer%d result: %d\n",
                                            grp_iter->idx, zero_sum_result);
                                        slot_cnt -= slots_per_op;
                                        if (slot_cnt == 0)
                                                break;
                                }
                                pr_debug("\tgrp_start->xor_check_result: %p\n",
                                        grp_start->xor_check_result);
                                *grp_start->xor_check_result = zero_sum_result;
                        }

                        /* clean up the group */
                        slot_cnt = grp_start->slot_cnt;
                        grp_iter = grp_start;
                        list_for_each_entry_safe_from(grp_iter, _grp_iter,
                                &iop_chan->chain, chain_node) {
                                cookie = iop_adma_run_tx_complete_actions(
                                        grp_iter, iop_chan, cookie);

                                slot_cnt -= slots_per_op;
                                end_of_chain = iop_adma_clean_slot(grp_iter,
                                        iop_chan);

                                if (slot_cnt == 0 || end_of_chain)
                                        break;
                        }

                        /* the group should be complete at this point */
                        BUG_ON(slot_cnt);

                        slots_per_op = 0;
                        grp_start = NULL;
                        if (end_of_chain)
                                break;
                        else
                                continue;
                } else if (slots_per_op) /* wait for group completion */
                        continue;

                /* write back zero sum results (single descriptor case) */
                if (iter->xor_check_result && iter->async_tx.cookie)
                        *iter->xor_check_result =
                                iop_desc_get_zero_result(iter);

                cookie = iop_adma_run_tx_complete_actions(
                                        iter, iop_chan, cookie);

                if (iop_adma_clean_slot(iter, iop_chan))
                        break;
        }

        BUG_ON(!seen_current);

        iop_chan_idle(busy, iop_chan);

        if (cookie > 0) {
                iop_chan->completed_cookie = cookie;
                pr_debug("\tcompleted cookie %d\n", cookie);
        }
}
 
static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
        spin_lock_bh(&iop_chan->lock);
        __iop_adma_slot_cleanup(iop_chan);
        spin_unlock_bh(&iop_chan->lock);
}
 
static void iop_adma_tasklet(unsigned long data)
{
        struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
        __iop_adma_slot_cleanup(chan);
}
 
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
                        int slots_per_op)
{
        struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
        struct list_head chain = LIST_HEAD_INIT(chain);
        int slots_found, retry = 0;

        /* start search from the last allocated descriptor
         * if a contiguous allocation can not be found start searching
         * from the beginning of the list
         */
retry:
        slots_found = 0;
        if (retry == 0)
                iter = iop_chan->last_used;
        else
                iter = list_entry(&iop_chan->all_slots,
                        struct iop_adma_desc_slot,
                        slot_node);

        list_for_each_entry_safe_continue(
                iter, _iter, &iop_chan->all_slots, slot_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);
                if (iter->slots_per_op) {
                        /* give up after finding the first busy slot
                         * on the second pass through the list
                         */
                        if (retry)
                                break;

                        slots_found = 0;
                        continue;
                }

                /* start the allocation if the slot is correctly aligned */
                if (!slots_found++) {
                        if (iop_desc_is_aligned(iter, slots_per_op))
                                alloc_start = iter;
                        else {
                                slots_found = 0;
                                continue;
                        }
                }

                if (slots_found == num_slots) {
                        struct iop_adma_desc_slot *alloc_tail = NULL;
                        struct iop_adma_desc_slot *last_used = NULL;
                        iter = alloc_start;
                        while (num_slots) {
                                int i;
                                dev_dbg(iop_chan->device->common.dev,
                                        "allocated slot: %d "
                                        "(desc %p phys: %#x) slots_per_op %d\n",
                                        iter->idx, iter->hw_desc,
                                        iter->async_tx.phys, slots_per_op);

                                /* pre-ack all but the last descriptor */
                                if (num_slots != slots_per_op)
                                        iter->async_tx.ack = 1;
                                else
                                        iter->async_tx.ack = 0;

                                list_add_tail(&iter->chain_node, &chain);
                                alloc_tail = iter;
                                iter->async_tx.cookie = 0;
                                iter->slot_cnt = num_slots;
                                iter->xor_check_result = NULL;
                                for (i = 0; i < slots_per_op; i++) {
                                        iter->slots_per_op = slots_per_op - i;
                                        last_used = iter;
                                        iter = list_entry(iter->slot_node.next,
                                                struct iop_adma_desc_slot,
                                                slot_node);
                                }
                                num_slots -= slots_per_op;
                        }
                        alloc_tail->group_head = alloc_start;
                        alloc_tail->async_tx.cookie = -EBUSY;
                        list_splice(&chain, &alloc_tail->async_tx.tx_list);
                        iop_chan->last_used = last_used;
                        iop_desc_clear_next_desc(alloc_start);
                        iop_desc_clear_next_desc(alloc_tail);
                        return alloc_tail;
                }
        }
        if (!retry++)
                goto retry;

        /* try to free some slots if the allocation fails */
        tasklet_schedule(&iop_chan->irq_tasklet);

        return NULL;
}
 
static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
        struct iop_adma_desc_slot *desc)
{
        dma_cookie_t cookie = iop_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        iop_chan->common.cookie = desc->async_tx.cookie = cookie;
        return cookie;
}
 
static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
        dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
                iop_chan->pending);

        if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
                iop_chan->pending = 0;
                iop_chan_append(iop_chan);
        }
}
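
/*
 * Submitted descriptors only accumulate a 'pending' slot count; the chain is
 * handed to the hardware via iop_chan_append() once IOP_ADMA_THRESHOLD slots
 * are pending, or immediately when a client calls iop_adma_issue_pending().
 */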
 
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
        struct iop_adma_desc_slot *grp_start, *old_chain_tail;
        int slot_cnt;
        int slots_per_op;
        dma_cookie_t cookie;

        grp_start = sw_desc->group_head;
        slot_cnt = grp_start->slot_cnt;
        slots_per_op = grp_start->slots_per_op;

        spin_lock_bh(&iop_chan->lock);
        cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

        old_chain_tail = list_entry(iop_chan->chain.prev,
                struct iop_adma_desc_slot, chain_node);
        list_splice_init(&sw_desc->async_tx.tx_list,
                         &old_chain_tail->chain_node);

        /* fix up the hardware chain */
        iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

        /* 1/ don't add pre-chained descriptors
         * 2/ dummy read to flush next_desc write
         */
        BUG_ON(iop_desc_get_next_desc(sw_desc));

        /* increment the pending count by the number of slots
         * memcpy operations have a 1:1 (slot:operation) relation
         * other operations are heavier and will pop the threshold
         * more often.
         */
        iop_chan->pending += slot_cnt;
        iop_adma_check_threshold(iop_chan);
        spin_unlock_bh(&iop_chan->lock);

        dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
                __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);

        return cookie;
}
 
static void
iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
        int index)
{
        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);

        /* to do: support transfer lengths > IOP_ADMA_MAX_BYTE_COUNT */
        iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
}
 
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
/* returns the number of allocated descriptors */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
        char *hw_desc;
        int idx;
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *slot = NULL;
        int init = iop_chan->slots_allocated ? 0 : 1;
        struct iop_adma_platform_data *plat_data =
                iop_chan->device->pdev->dev.platform_data;
        int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

        /* Allocate descriptor slots */
        do {
                idx = iop_chan->slots_allocated;
                if (idx == num_descs_in_pool)
                        break;

                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
                if (!slot) {
                        printk(KERN_INFO "IOP ADMA Channel only initialized"
                                " %d descriptor slots", idx);
                        break;
                }
                hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
                slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = iop_adma_tx_submit;
                slot->async_tx.tx_set_dest = iop_adma_set_dest;
                INIT_LIST_HEAD(&slot->chain_node);
                INIT_LIST_HEAD(&slot->slot_node);
                INIT_LIST_HEAD(&slot->async_tx.tx_list);
                hw_desc = (char *) iop_chan->device->dma_desc_pool;
                slot->async_tx.phys =
                        (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
                slot->idx = idx;

                spin_lock_bh(&iop_chan->lock);
                iop_chan->slots_allocated++;
                list_add_tail(&slot->slot_node, &iop_chan->all_slots);
                spin_unlock_bh(&iop_chan->lock);
        } while (iop_chan->slots_allocated < num_descs_in_pool);

        if (idx && !iop_chan->last_used)
                iop_chan->last_used = list_entry(iop_chan->all_slots.next,
                                        struct iop_adma_desc_slot,
                                        slot_node);

        dev_dbg(iop_chan->device->common.dev,
                "allocated %d descriptor slots last_used: %p\n",
                iop_chan->slots_allocated, iop_chan->last_used);

        /* initialize the channel and the chain with a null operation */
        if (init) {
                if (dma_has_cap(DMA_MEMCPY,
                        iop_chan->device->common.cap_mask))
                        iop_chan_start_null_memcpy(iop_chan);
                else if (dma_has_cap(DMA_XOR,
                        iop_chan->device->common.cap_mask))
                        iop_chan_start_null_xor(iop_chan);
                else
                        BUG();
        }

        return (idx > 0) ? idx : -ENOMEM;
}
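
/*
 * Each software descriptor slot allocated above maps 1:1 onto an
 * IOP_ADMA_SLOT_SIZE slice of the coherent descriptor pool, recording both
 * the CPU virtual address (hw_desc) and the bus address (async_tx.phys) of
 * its hardware descriptor.
 */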
 
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_interrupt(grp_start, iop_chan);
                grp_start->unmap_len = 0;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}
 
static void
iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
        int index)
{
        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
        struct iop_adma_desc_slot *grp_start = sw_desc->group_head;

        iop_desc_set_memcpy_src_addr(grp_start, addr);
}
 
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

        dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
                __FUNCTION__, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_memcpy(grp_start, int_en);
                iop_desc_set_byte_count(grp_start, iop_chan, len);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}
 
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
        int int_en)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

        dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
                __FUNCTION__, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_memset(grp_start, int_en);
                iop_desc_set_byte_count(grp_start, iop_chan, len);
                iop_desc_set_block_fill_val(grp_start, value);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}
 
static void
iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
        int index)
{
        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
        struct iop_adma_desc_slot *grp_start = sw_desc->group_head;

        iop_desc_set_xor_src_addr(grp_start, index, addr);
}
 
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
        int int_en)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

        dev_dbg(iop_chan->device->common.dev,
                "%s src_cnt: %d len: %u int_en: %d\n",
                __FUNCTION__, src_cnt, len, int_en);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_xor(grp_start, src_cnt, int_en);
                iop_desc_set_byte_count(grp_start, iop_chan, len);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}
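
/*
 * As with memcpy above, the prep routine only reserves slots and initializes
 * the hardware descriptor for the requested length and source count; the
 * actual buffer addresses are filled in later through the tx_set_src and
 * tx_set_dest callbacks before the descriptor is submitted.
 */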
 
static void
iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
                                struct dma_async_tx_descriptor *tx,
                                int index)
{
        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
        struct iop_adma_desc_slot *grp_start = sw_desc->group_head;

        iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
}
 
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
        size_t len, u32 *result, int int_en)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;

        dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
                __FUNCTION__, src_cnt, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
                iop_desc_set_zero_sum_byte_count(grp_start, len);
                grp_start->xor_check_result = result;
                pr_debug("\t%s: grp_start->xor_check_result: %p\n",
                        __FUNCTION__, grp_start->xor_check_result);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}
 
static void iop_adma_dependency_added(struct dma_chan *chan)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        tasklet_schedule(&iop_chan->irq_tasklet);
}
 
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *iter, *_iter;
        int in_use_descs = 0;

        iop_adma_slot_cleanup(iop_chan);

        spin_lock_bh(&iop_chan->lock);
        list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
                                        chain_node) {
                in_use_descs++;
                list_del(&iter->chain_node);
        }
        list_for_each_entry_safe_reverse(
                iter, _iter, &iop_chan->all_slots, slot_node) {
                list_del(&iter->slot_node);
                kfree(iter);
                iop_chan->slots_allocated--;
        }
        iop_chan->last_used = NULL;

        dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
                __FUNCTION__, iop_chan->slots_allocated);
        spin_unlock_bh(&iop_chan->lock);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
                        in_use_descs - 1);
}
 
/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        dma_cookie_t *done,
                                        dma_cookie_t *used)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = iop_chan->completed_cookie;
        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        iop_adma_slot_cleanup(iop_chan);

        last_used = chan->cookie;
        last_complete = iop_chan->completed_cookie;
        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}
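
/*
 * Typical client usage of this (pre-2.6.24 style) dmaengine interface, shown
 * here only as an illustrative sketch (not part of the driver); buffer
 * mapping and error handling are omitted:
 *
 *	tx = dev->device_prep_dma_memcpy(chan, len, 1);
 *	tx->tx_set_src(src_dma, tx, 0);
 *	tx->tx_set_dest(dest_dma, tx, 0);
 *	cookie = tx->tx_submit(tx);
 *	dev->device_issue_pending(chan);
 *	while (dev->device_is_tx_complete(chan, cookie, NULL, NULL)
 *			!= DMA_SUCCESS)
 *		cpu_relax();
 */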
 
static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
        struct iop_adma_chan *chan = data;

        dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
        tasklet_schedule(&chan->irq_tasklet);
        iop_adma_device_clear_eot_status(chan);

        return IRQ_HANDLED;
}
 
static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
        struct iop_adma_chan *chan = data;

        dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
        tasklet_schedule(&chan->irq_tasklet);
        iop_adma_device_clear_eoc_status(chan);

        return IRQ_HANDLED;
}
 
static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
        struct iop_adma_chan *chan = data;
        unsigned long status = iop_chan_get_status(chan);

        dev_printk(KERN_ERR, chan->device->common.dev,
                "error ( %s%s%s%s%s%s%s)\n",
                iop_is_err_int_parity(status, chan) ? "int_parity " : "",
                iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
                iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
                iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
                iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
                iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
                iop_is_err_split_tx(status, chan) ? "split_tx " : "");

        iop_adma_device_clear_err_status(chan);

        BUG();

        return IRQ_HANDLED;
}
 
static void iop_adma_issue_pending(struct dma_chan *chan)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

        if (iop_chan->pending) {
                iop_chan->pending = 0;
                iop_chan_append(iop_chan);
        }
}
 
/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000
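
/*
 * The memcpy self-test below pushes a single IOP_ADMA_TEST_SIZE byte copy
 * through the first channel and compares source and destination buffers
 * before the engine is exposed through dma_async_device_register().
 */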
 
static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
        int i;
        void *src, *dest;
        dma_addr_t src_dma, dest_dma;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
        int err = 0;
        struct iop_adma_chan *iop_chan;

        dev_dbg(device->common.dev, "%s\n", __FUNCTION__);

        src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;
        memset(dest, 0, IOP_ADMA_TEST_SIZE);

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
        dest_dma = dma_map_single(dma_chan->device->dev, dest,
                                IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
        iop_adma_set_dest(dest_dma, tx, 0);
        src_dma = dma_map_single(dma_chan->device->dev, src,
                                IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
        iop_adma_memcpy_set_src(src_dma, tx, 0);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(1);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
                        DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        iop_chan = to_iop_adma_chan(dma_chan);
        dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
                IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
        if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        iop_adma_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}
 
#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
        struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
        dma_addr_t dma_addr, dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        u32 zero_sum_result;
        int err = 0;
        struct iop_adma_chan *iop_chan;

        dev_dbg(device->common.dev, "%s\n", __FUNCTION__);

        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx])
                        while (src_idx--) {
                                __free_page(xor_srcs[src_idx]);
                                return -ENOMEM;
                        }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest)
                while (src_idx--) {
                        __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                        (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
                PAGE_SIZE, 1);
        dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        iop_adma_set_dest(dest_dma, tx, 0);

        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
                dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
                        PAGE_SIZE, DMA_TO_DEVICE);
                iop_adma_xor_set_src(dma_addr, tx, i);
        }

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
                DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        iop_chan = to_iop_adma_chan(dma_chan);
        dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_printk(KERN_ERR, dma_chan->device->dev,
                                "Self-test xor failed compare, disabling\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }
        dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
                PAGE_SIZE, DMA_TO_DEVICE);

        /* skip zero sum if the capability is not present */
        if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
                goto free_resources;

        /* zero sum the sources with the destination page */
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
                zero_sum_srcs[i] = xor_srcs[i];
        zero_sum_srcs[i] = dest;

        zero_sum_result = 1;

        tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
                PAGE_SIZE, &zero_sum_result, 1);
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
                dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
                        0, PAGE_SIZE, DMA_TO_DEVICE);
                iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
        }

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test zero sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        if (zero_sum_result != 0) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test zero sum failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        /* test memset */
        tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
        dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
                        PAGE_SIZE, DMA_FROM_DEVICE);
        iop_adma_set_dest(dma_addr, tx, 0);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test memset timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i]) {
                        dev_printk(KERN_ERR, dma_chan->device->dev,
                                "Self-test memset failed compare, disabling\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }

        /* test for non-zero parity sum */
        zero_sum_result = 0;
        tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
                PAGE_SIZE, &zero_sum_result, 1);
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
                dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
                        0, PAGE_SIZE, DMA_TO_DEVICE);
                iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
        }

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test non-zero sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        if (zero_sum_result != 1) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test non-zero sum failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        iop_adma_free_chan_resources(dma_chan);
out:
        src_idx = IOP_ADMA_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}
 
static int __devexit iop_adma_remove(struct platform_device *dev)
{
        struct iop_adma_device *device = platform_get_drvdata(dev);
        struct dma_chan *chan, *_chan;
        struct iop_adma_chan *iop_chan;
        int i;
        struct iop_adma_platform_data *plat_data = dev->dev.platform_data;

        dma_async_device_unregister(&device->common);

        for (i = 0; i < 3; i++) {
                unsigned int irq;
                irq = platform_get_irq(dev, i);
                free_irq(irq, device);
        }

        dma_free_coherent(&dev->dev, plat_data->pool_size,
                        device->dma_desc_pool_virt, device->dma_desc_pool);

        do {
                struct resource *res;
                res = platform_get_resource(dev, IORESOURCE_MEM, 0);
                release_mem_region(res->start, res->end - res->start);
        } while (0);

        list_for_each_entry_safe(chan, _chan, &device->common.channels,
                                device_node) {
                iop_chan = to_iop_adma_chan(chan);
                list_del(&chan->device_node);
                kfree(iop_chan);
        }
        kfree(device);

        return 0;
}
 
static int __devinit iop_adma_probe(struct platform_device *pdev)
{
        struct resource *res;
        int ret = 0, i;
        struct iop_adma_device *adev;
        struct iop_adma_chan *iop_chan;
        struct dma_device *dma_dev;
        struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        if (!devm_request_mem_region(&pdev->dev, res->start,
                                res->end - res->start, pdev->name))
                return -EBUSY;

        adev = kzalloc(sizeof(*adev), GFP_KERNEL);
        if (!adev)
                return -ENOMEM;
        dma_dev = &adev->common;

        /* allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
                                        plat_data->pool_size,
                                        &adev->dma_desc_pool,
                                        GFP_KERNEL)) == NULL) {
                ret = -ENOMEM;
                goto err_free_adev;
        }

        dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
                __FUNCTION__, adev->dma_desc_pool_virt,
                (void *) adev->dma_desc_pool);

        adev->id = plat_data->hw_id;

        /* discover transaction capabilities from the platform data */
        dma_dev->cap_mask = plat_data->cap_mask;

        adev->pdev = pdev;
        platform_set_drvdata(pdev, adev);

        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
        dma_dev->device_is_tx_complete = iop_adma_is_complete;
        dma_dev->device_issue_pending = iop_adma_issue_pending;
        dma_dev->device_dependency_added = iop_adma_dependency_added;
        dma_dev->dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
        if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->max_xor = iop_adma_get_max_xor();
                dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
        }
        if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
                dma_dev->device_prep_dma_zero_sum =
                        iop_adma_prep_dma_zero_sum;
        if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
                dma_dev->device_prep_dma_interrupt =
                        iop_adma_prep_dma_interrupt;

        iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
        if (!iop_chan) {
                ret = -ENOMEM;
                goto err_free_dma;
        }
        iop_chan->device = adev;

        iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
                                        res->end - res->start);
        if (!iop_chan->mmr_base) {
                ret = -ENOMEM;
                goto err_free_iop_chan;
        }
        tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
                iop_chan);

        /* clear errors before enabling interrupts */
        iop_adma_device_clear_err_status(iop_chan);

        for (i = 0; i < 3; i++) {
                irq_handler_t handler[] = { iop_adma_eot_handler,
                                        iop_adma_eoc_handler,
                                        iop_adma_err_handler };
                int irq = platform_get_irq(pdev, i);
                if (irq < 0) {
                        ret = -ENXIO;
                        goto err_free_iop_chan;
                } else {
                        ret = devm_request_irq(&pdev->dev, irq,
                                        handler[i], 0, pdev->name, iop_chan);
                        if (ret)
                                goto err_free_iop_chan;
                }
        }

        spin_lock_init(&iop_chan->lock);
        init_timer(&iop_chan->cleanup_watchdog);
        iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
        iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
        INIT_LIST_HEAD(&iop_chan->chain);
        INIT_LIST_HEAD(&iop_chan->all_slots);
        INIT_RCU_HEAD(&iop_chan->common.rcu);
        iop_chan->common.device = dma_dev;
        list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                ret = iop_adma_memcpy_self_test(adev);
                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
                if (ret)
                        goto err_free_iop_chan;
        }

        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
                dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
                ret = iop_adma_xor_zero_sum_self_test(adev);
                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
                if (ret)
                        goto err_free_iop_chan;
        }

        dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
          "( %s%s%s%s%s%s%s%s%s%s)\n",
          dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
          dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
          dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
          dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
          dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
          dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
          dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
          dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
          dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

        dma_async_device_register(dma_dev);
        goto out;

 err_free_iop_chan:
        kfree(iop_chan);
 err_free_dma:
        dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
                        adev->dma_desc_pool_virt, adev->dma_desc_pool);
 err_free_adev:
        kfree(adev);
 out:
        return ret;
}
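
/*
 * Note that both self-tests run from probe before dma_async_device_register()
 * is called, so a channel that fails its hardware check is torn down and
 * never advertised to dmaengine clients.
 */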
 
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        dma_cookie_t cookie;
        int slot_cnt, slots_per_op;

        dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;

                list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
                sw_desc->async_tx.ack = 1;
                iop_desc_init_memcpy(grp_start, 0);
                iop_desc_set_byte_count(grp_start, iop_chan, 0);
                iop_desc_set_dest_addr(grp_start, iop_chan, 0);
                iop_desc_set_memcpy_src_addr(grp_start, 0);

                cookie = iop_chan->common.cookie;
                cookie++;
                if (cookie <= 1)
                        cookie = 2;

                /* initialize the completed cookie to be less than
                 * the most recently used cookie
                 */
                iop_chan->completed_cookie = cookie - 1;
                iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

                /* channel should not be busy */
                BUG_ON(iop_chan_is_busy(iop_chan));

                /* clear any prior error-status bits */
                iop_adma_device_clear_err_status(iop_chan);

                /* disable operation */
                iop_chan_disable(iop_chan);

                /* set the descriptor address */
                iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

                /* 1/ don't add pre-chained descriptors
                 * 2/ dummy read to flush next_desc write
                 */
                BUG_ON(iop_desc_get_next_desc(sw_desc));

                /* run the descriptor */
                iop_chan_enable(iop_chan);
        } else
                dev_printk(KERN_ERR, iop_chan->device->common.dev,
                         "failed to allocate null descriptor\n");
        spin_unlock_bh(&iop_chan->lock);
}
 
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        dma_cookie_t cookie;
        int slot_cnt, slots_per_op;

        dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
                sw_desc->async_tx.ack = 1;
                iop_desc_init_null_xor(grp_start, 2, 0);
                iop_desc_set_byte_count(grp_start, iop_chan, 0);
                iop_desc_set_dest_addr(grp_start, iop_chan, 0);
                iop_desc_set_xor_src_addr(grp_start, 0, 0);
                iop_desc_set_xor_src_addr(grp_start, 1, 0);

                cookie = iop_chan->common.cookie;
                cookie++;
                if (cookie <= 1)
                        cookie = 2;

                /* initialize the completed cookie to be less than
                 * the most recently used cookie
                 */
                iop_chan->completed_cookie = cookie - 1;
                iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

                /* channel should not be busy */
                BUG_ON(iop_chan_is_busy(iop_chan));

                /* clear any prior error-status bits */
                iop_adma_device_clear_err_status(iop_chan);

                /* disable operation */
                iop_chan_disable(iop_chan);

                /* set the descriptor address */
                iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

                /* 1/ don't add pre-chained descriptors
                 * 2/ dummy read to flush next_desc write
                 */
                BUG_ON(iop_desc_get_next_desc(sw_desc));

                /* run the descriptor */
                iop_chan_enable(iop_chan);
        } else
                dev_printk(KERN_ERR, iop_chan->device->common.dev,
                        "failed to allocate null descriptor\n");
        spin_unlock_bh(&iop_chan->lock);
}
 
static struct platform_driver iop_adma_driver = {
        .probe          = iop_adma_probe,
        .remove         = iop_adma_remove,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "iop-adma",
        },
};
 
static int __init iop_adma_init (void)
{
        return platform_driver_register(&iop_adma_driver);
}

/* it's currently unsafe to unload this module */
#if 0
static void __exit iop_adma_exit (void)
{
        platform_driver_unregister(&iop_adma_driver);
        return;
}
module_exit(iop_adma_exit);
#endif

module_init(iop_adma_init);
 
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");