/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
 
#include <linux/spinlock.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
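/*
 * Each descriptor is a pair of 64-bit words (the qw[0]/qw[1] pair used
 * below), so one 4KB page holds 4096 / 16 = 256 of them;
 * ipath_sdma_descq_cnt is derived from SDMA_DESCQ_SZ in alloc_sdma().
 */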
 
static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
        /* ipath_sdma_lock must already be held */
        if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
                unsigned long interval = (HZ + 19) / 20;

                dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
                add_timer(&dd->ipath_sdma_vl15_timer);
        }
}
 
static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
        /* ipath_sdma_lock must already be held */
        if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
                unsigned long interval = (HZ + 19) / 20;

                mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
        } else
                del_timer(&dd->ipath_sdma_vl15_timer);
}
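/*
 * Note on the interval used above: (HZ + 19) / 20 is HZ/20 rounded up,
 * i.e. roughly 50ms (and at least one jiffy) regardless of the tick
 * rate.  The enq/deq pair reference-count outstanding VL15 packets, so
 * the watchdog timer only runs while at least one is in flight.
 */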
 
static void vl15_watchdog_timeout(unsigned long opaque)
{
        struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

        if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
                ipath_dbg("vl15 watchdog timeout - clearing\n");
                ipath_cancel_sends(dd, 1);
                ipath_hol_down(dd);
        } else
                ipath_dbg("vl15 watchdog timeout - "
                          "condition already cleared\n");
}
 
static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
        __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
        u64 desc[2];
        dma_addr_t addr;
        size_t len;

        desc[0] = le64_to_cpu(descqp[0]);
        desc[1] = le64_to_cpu(descqp[1]);

        addr = (desc[1] << 32) | (desc[0] >> 32);
        len = (desc[0] >> 14) & (0x7ffULL << 2);
        dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}
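/*
 * The extraction above is the inverse of the packing in
 * make_sdma_desc() below: SDmaPhyAddr[31:0] sits in desc[0] bits
 * 63:32, SDmaPhyAddr[47:32] in desc[1], and SDmaDwordCount in desc[0]
 * bits 26:16, so shifting right by 14 and masking with (0x7ff << 2)
 * recovers the mapped length in bytes (dword count times 4).
 */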
 
/*
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
        struct list_head *lp = NULL;
        struct ipath_sdma_txreq *txp = NULL;
        u16 dmahead;
        u16 start_idx = 0;
        int progress = 0;

        if (!list_empty(&dd->ipath_sdma_activelist)) {
                lp = dd->ipath_sdma_activelist.next;
                txp = list_entry(lp, struct ipath_sdma_txreq, list);
                start_idx = txp->start_idx;
        }

        /*
         * Read the SDMA head register in order to know that the
         * interrupt clear has been written to the chip.
         * Otherwise, we may not get an interrupt for the last
         * descriptor in the queue.
         */
        dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
        /* sanity check return value for error handling (chip reset, etc.) */
        if (dmahead >= dd->ipath_sdma_descq_cnt)
                goto done;

        while (dd->ipath_sdma_descq_head != dmahead) {
                if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
                    dd->ipath_sdma_descq_head == start_idx) {
                        unmap_desc(dd, dd->ipath_sdma_descq_head);
                        start_idx++;
                        if (start_idx == dd->ipath_sdma_descq_cnt)
                                start_idx = 0;
                }

                /* increment free count and head */
                dd->ipath_sdma_descq_removed++;
                if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
                        dd->ipath_sdma_descq_head = 0;

                if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
                        /* move to notify list */
                        if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
                                vl15_watchdog_deq(dd);
                        list_move_tail(lp, &dd->ipath_sdma_notifylist);
                        if (!list_empty(&dd->ipath_sdma_activelist)) {
                                lp = dd->ipath_sdma_activelist.next;
                                txp = list_entry(lp, struct ipath_sdma_txreq,
                                                 list);
                                start_idx = txp->start_idx;
                        } else {
                                lp = NULL;
                                txp = NULL;
                        }
                }
                progress = 1;
        }

        if (progress)
                tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
        return progress;
}
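/*
 * ipath_sdma_descq_added and ipath_sdma_descq_removed are free-running
 * counters, so (added - removed) is the number of descriptors the
 * hardware still owns; ipath_sdma_descq_freecnt(), used by
 * ipath_sdma_verbs_send() below, is presumably derived from the same
 * pair in the header.
 */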
 
static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
        struct ipath_sdma_txreq *txp, *txp_next;

        list_for_each_entry_safe(txp, txp_next, list, list) {
                list_del_init(&txp->list);

                if (txp->callback)
                        (*txp->callback)(txp->callback_cookie,
                                         txp->callback_status);
        }
}
 
static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
        unsigned long flags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

        list_splice_init(&dd->ipath_sdma_notifylist, &list);

        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        ipath_sdma_notify(dd, &list);

        /*
         * The IB verbs layer needs to see the callback before getting
         * the call to ipath_ib_piobufavail() because the callback
         * handles releasing resources the next send will need.
         * Otherwise, we could do these calls in
         * ipath_sdma_make_progress().
         */
        ipath_ib_piobufavail(dd->verbs_dev);
}
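/*
 * Splicing the notify list onto a private list under ipath_sdma_lock,
 * then running the callbacks with the lock dropped, lets a callback
 * queue new SDMA work without deadlocking on ipath_sdma_lock.
 */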
 
static void sdma_notify_task(unsigned long opaque)
{
        struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

        if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
                sdma_notify_taskbody(dd);
}
 
static void dump_sdma_state(struct ipath_devdata *dd)
{
        unsigned long reg;

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
        ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
        ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
        ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
        ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
        ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
        ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

        reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
        ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}
 
static void sdma_abort_task(unsigned long opaque)
{
        struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
        u64 status;
        unsigned long flags;

        if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
                return;

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

        status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

        /* nothing to do */
        if (status == IPATH_SDMA_ABORT_NONE)
                goto unlock;

        /* ipath_sdma_abort() is done, waiting for interrupt */
        if (status == IPATH_SDMA_ABORT_DISARMED) {
                if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
                        goto resched_noprint;
                /* give up, intr got lost somewhere */
                ipath_dbg("give up waiting for SDMADISABLED intr\n");
                __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
                status = IPATH_SDMA_ABORT_ABORTED;
        }

        /* everything is stopped, time to clean up and restart */
        if (status == IPATH_SDMA_ABORT_ABORTED) {
                struct ipath_sdma_txreq *txp, *txpnext;
                u64 hwstatus;
                int notify = 0;

                hwstatus = ipath_read_kreg64(dd,
                                dd->ipath_kregs->kr_senddmastatus);

                if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
                                 IPATH_SDMA_STATUS_ABORT_IN_PROG             |
                                 IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
                    !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
                        if (dd->ipath_sdma_reset_wait > 0) {
                                /* not done shutting down sdma */
                                --dd->ipath_sdma_reset_wait;
                                goto resched;
                        }
                        ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
                                "status after SDMA reset, continuing\n");
                        dump_sdma_state(dd);
                }

                /* dequeue all "sent" requests */
                list_for_each_entry_safe(txp, txpnext,
                                         &dd->ipath_sdma_activelist, list) {
                        txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
                        if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
                                vl15_watchdog_deq(dd);
                        list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
                        notify = 1;
                }
                if (notify)
                        tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

                /* reset our notion of head and tail */
                dd->ipath_sdma_descq_tail = 0;
                dd->ipath_sdma_descq_head = 0;
                dd->ipath_sdma_head_dma[0] = 0;
                dd->ipath_sdma_generation = 0;
                dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

                /* Reset SendDmaLenGen */
                ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
                        (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

                /* done with sdma state for a bit */
                spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

                /*
                 * Don't restart sdma here (with the exception
                 * below). Wait until link is up to ACTIVE.  VL15 MADs
                 * used to bring the link up use PIO, and multiple link
                 * transitions otherwise cause the sdma engine to be
                 * stopped and started multiple times.
                 * The disable is done here, including the shadow,
                 * so the state is kept consistent.
                 * See ipath_restart_sdma() for the actual starting
                 * of sdma.
                 */
                spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
                dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                 dd->ipath_sendctrl);
                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

                /* make sure I see next message */
                dd->ipath_sdma_abort_jiffies = 0;

                /*
                 * Not everything that takes SDMA offline is a link
                 * status change.  If the link was up, restart SDMA.
                 */
                if (dd->ipath_flags & IPATH_LINKACTIVE)
                        ipath_restart_sdma(dd);

                goto done;
        }

resched:
        /*
         * for now, keep spinning
         * JAG - this is bad to just have default be a loop without
         * state change
         */
        if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
                ipath_dbg("looping with status 0x%08lx\n",
                          dd->ipath_sdma_status);
                dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
        }
resched_noprint:
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
        if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
                tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
        return;

unlock:
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
done:
        return;
}
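/*
 * Abort recovery is a small state machine encoded in ipath_sdma_status:
 * ABORT_NONE (nothing to do) -> ABORT_DISARMED (disarm done, waiting
 * for the SDMADISABLED interrupt) -> ABORT_ABORTED (engine stopped:
 * flush the active list, reset the ring, and restart if the link is
 * ACTIVE).  The task keeps rescheduling itself until it gets there.
 */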
 
/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
        unsigned long flags;

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

        (void) ipath_sdma_make_progress(dd);

        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}
 
static int alloc_sdma(struct ipath_devdata *dd)
{
        int ret = 0;

        /* Allocate memory for SendDMA descriptor FIFO */
        dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
                SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

        if (!dd->ipath_sdma_descq) {
                ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
                        "FIFO memory\n");
                ret = -ENOMEM;
                goto done;
        }

        dd->ipath_sdma_descq_cnt =
                SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

        /* Allocate memory for DMA of head register to memory */
        dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
                PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
        if (!dd->ipath_sdma_head_dma) {
                ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
                ret = -ENOMEM;
                goto cleanup_descq;
        }
        dd->ipath_sdma_head_dma[0] = 0;

        init_timer(&dd->ipath_sdma_vl15_timer);
        dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
        dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
        atomic_set(&dd->ipath_sdma_vl15_count, 0);

        goto done;

cleanup_descq:
        dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
                (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
        dd->ipath_sdma_descq = NULL;
        dd->ipath_sdma_descq_phys = 0;
done:
        return ret;
}
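/*
 * alloc_sdma() follows the usual kernel goto-unwind pattern: a failed
 * head-block allocation frees the descriptor FIFO allocated before it,
 * so the function either fully succeeds or leaves nothing behind.
 */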
 
int setup_sdma(struct ipath_devdata *dd)
{
        int ret = 0;
        unsigned i, n;
        u64 tmp64;
        u64 senddmabufmask[3] = { 0 };
        unsigned long flags;

        ret = alloc_sdma(dd);
        if (ret)
                goto done;

        if (!dd->ipath_sdma_descq) {
                ipath_dev_err(dd, "SendDMA memory not allocated\n");
                goto done;
        }

        /*
         * Set initial status as if we had been up, then gone down.
         * This lets initial start on transition to ACTIVE be the
         * same as restart after link flap.
         */
        dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
        dd->ipath_sdma_abort_jiffies = 0;
        dd->ipath_sdma_generation = 0;
        dd->ipath_sdma_descq_tail = 0;
        dd->ipath_sdma_descq_head = 0;
        dd->ipath_sdma_descq_removed = 0;
        dd->ipath_sdma_descq_added = 0;
 
        /* Set SendDmaBase */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
                         dd->ipath_sdma_descq_phys);
        /* Set SendDmaLenGen */
        tmp64 = dd->ipath_sdma_descq_cnt;
        tmp64 |= 1<<18; /* enable generation checking */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
        /* Set SendDmaTail */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
                         dd->ipath_sdma_descq_tail);
        /* Set SendDmaHeadAddr */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
                         dd->ipath_sdma_head_phys);
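        /*
         * kr_senddmaheadaddr hands the chip the bus address of the page
         * allocated in alloc_sdma(), so the hardware can DMA its head
         * index to host memory as it consumes descriptors.
         */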
 
        /*
         * Reserve all the former "kernel" piobufs, using high number range
         * so we get as many 4K buffers as possible
         */
        n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
        i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
        ipath_chg_pioavailkernel(dd, i, n - i, 0);
        for (; i < n; ++i) {
                unsigned word = i / 64;
                unsigned bit = i & 63;

                senddmabufmask[word] |= 1ULL << bit;
        }
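        /*
         * Worked example of the mask math: buffer 70 lands in
         * word = 70 / 64 = 1 and bit = 70 & 63 = 6, i.e. bit 6 of
         * kr_senddmabufmask1 in the writes below.
         */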
 
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
                         senddmabufmask[0]);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
                         senddmabufmask[1]);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
                         senddmabufmask[2]);

        INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
        INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

        tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
                     (unsigned long) dd);
        tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
                     (unsigned long) dd);

        /*
         * No use to turn on SDMA here, as link is probably not ACTIVE
         * Just mark it RUNNING and enable the interrupt, and let the
         * ipath_restart_sdma() on link transition to ACTIVE actually
         * enable it.
         */
        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
        return ret;
}
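/*
 * A pattern worth noting above: each kr_sendctrl write is followed by
 * a read of kr_scratch, which flushes the posted write out to the chip
 * before the sendctrl lock is dropped.
 */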
 
void teardown_sdma(struct ipath_devdata *dd)
{
        struct ipath_sdma_txreq *txp, *txpnext;
        unsigned long flags;
        dma_addr_t sdma_head_phys = 0;
        dma_addr_t sdma_descq_phys = 0;
        void *sdma_descq = NULL;
        void *sdma_head_dma = NULL;

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
        __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
        __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
        __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        tasklet_kill(&dd->ipath_sdma_abort_task);
        tasklet_kill(&dd->ipath_sdma_notify_task);

        /* turn off sdma */
        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
        /* dequeue all "sent" requests */
        list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
                                 list) {
                txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
                if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
                        vl15_watchdog_deq(dd);
                list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
        }
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        sdma_notify_taskbody(dd);

        del_timer_sync(&dd->ipath_sdma_vl15_timer);

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

        dd->ipath_sdma_abort_jiffies = 0;

        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

        if (dd->ipath_sdma_head_dma) {
                sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
                sdma_head_phys = dd->ipath_sdma_head_phys;
                dd->ipath_sdma_head_dma = NULL;
                dd->ipath_sdma_head_phys = 0;
        }

        if (dd->ipath_sdma_descq) {
                sdma_descq = dd->ipath_sdma_descq;
                sdma_descq_phys = dd->ipath_sdma_descq_phys;
                dd->ipath_sdma_descq = NULL;
                dd->ipath_sdma_descq_phys = 0;
        }

        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        if (sdma_head_dma)
                dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                  sdma_head_dma, sdma_head_phys);
        if (sdma_descq)
                dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
                                  sdma_descq, sdma_descq_phys);
}
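/*
 * The coherent buffers are detached from the device structure while
 * holding ipath_sdma_lock, but freed only after it is dropped, since
 * dma_free_coherent() must not be called with interrupts disabled.
 */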
 
/*
 * [Re]start SDMA, if we use it, and it's not already OK.
 * This is called on transition to link ACTIVE, either the first or
 * subsequent times.
 */
void ipath_restart_sdma(struct ipath_devdata *dd)
{
        unsigned long flags;
        int needed = 1;

        if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
                goto bail;

        /*
         * First, make sure we should, which is to say,
         * check that we are "RUNNING" (not in teardown)
         * and not "SHUTDOWN"
         */
        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
        if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
                || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
                needed = 0;
        else {
                __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
                __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
                __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
        }
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
        if (!needed) {
                ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
                        dd->ipath_sdma_status);
                goto bail;
        }
        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        /*
         * First clear, just to be safe. Enable is only done
         * in chip on 0->1 transition
         */
        dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

        /* notify upper layers */
        ipath_ib_piobufavail(dd->verbs_dev);

bail:
        return;
}
 
static inline void make_sdma_desc(struct ipath_devdata *dd,
        u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
        WARN_ON(addr & 3);
        /* SDmaPhyAddr[47:32] */
        sdmadesc[1] = addr >> 32;
        /* SDmaPhyAddr[31:0] */
        sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
        /* SDmaGeneration[1:0] */
        sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
        /* SDmaDwordCount[10:0] */
        sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
        /* SDmaBufOffset[12:2] */
        sdmadesc[0] |= dwoffset & 0x7ffULL;
}
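/*
 * Worked example of the packing above (hypothetical values): with
 * addr = 0x12345678, dwlen = 8, dwoffset = 0 and generation 1, the
 * result is sdmadesc[1] = 0 and sdmadesc[0] = 0x1234567840080000:
 * the address in bits 63:32, generation in 31:30, dword count in
 * 26:16, and buffer offset in 10:0.
 */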
 
/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
 
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
        struct ipath_sge_state *ss, u32 dwords,
        struct ipath_verbs_txreq *tx)
{
        unsigned long flags;
        struct ipath_sge *sge;
        int ret = 0;
        u16 tail;
        __le64 *descqp;
        u64 sdmadesc[2];
        u32 dwoffset;
        dma_addr_t addr;

        if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
                ipath_dbg("packet size %X > ibmax %X, fail\n",
                        tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
                ret = -EMSGSIZE;
                goto fail;
        }

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
        if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
                ret = -EBUSY;
                goto unlock;
        }

        if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
                if (ipath_sdma_make_progress(dd))
                        goto retry;
                ret = -ENOBUFS;
                goto unlock;
        }

        addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
                              tx->map_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, addr))
                goto ioerr;

        dwoffset = tx->map_len >> 2;
        make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

        /* SDmaFirstDesc */
        sdmadesc[0] |= 1ULL << 12;
        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
                sdmadesc[0] |= 1ULL << 14;      /* SDmaUseLargeBuf */

        /* write to the descq */
        tail = dd->ipath_sdma_descq_tail;
        descqp = &dd->ipath_sdma_descq[tail].qw[0];
        *descqp++ = cpu_to_le64(sdmadesc[0]);
        *descqp++ = cpu_to_le64(sdmadesc[1]);

        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
                tx->txreq.start_idx = tail;

        /* increment the tail */
        if (++tail == dd->ipath_sdma_descq_cnt) {
                tail = 0;
                descqp = &dd->ipath_sdma_descq[0].qw[0];
                ++dd->ipath_sdma_generation;
        }
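        /*
         * Every wrap of the tail bumps ipath_sdma_generation; the two
         * generation bits stamped into each descriptor by
         * make_sdma_desc() are checked by the chip when generation
         * checking is enabled via kr_senddmalengen, guarding against
         * stale descriptors.
         */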
 
        sge = &ss->sge;
        while (dwords) {
                u32 dw;
                u32 len;

                len = dwords << 2;
                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                dw = (len + 3) >> 2;
                addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(&dd->pcidev->dev, addr))
                        goto unmap;
                make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
                /* SDmaUseLargeBuf has to be set in every descriptor */
                if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
                        sdmadesc[0] |= 1ULL << 14;
                /* write to the descq */
                *descqp++ = cpu_to_le64(sdmadesc[0]);
                *descqp++ = cpu_to_le64(sdmadesc[1]);

                /* increment the tail */
                if (++tail == dd->ipath_sdma_descq_cnt) {
                        tail = 0;
                        descqp = &dd->ipath_sdma_descq[0].qw[0];
                        ++dd->ipath_sdma_generation;
                }
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }

                dwoffset += dw;
                dwords -= dw;
        }

        if (!tail)
                descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
        descqp -= 2;
        /* SDmaLastDesc */
        descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
                /* SDmaIntReq */
                descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
        }

        /* Commit writes to memory and advance the tail on the chip */
        wmb();
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

        tx->txreq.next_descq_idx = tail;
        tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
        dd->ipath_sdma_descq_tail = tail;
        dd->ipath_sdma_descq_added += tx->txreq.sg_count;
        list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
                vl15_watchdog_enq(dd);
        goto unlock;

unmap:
        while (tail != dd->ipath_sdma_descq_tail) {
                if (!tail)
                        tail = dd->ipath_sdma_descq_cnt - 1;
                else
                        tail--;
                unmap_desc(dd, tail);
        }
ioerr:
        ret = -EIO;
unlock:
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
        return ret;
}