/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
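
/*
 * Sizing note (an informal aside, derived from the descriptor layout used
 * below): each SDMA descriptor is a pair of little-endian 64-bit qwords,
 * i.e. 16 bytes, so a 4KB page holds 4096 / 16 = 256 of them, which is
 * where the descriptor count computed in alloc_sdma() comes from.
 */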

static void vl15_watchdog_enq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
		unsigned long interval = (HZ + 19) / 20;
		dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
		add_timer(&dd->ipath_sdma_vl15_timer);
	}
}
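
/*
 * Aside on the interval arithmetic above and in vl15_watchdog_deq() below:
 * (HZ + 19) / 20 is HZ / 20 rounded up, i.e. roughly 50 ms regardless of
 * the configured timer tick rate.
 */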

static void vl15_watchdog_deq(struct ipath_devdata *dd)
{
	/* ipath_sdma_lock must already be held */
	if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
		unsigned long interval = (HZ + 19) / 20;
		mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
	} else
		del_timer(&dd->ipath_sdma_vl15_timer);
}

static void vl15_watchdog_timeout(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
		ipath_dbg("vl15 watchdog timeout - clearing\n");
		ipath_cancel_sends(dd, 1);
	} else
		ipath_dbg("vl15 watchdog timeout - "
			  "condition already cleared\n");
}

static void unmap_desc(struct ipath_devdata *dd, unsigned head)
{
	__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

/*
 * ipath_sdma_lock should be locked before calling this.
 */
int ipath_sdma_make_progress(struct ipath_devdata *dd)
{
	struct list_head *lp = NULL;
	struct ipath_sdma_txreq *txp = NULL;
	u16 dmahead;
	u16 start_idx = 0;
	int progress = 0;

	if (!list_empty(&dd->ipath_sdma_activelist)) {
		lp = dd->ipath_sdma_activelist.next;
		txp = list_entry(lp, struct ipath_sdma_txreq, list);
		start_idx = txp->start_idx;
	}

	/*
	 * Read the SDMA head register in order to know that the
	 * interrupt clear has been written to the chip.
	 * Otherwise, we may not get an interrupt for the last
	 * descriptor in the queue.
	 */
	dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
	/* sanity check return value for error handling (chip reset, etc.) */
	if (dmahead >= dd->ipath_sdma_descq_cnt)
		goto done;

	while (dd->ipath_sdma_descq_head != dmahead) {
		if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
		    dd->ipath_sdma_descq_head == start_idx) {
			unmap_desc(dd, dd->ipath_sdma_descq_head);
			start_idx++;
			if (start_idx == dd->ipath_sdma_descq_cnt)
				start_idx = 0;
		}

		/* increment free count and head */
		dd->ipath_sdma_descq_removed++;
		if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
			dd->ipath_sdma_descq_head = 0;

		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
			/* move to notify list */
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(lp, &dd->ipath_sdma_notifylist);
			if (!list_empty(&dd->ipath_sdma_activelist)) {
				lp = dd->ipath_sdma_activelist.next;
				txp = list_entry(lp, struct ipath_sdma_txreq,
						 list);
				start_idx = txp->start_idx;
			} else {
				lp = NULL;
				txp = NULL;
			}
		}
		progress = 1;
	}

	if (progress)
		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

done:
	return progress;
}

static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
{
	struct ipath_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, list, list) {
		list_del_init(&txp->list);

		if (txp->callback)
			(*txp->callback)(txp->callback_cookie,
					 txp->callback_status);
	}
}

static void sdma_notify_taskbody(struct ipath_devdata *dd)
{
	unsigned long flags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	list_splice_init(&dd->ipath_sdma_notifylist, &list);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	ipath_sdma_notify(dd, &list);

	/*
	 * The IB verbs layer needs to see the callback before getting
	 * the call to ipath_ib_piobufavail() because the callback
	 * handles releasing resources the next send will need.
	 * Otherwise, we could do these calls in
	 * ipath_sdma_make_progress().
	 */
	ipath_ib_piobufavail(dd->verbs_dev);
}

static void sdma_notify_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		sdma_notify_taskbody(dd);
}

static void dump_sdma_state(struct ipath_devdata *dd)
{
	unsigned long reg;

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
	ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
	ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
	ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
	ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);

	reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
	ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
}

static void sdma_abort_task(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u64 status;
	unsigned long flags;

	if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		return;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;

	/* nothing to do */
	if (status == IPATH_SDMA_ABORT_NONE)
		goto unlock;

	/* ipath_sdma_abort() is done, waiting for interrupt */
	if (status == IPATH_SDMA_ABORT_DISARMED) {
		if (jiffies < dd->ipath_sdma_abort_intr_timeout)
			goto resched_noprint;
		/* give up, intr got lost somewhere */
		ipath_dbg("give up waiting for SDMADISABLED intr\n");
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		status = IPATH_SDMA_ABORT_ABORTED;
	}

	/* everything is stopped, time to clean up and restart */
	if (status == IPATH_SDMA_ABORT_ABORTED) {
		struct ipath_sdma_txreq *txp, *txpnext;
		unsigned long hwstatus;

		hwstatus = ipath_read_kreg64(dd,
				dd->ipath_kregs->kr_senddmastatus);

		if (/* ScoreBoardDrainInProg */
		    test_bit(63, &hwstatus) ||
		    /* AbortInProg */
		    test_bit(62, &hwstatus) ||
		    /* InternalSDmaEnable */
		    test_bit(61, &hwstatus) ||
		    /* ScbEmpty */
		    !test_bit(30, &hwstatus)) {
			if (dd->ipath_sdma_reset_wait > 0) {
				/* not done shutting down sdma */
				--dd->ipath_sdma_reset_wait;
				goto resched;
			}
			ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
				"status after SDMA reset, continuing\n");
		}

		/* dequeue all "sent" requests */
		list_for_each_entry_safe(txp, txpnext,
					 &dd->ipath_sdma_activelist, list) {
			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
				vl15_watchdog_deq(dd);
			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
		}

		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);

		/* reset our notion of head and tail */
		dd->ipath_sdma_descq_tail = 0;
		dd->ipath_sdma_descq_head = 0;
		dd->ipath_sdma_head_dma[0] = 0;
		dd->ipath_sdma_generation = 0;
		dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;

		/* Reset SendDmaLenGen */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
			(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));

		/* done with sdma state for a bit */
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

		/*
		 * Don't restart sdma here. Wait until link is up to ACTIVE.
		 * VL15 MADs used to bring the link up use PIO, and multiple
		 * link transitions otherwise cause the sdma engine to be
		 * stopped and started multiple times.
		 * The disable is done here, including the shadow, so the
		 * state is kept consistent.
		 * See ipath_restart_sdma() for the actual starting of sdma.
		 */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

		/* make sure I see next message */
		dd->ipath_sdma_abort_jiffies = 0;

		goto done;
	}

resched:
	/*
	 * for now, keep spinning
	 * JAG - this is bad to just have default be a loop without
	 * state change
	 */
	if (jiffies > dd->ipath_sdma_abort_jiffies) {
		ipath_dbg("looping with status 0x%016llx\n",
			  dd->ipath_sdma_status);
		dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
	}
resched_noprint:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	return;

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
done:
	return;
}

/*
 * This is called from interrupt context.
 */
void ipath_sdma_intr(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	(void) ipath_sdma_make_progress(dd);

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
}

static int alloc_sdma(struct ipath_devdata *dd)
{
	int ret = 0;

	/* Allocate memory for SendDMA descriptor FIFO */
	dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
		SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
			"FIFO memory\n");
		ret = -ENOMEM;
		goto done;
	}

	dd->ipath_sdma_descq_cnt =
		SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);

	/* Allocate memory for DMA of head register to memory */
	dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
		PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
	if (!dd->ipath_sdma_head_dma) {
		ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
		ret = -ENOMEM;
		goto cleanup_descq;
	}
	dd->ipath_sdma_head_dma[0] = 0;

	init_timer(&dd->ipath_sdma_vl15_timer);
	dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
	dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
	atomic_set(&dd->ipath_sdma_vl15_count, 0);

	goto done;

cleanup_descq:
	dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
		(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
	dd->ipath_sdma_descq = NULL;
	dd->ipath_sdma_descq_phys = 0;
done:
	return ret;
}

int setup_sdma(struct ipath_devdata *dd)
{
	int ret = 0;
	unsigned i, n;
	u64 tmp64;
	u64 senddmabufmask[3] = { 0 };
	unsigned long flags;

	ret = alloc_sdma(dd);
	if (ret)
		goto done;

	if (!dd->ipath_sdma_descq) {
		ipath_dev_err(dd, "SendDMA memory not allocated\n");
		goto done;
	}

	dd->ipath_sdma_status = 0;
	dd->ipath_sdma_abort_jiffies = 0;
	dd->ipath_sdma_generation = 0;
	dd->ipath_sdma_descq_tail = 0;
	dd->ipath_sdma_descq_head = 0;
	dd->ipath_sdma_descq_removed = 0;
	dd->ipath_sdma_descq_added = 0;

	/* Set SendDmaBase */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
			 dd->ipath_sdma_descq_phys);
	/* Set SendDmaLenGen */
	tmp64 = dd->ipath_sdma_descq_cnt;
	tmp64 |= 1<<18; /* enable generation checking */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
	/* Set SendDmaTail */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
			 dd->ipath_sdma_descq_tail);
	/* Set SendDmaHeadAddr */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
			 dd->ipath_sdma_head_phys);

	/* Reserve all the former "kernel" piobufs */
	n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
	for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;

		senddmabufmask[word] |= 1ULL << bit;
	}
	ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
		n - dd->ipath_lastport_piobuf, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
			 senddmabufmask[0]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
			 senddmabufmask[1]);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
			 senddmabufmask[2]);

	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);

	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
		     (unsigned long) dd);
	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
		     (unsigned long) dd);

	/*
	 * No use to turn on SDMA here, as link is probably not ACTIVE.
	 * Just mark it RUNNING and enable the interrupt, and let the
	 * ipath_restart_sdma() on link transition to ACTIVE actually
	 * enable it.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

done:
	return ret;
}

void teardown_sdma(struct ipath_devdata *dd)
{
	struct ipath_sdma_txreq *txp, *txpnext;
	unsigned long flags;
	dma_addr_t sdma_head_phys = 0;
	dma_addr_t sdma_descq_phys = 0;
	void *sdma_descq = NULL;
	void *sdma_head_dma = NULL;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	tasklet_kill(&dd->ipath_sdma_abort_task);
	tasklet_kill(&dd->ipath_sdma_notify_task);

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	/* dequeue all "sent" requests */
	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
				 list) {
		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
			vl15_watchdog_deq(dd);
		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	sdma_notify_taskbody(dd);

	del_timer_sync(&dd->ipath_sdma_vl15_timer);

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	dd->ipath_sdma_abort_jiffies = 0;

	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);

	if (dd->ipath_sdma_head_dma) {
		sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
		sdma_head_phys = dd->ipath_sdma_head_phys;
		dd->ipath_sdma_head_dma = NULL;
		dd->ipath_sdma_head_phys = 0;
	}

	if (dd->ipath_sdma_descq) {
		sdma_descq = dd->ipath_sdma_descq;
		sdma_descq_phys = dd->ipath_sdma_descq_phys;
		dd->ipath_sdma_descq = NULL;
		dd->ipath_sdma_descq_phys = 0;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	if (sdma_head_dma)
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  sdma_head_dma, sdma_head_phys);

	if (sdma_descq)
		dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
				  sdma_descq, sdma_descq_phys);
}

/*
 * [Re]start SDMA, if we use it, and it's not already OK.
 * This is called on transition to link ACTIVE, either the first or
 * subsequent times.
 */
void ipath_restart_sdma(struct ipath_devdata *dd)
{
	int needed = 1;
	unsigned long flags;

	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		goto bail;

	/*
	 * First, make sure we should, which is to say,
	 * check that we are "RUNNING" (not in teardown)
	 * and not "SHUTDOWN"
	 */
	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
		|| test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		needed = 0;
	else {
		__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
		__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	}
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!needed) {
		ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
			dd->ipath_sdma_status);
		goto bail;
	}
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	/*
	 * First clear, just to be safe. Enable is only done
	 * in chip on 0->1 transition
	 */
	dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

bail:
	return;
}

static inline void make_sdma_desc(struct ipath_devdata *dd,
	u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
	WARN_ON(addr & 3);
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
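
/*
 * Informal sketch of the resulting descriptor, pieced together from the
 * field comments above and the flag bits used by ipath_sdma_verbs_send()
 * below (not copied from a hardware spec):
 *
 *   qword 0: [10:0]  SDmaBufOffset[12:2]
 *            [11]    "last descriptor" flag
 *            [12]    "first descriptor" flag
 *            [14]    SDmaUseLargeBuf
 *            [15]    interrupt-request flag
 *            [26:16] SDmaDwordCount[10:0]
 *            [31:30] SDmaGeneration[1:0]
 *            [63:32] SDmaPhyAddr[31:0]
 *   qword 1: SDmaPhyAddr[47:32]
 */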

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 * (An informal usage sketch follows the function below.)
 */
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
	struct ipath_sge_state *ss, u32 dwords,
	struct ipath_verbs_txreq *tx)
{
	unsigned long flags;
	struct ipath_sge *sge;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;

	if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
		ipath_dbg("packet size %X > ibmax %X, fail\n",
			tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
		ret = -EMSGSIZE;
		goto fail;
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

retry:
	if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
		ret = -EBUSY;
		goto unlock;
	}

	if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
		if (ipath_sdma_make_progress(dd))
			goto retry;
		ret = -ENOBUFS;
		goto unlock;
	}

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(addr)) {
		ret = -EIO;
		goto unlock;
	}

	dwoffset = tx->map_len >> 2;
	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);

	/* SDmaFirstDesc */
	sdmadesc[0] |= 1ULL << 12;
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */

	/* write to the descq */
	tail = dd->ipath_sdma_descq_tail;
	descqp = &dd->ipath_sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
		tx->txreq.start_idx = tail;

	/* increment the tail */
	if (++tail == dd->ipath_sdma_descq_cnt) {
		tail = 0;
		descqp = &dd->ipath_sdma_descq[0].qw[0];
		++dd->ipath_sdma_generation;
	}

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		dw = (len + 3) >> 2;
		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
				      DMA_TO_DEVICE);
		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= 1ULL << 14;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == dd->ipath_sdma_descq_cnt) {
			tail = 0;
			descqp = &dd->ipath_sdma_descq[0].qw[0];
			++dd->ipath_sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
	descqp -= 2;
	/* SDmaLastDesc */
	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
		/* SDmaIntReq */
		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
	}

	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);

	tx->txreq.next_descq_idx = tail;
	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
	dd->ipath_sdma_descq_tail = tail;
	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
		vl15_watchdog_enq(dd);

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
fail:
	return ret;
}
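
/*
 * Informal usage sketch (hypothetical caller, not part of this file): per
 * the contract documented above ipath_sdma_verbs_send(), the verbs layer
 * maps its own header buffer, sizes the request, and retries later if the
 * packet could not be queued, e.g.:
 *
 *	tx->map_len = hdrwords << 2;		// bytes of header to DMA
 *	tx->txreq.sg_count = 1 + npayload;	// header + payload descriptors
 *	ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
 *	if (ret)
 *		// not queued (too big, aborting, or ring full); the caller
 *		// still owns tx and may retry after the next notify callback
 */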