2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
43 #include "firmware_exports.h"
47 #define SGE_RX_SM_BUF_SIZE 1536
49 #define SGE_RX_COPY_THRES 256
50 #define SGE_RX_PULL_LEN 128
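/*
 * How these thresholds are used (see get_packet() and get_packet_pg() below):
 * packets no larger than SGE_RX_COPY_THRES are copied into a fresh sk_buff
 * and the original Rx buffer is recycled; larger packets arriving on a
 * page-chunk free list have their first SGE_RX_PULL_LEN bytes pulled into
 * the sk_buff head, with the remainder attached as a page fragment.
 */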
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0, FL0 will use sk_buffs
 * instead of page chunks.
#define FL0_PG_CHUNK_SIZE 2048
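/*
 * Example (assuming a 4KB PAGE_SIZE): each page is carved into two 2KB
 * chunks; alloc_pg_chunk() hands out one chunk per Rx buffer and takes an
 * extra page reference for every chunk after the first, so the page is
 * freed only once all of its chunks have been consumed.
 */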
59 #define SGE_RX_DROP_THRES 16
62 * Period of the Tx buffer reclaim timer. This timer does not need to run
63 * frequently as Tx buffers are usually reclaimed by new Tx packets.
65 #define TX_RECLAIM_PERIOD (HZ / 4)
67 /* WR size in bytes */
68 #define WR_LEN (WR_FLITS * 8)
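/* A flit is one 64-bit (8-byte) word, hence the factor of 8 above. */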
71 * Types of Tx queues in each queue set. Order here matters, do not change.
73 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
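/*
 * Each queue set has one Tx queue of each type above.  The order must not
 * change because the TXQ0/TXQ1/TXQ2 credit and GTS fields in response
 * descriptors refer to the ETH, OFLD and CTRL queues respectively
 * (see handle_rsp_cntrl_info()).
 */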
75 /* Values for sge_txq.flags */
77 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
78 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
82 __be64 flit[TX_DESC_FLITS];
92 struct tx_sw_desc { /* SW state per Tx descriptor */
94 u8 eop; /* set if last descriptor for packet */
95 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
96 u8 fragidx; /* first page fragment associated with descriptor */
97 s8 sflit; /* start flit of first SGL entry in descriptor */
100 struct rx_sw_desc { /* SW state per Rx descriptor */
103 struct fl_pg_chunk pg_chunk;
105 DECLARE_PCI_UNMAP_ADDR(dma_addr);
108 struct rsp_desc { /* response queue descriptor */
109 struct rss_header rss_hdr;
117 * Holds unmapping information for Tx packets that need deferred unmapping.
118 * This structure lives at skb->head and must be allocated by callers.
120 struct deferred_unmap_info {
121 struct pci_dev *pdev;
122 dma_addr_t addr[MAX_SKB_FRAGS + 1];
126 * Maps a number of flits to the number of Tx descriptors that can hold them.
129 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
131 * HW allows up to 4 descriptors to be combined into a WR.
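 *
 * With SGE_NUM_GENBITS == 2 the last flit of every descriptor is reserved
 * for the second generation bit (see wr_gen2()), which is why each band in
 * the table below is one entry shorter than in the SGE_NUM_GENBITS == 1
 * case.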
133 static u8 flit_desc_map[] = {
135 #if SGE_NUM_GENBITS == 1
136 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
138 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
139 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
140 #elif SGE_NUM_GENBITS == 2
141 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
142 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
143 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
144 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
150 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
152 return container_of(q, struct sge_qset, fl[qidx]);
155 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
157 return container_of(q, struct sge_qset, rspq);
160 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
162 return container_of(q, struct sge_qset, txq[qidx]);
166 * refill_rspq - replenish an SGE response queue
167 * @adapter: the adapter
168 * @q: the response queue to replenish
169 * @credits: how many new responses to make available
171 * Replenishes a response queue by making the supplied number of responses
174 static inline void refill_rspq(struct adapter *adapter,
175 const struct sge_rspq *q, unsigned int credits)
178 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
179 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away unnecessary code if this returns false.
static inline int need_skb_unmap(void)
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
195 DECLARE_PCI_UNMAP_ADDR(addr);
198 return sizeof(struct dummy) != 0;
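/*
 * On platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing the dummy
 * struct above is empty, its size is 0 under GCC, and every code path
 * guarded by need_skb_unmap() is eliminated at compile time.
 */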
202 * unmap_skb - unmap a packet main body and its page fragments
204 * @q: the Tx queue containing Tx descriptors for the packet
205 * @cidx: index of Tx descriptor
206 * @pdev: the PCI device
208 * Unmap the main body of an sk_buff and its page fragments, if any.
209 * Because of the fairly complicated structure of our SGLs and the desire
210 * to conserve space for metadata, the information necessary to unmap an
211 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
212 * descriptors (the physical addresses of the various data buffers), and
213 * the SW descriptor state (assorted indices). The send functions
214 * initialize the indices for the first packet descriptor so we can unmap
215 * the buffers held in the first Tx descriptor here, and we have enough
216 * information at this point to set the state for the next Tx descriptor.
218 * Note that it is possible to clean up the first descriptor of a packet
219 * before the send routines have written the next descriptors, but this
220 * race does not cause any problem. We just end up writing the unmapping
221 * info for the descriptor first.
223 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
224 unsigned int cidx, struct pci_dev *pdev)
226 const struct sg_ent *sgp;
227 struct tx_sw_desc *d = &q->sdesc[cidx];
228 int nfrags, frag_idx, curflit, j = d->addr_idx;
230 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
231 frag_idx = d->fragidx;
233 if (frag_idx == 0 && skb_headlen(skb)) {
234 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
235 skb_headlen(skb), PCI_DMA_TODEVICE);
239 curflit = d->sflit + 1 + j;
240 nfrags = skb_shinfo(skb)->nr_frags;
242 while (frag_idx < nfrags && curflit < WR_FLITS) {
243 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
244 skb_shinfo(skb)->frags[frag_idx].size,
255 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
256 d = cidx + 1 == q->size ? q->sdesc : d + 1;
257 d->fragidx = frag_idx;
259 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
264 * free_tx_desc - reclaims Tx descriptors and their buffers
265 * @adapter: the adapter
266 * @q: the Tx queue to reclaim descriptors from
267 * @n: the number of descriptors to reclaim
269 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
270 * Tx buffers. Called with the Tx queue lock held.
272 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
275 struct tx_sw_desc *d;
276 struct pci_dev *pdev = adapter->pdev;
277 unsigned int cidx = q->cidx;
279 const int need_unmap = need_skb_unmap() &&
280 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
284 if (d->skb) { /* an SGL is present */
286 unmap_skb(d->skb, q, cidx, pdev);
291 if (++cidx == q->size) {
300 * reclaim_completed_tx - reclaims completed Tx descriptors
301 * @adapter: the adapter
302 * @q: the Tx queue to reclaim completed descriptors from
304 * Reclaims Tx descriptors that the SGE has indicated it has processed,
305 * and frees the associated buffers if possible. Called with the Tx
308 static inline void reclaim_completed_tx(struct adapter *adapter,
311 unsigned int reclaim = q->processed - q->cleaned;
314 free_tx_desc(adapter, q, reclaim);
315 q->cleaned += reclaim;
316 q->in_use -= reclaim;
321 * should_restart_tx - are there enough resources to restart a Tx queue?
324 * Checks if there are enough descriptors to restart a suspended Tx queue.
326 static inline int should_restart_tx(const struct sge_txq *q)
328 unsigned int r = q->processed - q->cleaned;
330 return q->in_use - r < (q->size >> 1);
334 * free_rx_bufs - free the Rx buffers on an SGE free list
335 * @pdev: the PCI device associated with the adapter
336 * @rxq: the SGE free list to clean up
338 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
339 * this queue should be stopped before calling this function.
341 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
343 unsigned int cidx = q->cidx;
345 while (q->credits--) {
346 struct rx_sw_desc *d = &q->sdesc[cidx];
348 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
349 q->buf_size, PCI_DMA_FROMDEVICE);
351 put_page(d->pg_chunk.page);
352 d->pg_chunk.page = NULL;
357 if (++cidx == q->size)
361 if (q->pg_chunk.page) {
362 __free_page(q->pg_chunk.page);
363 q->pg_chunk.page = NULL;
368 * add_one_rx_buf - add a packet buffer to a free-buffer list
369 * @va: buffer start VA
370 * @len: the buffer length
371 * @d: the HW Rx descriptor to write
372 * @sd: the SW Rx descriptor to write
373 * @gen: the generation bit value
374 * @pdev: the PCI device associated with the adapter
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
379 static inline void add_one_rx_buf(void *va, unsigned int len,
380 struct rx_desc *d, struct rx_sw_desc *sd,
381 unsigned int gen, struct pci_dev *pdev)
385 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
386 pci_unmap_addr_set(sd, dma_addr, mapping);
388 d->addr_lo = cpu_to_be32(mapping);
389 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
391 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
392 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
395 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
397 if (!q->pg_chunk.page) {
398 q->pg_chunk.page = alloc_page(gfp);
399 if (unlikely(!q->pg_chunk.page))
401 q->pg_chunk.va = page_address(q->pg_chunk.page);
402 q->pg_chunk.offset = 0;
404 sd->pg_chunk = q->pg_chunk;
406 q->pg_chunk.offset += q->buf_size;
407 if (q->pg_chunk.offset == PAGE_SIZE)
408 q->pg_chunk.page = NULL;
410 q->pg_chunk.va += q->buf_size;
411 get_page(q->pg_chunk.page);
417 * refill_fl - refill an SGE free-buffer list
418 * @adapter: the adapter
419 * @q: the free-list to refill
420 * @n: the number of new buffers to allocate
421 * @gfp: the gfp flags for allocating new buffers
423 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
424 * allocated with the supplied gfp flags. The caller must assure that
425 * @n does not exceed the queue's capacity.
427 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
430 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
431 struct rx_desc *d = &q->desc[q->pidx];
435 if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
436 nomem: q->alloc_failed++;
439 buf_start = sd->pg_chunk.va;
441 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
447 buf_start = skb->data;
450 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
454 if (++q->pidx == q->size) {
463 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
466 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
468 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
472 * recycle_rx_buf - recycle a receive buffer
473 * @adapter: the adapter
474 * @q: the SGE free list
475 * @idx: index of buffer to recycle
477 * Recycles the specified buffer on the given free list by adding it at
478 * the next available slot on the list.
480 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
483 struct rx_desc *from = &q->desc[idx];
484 struct rx_desc *to = &q->desc[q->pidx];
486 q->sdesc[q->pidx] = q->sdesc[idx];
487 to->addr_lo = from->addr_lo; /* already big endian */
488 to->addr_hi = from->addr_hi; /* likewise */
490 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
491 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
494 if (++q->pidx == q->size) {
498 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
502 * alloc_ring - allocate resources for an SGE descriptor ring
503 * @pdev: the PCI device
504 * @nelem: the number of descriptors
505 * @elem_size: the size of each descriptor
506 * @sw_size: the size of the SW state associated with each ring element
507 * @phys: the physical address of the allocated ring
508 * @metadata: address of the array holding the SW state for the ring
510 * Allocates resources for an SGE descriptor ring, such as Tx queues,
511 * free buffer lists, or response queues. Each SGE ring requires
512 * space for its HW descriptors plus, optionally, space for the SW state
513 * associated with each HW entry (the metadata). The function returns
514 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring (returned through @metadata).
518 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
519 size_t sw_size, dma_addr_t * phys, void *metadata)
521 size_t len = nelem * elem_size;
523 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
528 s = kcalloc(nelem, sw_size, GFP_KERNEL);
531 dma_free_coherent(&pdev->dev, len, p, *phys);
536 *(void **)metadata = s;
542 * t3_reset_qset - reset a sge qset
 * Reset the qset structure.  The NAPI structure is preserved in the
 * event of the qset's reincarnation, for example during EEH recovery.
549 static void t3_reset_qset(struct sge_qset *q)
552 !(q->adap->flags & NAPI_INIT)) {
553 memset(q, 0, sizeof(*q));
558 memset(&q->rspq, 0, sizeof(q->rspq));
559 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
560 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
562 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
567 * free_qset - free the resources of an SGE queue set
568 * @adapter: the adapter owning the queue set
571 * Release the HW and SW resources associated with an SGE queue set, such
572 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
573 * queue set must be quiesced prior to calling this.
575 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
578 struct pci_dev *pdev = adapter->pdev;
580 if (q->tx_reclaim_timer.function)
581 del_timer_sync(&q->tx_reclaim_timer);
583 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
585 spin_lock_irq(&adapter->sge.reg_lock);
586 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
587 spin_unlock_irq(&adapter->sge.reg_lock);
588 free_rx_bufs(pdev, &q->fl[i]);
589 kfree(q->fl[i].sdesc);
590 dma_free_coherent(&pdev->dev,
592 sizeof(struct rx_desc), q->fl[i].desc,
596 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
597 if (q->txq[i].desc) {
598 spin_lock_irq(&adapter->sge.reg_lock);
599 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
600 spin_unlock_irq(&adapter->sge.reg_lock);
601 if (q->txq[i].sdesc) {
602 free_tx_desc(adapter, &q->txq[i],
604 kfree(q->txq[i].sdesc);
606 dma_free_coherent(&pdev->dev,
608 sizeof(struct tx_desc),
609 q->txq[i].desc, q->txq[i].phys_addr);
610 __skb_queue_purge(&q->txq[i].sendq);
614 spin_lock_irq(&adapter->sge.reg_lock);
615 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
616 spin_unlock_irq(&adapter->sge.reg_lock);
617 dma_free_coherent(&pdev->dev,
618 q->rspq.size * sizeof(struct rsp_desc),
619 q->rspq.desc, q->rspq.phys_addr);
626 * init_qset_cntxt - initialize an SGE queue set context info
628 * @id: the queue set id
630 * Initializes the TIDs and context ids for the queues of a queue set.
632 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
634 qs->rspq.cntxt_id = id;
635 qs->fl[0].cntxt_id = 2 * id;
636 qs->fl[1].cntxt_id = 2 * id + 1;
637 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
638 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
639 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
640 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
641 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
645 * sgl_len - calculates the size of an SGL of the given capacity
646 * @n: the number of SGL entries
648 * Calculates the number of flits needed for a scatter/gather list that
649 * can hold the given number of entries.
651 static inline unsigned int sgl_len(unsigned int n)
653 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
654 return (3 * n) / 2 + (n & 1);
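/*
 * Each struct sg_ent packs two length/address pairs into 3 flits
 * (2 x 4-byte lengths plus 2 x 8-byte addresses), so for example
 * sgl_len(1) == 2, sgl_len(2) == 3 and sgl_len(3) == 5.
 */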
658 * flits_to_desc - returns the num of Tx descriptors for the given flits
659 * @n: the number of flits
661 * Calculates the number of Tx descriptors needed for the supplied number
664 static inline unsigned int flits_to_desc(unsigned int n)
666 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
667 return flit_desc_map[n];
671 * get_packet - return the next ingress packet buffer from a free list
672 * @adap: the adapter that received the packet
673 * @fl: the SGE free list holding the packet
674 * @len: the packet length including any SGE padding
675 * @drop_thres: # of remaining buffers before we start dropping packets
677 * Get the next packet from a free list and complete setup of the
678 * sk_buff. If the packet is small we make a copy and recycle the
679 * original buffer, otherwise we use the original buffer itself. If a
680 * positive drop threshold is supplied packets are dropped and their
681 * buffers recycled if (a) the number of remaining buffers is under the
682 * threshold and the packet is too big to copy, or (b) the packet should
683 * be copied but there is no memory for the copy.
685 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
686 unsigned int len, unsigned int drop_thres)
688 struct sk_buff *skb = NULL;
689 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
691 prefetch(sd->skb->data);
694 if (len <= SGE_RX_COPY_THRES) {
695 skb = alloc_skb(len, GFP_ATOMIC);
696 if (likely(skb != NULL)) {
698 pci_dma_sync_single_for_cpu(adap->pdev,
699 pci_unmap_addr(sd, dma_addr), len,
701 memcpy(skb->data, sd->skb->data, len);
702 pci_dma_sync_single_for_device(adap->pdev,
703 pci_unmap_addr(sd, dma_addr), len,
705 } else if (!drop_thres)
708 recycle_rx_buf(adap, fl, fl->cidx);
712 if (unlikely(fl->credits < drop_thres))
716 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
717 fl->buf_size, PCI_DMA_FROMDEVICE);
720 __refill_fl(adap, fl);
725 * get_packet_pg - return the next ingress packet buffer from a free list
726 * @adap: the adapter that received the packet
727 * @fl: the SGE free list holding the packet
728 * @len: the packet length including any SGE padding
729 * @drop_thres: # of remaining buffers before we start dropping packets
731 * Get the next packet from a free list populated with page chunks.
732 * If the packet is small we make a copy and recycle the original buffer,
733 * otherwise we attach the original buffer as a page fragment to a fresh
734 * sk_buff. If a positive drop threshold is supplied packets are dropped
735 * and their buffers recycled if (a) the number of remaining buffers is
736 * under the threshold and the packet is too big to copy, or (b) there's
739 * Note: this function is similar to @get_packet but deals with Rx buffers
740 * that are page chunks rather than sk_buffs.
742 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
743 unsigned int len, unsigned int drop_thres)
745 struct sk_buff *skb = NULL;
746 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
748 if (len <= SGE_RX_COPY_THRES) {
749 skb = alloc_skb(len, GFP_ATOMIC);
750 if (likely(skb != NULL)) {
752 pci_dma_sync_single_for_cpu(adap->pdev,
753 pci_unmap_addr(sd, dma_addr), len,
755 memcpy(skb->data, sd->pg_chunk.va, len);
756 pci_dma_sync_single_for_device(adap->pdev,
757 pci_unmap_addr(sd, dma_addr), len,
759 } else if (!drop_thres)
763 recycle_rx_buf(adap, fl, fl->cidx);
767 if (unlikely(fl->credits <= drop_thres))
770 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
771 if (unlikely(!skb)) {
777 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
778 fl->buf_size, PCI_DMA_FROMDEVICE);
779 __skb_put(skb, SGE_RX_PULL_LEN);
780 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
781 skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
782 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
783 len - SGE_RX_PULL_LEN);
785 skb->data_len = len - SGE_RX_PULL_LEN;
786 skb->truesize += skb->data_len;
790 * We do not refill FLs here, we let the caller do it to overlap a
797 * get_imm_packet - return the next ingress packet buffer from a response
798 * @resp: the response descriptor containing the packet data
800 * Return a packet containing the immediate data of the given response.
802 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
804 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
807 __skb_put(skb, IMMED_PKT_SIZE);
808 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
814 * calc_tx_descs - calculate the number of Tx descriptors for a packet
817 * Returns the number of Tx descriptors needed for the given Ethernet
818 * packet. Ethernet packets require addition of WR and CPL headers.
820 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
824 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
827 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
828 if (skb_shinfo(skb)->gso_size)
830 return flits_to_desc(flits);
834 * make_sgl - populate a scatter/gather list for a packet
836 * @sgp: the SGL to populate
837 * @start: start address of skb main body data to include in the SGL
838 * @len: length of skb main body data to include in the SGL
839 * @pdev: the PCI device
841 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
845 static inline unsigned int make_sgl(const struct sk_buff *skb,
846 struct sg_ent *sgp, unsigned char *start,
847 unsigned int len, struct pci_dev *pdev)
850 unsigned int i, j = 0, nfrags;
853 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
854 sgp->len[0] = cpu_to_be32(len);
855 sgp->addr[0] = cpu_to_be64(mapping);
859 nfrags = skb_shinfo(skb)->nr_frags;
860 for (i = 0; i < nfrags; i++) {
861 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
863 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
864 frag->size, PCI_DMA_TODEVICE);
865 sgp->len[j] = cpu_to_be32(frag->size);
866 sgp->addr[j] = cpu_to_be64(mapping);
873 return ((nfrags + (len != 0)) * 3) / 2 + j;
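/*
 * The value returned above is the SGL size in flits and is equivalent to
 * sgl_len(nfrags + (len != 0)).
 */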
877 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 * where the HW could go to sleep just after we checked; in that case
 * the interrupt handler will detect the outstanding TX packet
 * and ring the doorbell for us.
886 * When GTS is disabled we unconditionally ring the doorbell.
888 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
891 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
892 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
893 set_bit(TXQ_LAST_PKT_DB, &q->flags);
894 t3_write_reg(adap, A_SG_KDOORBELL,
895 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
898 wmb(); /* write descriptors before telling HW */
899 t3_write_reg(adap, A_SG_KDOORBELL,
900 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
904 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
906 #if SGE_NUM_GENBITS == 2
907 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
912 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
913 * @ndesc: number of Tx descriptors spanned by the SGL
914 * @skb: the packet corresponding to the WR
915 * @d: first Tx descriptor to be written
916 * @pidx: index of above descriptors
917 * @q: the SGE Tx queue
919 * @flits: number of flits to the start of the SGL in the first descriptor
920 * @sgl_flits: the SGL size in flits
921 * @gen: the Tx descriptor generation
922 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
923 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
925 * Write a work request header and an associated SGL. If the SGL is
926 * small enough to fit into one Tx descriptor it has already been written
927 * and we just need to write the WR header. Otherwise we distribute the
928 * SGL across the number of descriptors it spans.
930 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
931 struct tx_desc *d, unsigned int pidx,
932 const struct sge_txq *q,
933 const struct sg_ent *sgl,
934 unsigned int flits, unsigned int sgl_flits,
935 unsigned int gen, __be32 wr_hi,
938 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
939 struct tx_sw_desc *sd = &q->sdesc[pidx];
942 if (need_skb_unmap()) {
948 if (likely(ndesc == 1)) {
950 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
951 V_WR_SGLSFLT(flits)) | wr_hi;
953 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
954 V_WR_GEN(gen)) | wr_lo;
957 unsigned int ogen = gen;
958 const u64 *fp = (const u64 *)sgl;
959 struct work_request_hdr *wp = wrp;
961 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
962 V_WR_SGLSFLT(flits)) | wr_hi;
965 unsigned int avail = WR_FLITS - flits;
967 if (avail > sgl_flits)
969 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
979 if (++pidx == q->size) {
987 wrp = (struct work_request_hdr *)d;
988 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
989 V_WR_SGLSFLT(1)) | wr_hi;
990 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
992 V_WR_GEN(gen)) | wr_lo;
997 wrp->wr_hi |= htonl(F_WR_EOP);
999 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1000 wr_gen2((struct tx_desc *)wp, ogen);
1001 WARN_ON(ndesc != 0);
1006 * write_tx_pkt_wr - write a TX_PKT work request
1007 * @adap: the adapter
1008 * @skb: the packet to send
1009 * @pi: the egress interface
1010 * @pidx: index of the first Tx descriptor to write
1011 * @gen: the generation value to use
1013 * @ndesc: number of descriptors the packet will occupy
1014 * @compl: the value of the COMPL bit to use
1016 * Generate a TX_PKT work request to send the supplied packet.
1018 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1019 const struct port_info *pi,
1020 unsigned int pidx, unsigned int gen,
1021 struct sge_txq *q, unsigned int ndesc,
1024 unsigned int flits, sgl_flits, cntrl, tso_info;
1025 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1026 struct tx_desc *d = &q->desc[pidx];
1027 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1029 cpl->len = htonl(skb->len | 0x80000000);
1030 cntrl = V_TXPKT_INTF(pi->port_id);
1032 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1033 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1035 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1038 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1041 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1042 hdr->cntrl = htonl(cntrl);
1043 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1044 CPL_ETH_II : CPL_ETH_II_VLAN;
1045 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1046 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1047 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1048 hdr->lso_info = htonl(tso_info);
1051 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1052 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1053 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1054 cpl->cntrl = htonl(cntrl);
1056 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1057 q->sdesc[pidx].skb = NULL;
1059 skb_copy_from_linear_data(skb, &d->flit[2],
1062 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1064 flits = (skb->len + 7) / 8 + 2;
1065 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1066 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1067 | F_WR_SOP | F_WR_EOP | compl);
1069 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1070 V_WR_TID(q->token));
1079 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1080 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1082 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1083 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1084 htonl(V_WR_TID(q->token)));
1087 static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1090 netif_stop_queue(dev);
1091 set_bit(TXQ_ETH, &qs->txq_stopped);
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1098 * @dev: the egress net device
1100 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1102 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1104 unsigned int ndesc, pidx, credits, gen, compl;
1105 const struct port_info *pi = netdev_priv(dev);
1106 struct adapter *adap = pi->adapter;
1107 struct sge_qset *qs = pi->qs;
1108 struct sge_txq *q = &qs->txq[TXQ_ETH];
1111 * The chip min packet length is 9 octets but play safe and reject
1112 * anything shorter than an Ethernet header.
1114 if (unlikely(skb->len < ETH_HLEN)) {
1116 return NETDEV_TX_OK;
1119 spin_lock(&q->lock);
1120 reclaim_completed_tx(adap, q);
1122 credits = q->size - q->in_use;
1123 ndesc = calc_tx_descs(skb);
1125 if (unlikely(credits < ndesc)) {
1126 t3_stop_queue(dev, qs, q);
1127 dev_err(&adap->pdev->dev,
1128 "%s: Tx ring %u full while queue awake!\n",
1129 dev->name, q->cntxt_id & 7);
1130 spin_unlock(&q->lock);
1131 return NETDEV_TX_BUSY;
1135 if (unlikely(credits - ndesc < q->stop_thres)) {
1136 t3_stop_queue(dev, qs, q);
1138 if (should_restart_tx(q) &&
1139 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1141 netif_wake_queue(dev);
1146 q->unacked += ndesc;
1147 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1151 if (q->pidx >= q->size) {
1156 /* update port statistics */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
1158 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1159 if (skb_shinfo(skb)->gso_size)
1160 qs->port_stats[SGE_PSTAT_TSO]++;
1161 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1162 qs->port_stats[SGE_PSTAT_VLANINS]++;
1164 dev->trans_start = jiffies;
1165 spin_unlock(&q->lock);
1168 * We do not use Tx completion interrupts to free DMAd Tx packets.
 * This is good for performance but means that we rely on new Tx
1170 * packets arriving to run the destructors of completed packets,
1171 * which open up space in their sockets' send queues. Sometimes
1172 * we do not get such new packets causing Tx to stall. A single
1173 * UDP transmitter is a good example of this situation. We have
1174 * a clean up timer that periodically reclaims completed packets
1175 * but it doesn't run often enough (nor do we want it to) to prevent
1176 * lengthy stalls. A solution to this problem is to run the
1177 * destructor early, after the packet is queued but before it's DMAd.
 * A downside is that we lie to socket memory accounting, but the amount
 * of extra memory is reasonable (limited by the number of Tx
 * descriptors), the packets almost always do get freed quickly by new
 * packets, and for protocols like TCP that wait for ACKs to actually
 * free up the data, the extra memory held is even smaller.
1183 * On the positive side we run the destructors on the sending CPU
1184 * rather than on a potentially different completing CPU, usually a
1185 * good thing. We also run them without holding our Tx queue lock,
1186 * unlike what reclaim_completed_tx() would otherwise do.
1188 * Run the destructor before telling the DMA engine about the packet
1189 * to make sure it doesn't complete and get freed prematurely.
1191 if (likely(!skb_shared(skb)))
1194 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1195 check_ring_tx_db(adap, q);
1196 return NETDEV_TX_OK;
1200 * write_imm - write a packet into a Tx descriptor as immediate data
1201 * @d: the Tx descriptor to write
1203 * @len: the length of packet data to write as immediate data
1204 * @gen: the generation bit value to write
1206 * Writes a packet as immediate data into a Tx descriptor. The packet
1207 * contains a work request at its beginning. We must write the packet
1208 * carefully so the SGE doesn't read it accidentally before it's written
1211 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1212 unsigned int len, unsigned int gen)
1214 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1215 struct work_request_hdr *to = (struct work_request_hdr *)d;
1217 if (likely(!skb->data_len))
1218 memcpy(&to[1], &from[1], len - sizeof(*from));
1220 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1222 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1223 V_WR_BCNTLFLT(len & 7));
1225 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1226 V_WR_LEN((len + 7) / 8));
1232 * check_desc_avail - check descriptor availability on a send queue
1233 * @adap: the adapter
1234 * @q: the send queue
1235 * @skb: the packet needing the descriptors
1236 * @ndesc: the number of Tx descriptors needed
1237 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1239 * Checks if the requested number of Tx descriptors is available on an
1240 * SGE send queue. If the queue is already suspended or not enough
1241 * descriptors are available the packet is queued for later transmission.
1242 * Must be called with the Tx queue locked.
1244 * Returns 0 if enough descriptors are available, 1 if there aren't
1245 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
1249 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1250 struct sk_buff *skb, unsigned int ndesc,
1253 if (unlikely(!skb_queue_empty(&q->sendq))) {
1254 addq_exit:__skb_queue_tail(&q->sendq, skb);
1257 if (unlikely(q->size - q->in_use < ndesc)) {
1258 struct sge_qset *qs = txq_to_qset(q, qid);
1260 set_bit(qid, &qs->txq_stopped);
1261 smp_mb__after_clear_bit();
1263 if (should_restart_tx(q) &&
1264 test_and_clear_bit(qid, &qs->txq_stopped))
1274 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1275 * @q: the SGE control Tx queue
1277 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1278 * that send only immediate data (presently just the control queues) and
1279 * thus do not have any sk_buffs to release.
1281 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1283 unsigned int reclaim = q->processed - q->cleaned;
1285 q->in_use -= reclaim;
1286 q->cleaned += reclaim;
1289 static inline int immediate(const struct sk_buff *skb)
1291 return skb->len <= WR_LEN;
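/*
 * A packet is "immediate" when the whole work request, header included,
 * fits as immediate data in a single Tx descriptor and can simply be
 * copied into it by write_imm(); such packets need no SGL and no DMA
 * mapping.
 */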
1295 * ctrl_xmit - send a packet through an SGE control Tx queue
1296 * @adap: the adapter
1297 * @q: the control queue
1300 * Send a packet through an SGE control Tx queue. Packets sent through
1301 * a control queue must fit entirely as immediate data in a single Tx
1302 * descriptor and have no page fragments.
1304 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1305 struct sk_buff *skb)
1308 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1310 if (unlikely(!immediate(skb))) {
1313 return NET_XMIT_SUCCESS;
1316 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1317 wrp->wr_lo = htonl(V_WR_TID(q->token));
1319 spin_lock(&q->lock);
1320 again:reclaim_completed_tx_imm(q);
1322 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1323 if (unlikely(ret)) {
1325 spin_unlock(&q->lock);
1331 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1334 if (++q->pidx >= q->size) {
1338 spin_unlock(&q->lock);
1340 t3_write_reg(adap, A_SG_KDOORBELL,
1341 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1342 return NET_XMIT_SUCCESS;
1346 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
1349 * Resumes transmission on a suspended Tx control queue.
1351 static void restart_ctrlq(unsigned long data)
1353 struct sk_buff *skb;
1354 struct sge_qset *qs = (struct sge_qset *)data;
1355 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1357 spin_lock(&q->lock);
1358 again:reclaim_completed_tx_imm(q);
1360 while (q->in_use < q->size &&
1361 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1363 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1365 if (++q->pidx >= q->size) {
1372 if (!skb_queue_empty(&q->sendq)) {
1373 set_bit(TXQ_CTRL, &qs->txq_stopped);
1374 smp_mb__after_clear_bit();
1376 if (should_restart_tx(q) &&
1377 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1382 spin_unlock(&q->lock);
1384 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1385 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1389 * Send a management message through control queue 0
1391 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1395 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1402 * deferred_unmap_destructor - unmap a packet when it is freed
1405 * This is the packet destructor used for Tx packets that need to remain
1406 * mapped until they are freed rather than until their Tx descriptors are
1409 static void deferred_unmap_destructor(struct sk_buff *skb)
1412 const dma_addr_t *p;
1413 const struct skb_shared_info *si;
1414 const struct deferred_unmap_info *dui;
1416 dui = (struct deferred_unmap_info *)skb->head;
1419 if (skb->tail - skb->transport_header)
1420 pci_unmap_single(dui->pdev, *p++,
1421 skb->tail - skb->transport_header,
1424 si = skb_shinfo(skb);
1425 for (i = 0; i < si->nr_frags; i++)
1426 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1430 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1431 const struct sg_ent *sgl, int sgl_flits)
1434 struct deferred_unmap_info *dui;
1436 dui = (struct deferred_unmap_info *)skb->head;
1438 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1439 *p++ = be64_to_cpu(sgl->addr[0]);
1440 *p++ = be64_to_cpu(sgl->addr[1]);
1443 *p = be64_to_cpu(sgl->addr[0]);
1447 * write_ofld_wr - write an offload work request
1448 * @adap: the adapter
1449 * @skb: the packet to send
1451 * @pidx: index of the first Tx descriptor to write
1452 * @gen: the generation value to use
1453 * @ndesc: number of descriptors the packet will occupy
1455 * Write an offload work request to send the supplied packet. The packet
1456 * data already carry the work request with most fields populated.
1458 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1459 struct sge_txq *q, unsigned int pidx,
1460 unsigned int gen, unsigned int ndesc)
1462 unsigned int sgl_flits, flits;
1463 struct work_request_hdr *from;
1464 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1465 struct tx_desc *d = &q->desc[pidx];
1467 if (immediate(skb)) {
1468 q->sdesc[pidx].skb = NULL;
1469 write_imm(d, skb, skb->len, gen);
1473 /* Only TX_DATA builds SGLs */
1475 from = (struct work_request_hdr *)skb->data;
1476 memcpy(&d->flit[1], &from[1],
1477 skb_transport_offset(skb) - sizeof(*from));
1479 flits = skb_transport_offset(skb) / 8;
1480 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1481 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1482 skb->tail - skb->transport_header,
1484 if (need_skb_unmap()) {
1485 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1486 skb->destructor = deferred_unmap_destructor;
1489 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1490 gen, from->wr_hi, from->wr_lo);
1494 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1497 * Returns the number of Tx descriptors needed for the given offload
1498 * packet. These packets are already fully constructed.
1500 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1502 unsigned int flits, cnt;
1504 if (skb->len <= WR_LEN)
1505 return 1; /* packet fits as immediate data */
1507 flits = skb_transport_offset(skb) / 8; /* headers */
1508 cnt = skb_shinfo(skb)->nr_frags;
1509 if (skb->tail != skb->transport_header)
1511 return flits_to_desc(flits + sgl_len(cnt));
1515 * ofld_xmit - send a packet through an offload queue
1516 * @adap: the adapter
1517 * @q: the Tx offload queue
1520 * Send an offload packet through an SGE offload queue.
1522 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1523 struct sk_buff *skb)
1526 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1528 spin_lock(&q->lock);
1529 again:reclaim_completed_tx(adap, q);
1531 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1532 if (unlikely(ret)) {
1534 skb->priority = ndesc; /* save for restart */
1535 spin_unlock(&q->lock);
1545 if (q->pidx >= q->size) {
1549 spin_unlock(&q->lock);
1551 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1552 check_ring_tx_db(adap, q);
1553 return NET_XMIT_SUCCESS;
1557 * restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
1560 * Resumes transmission on a suspended Tx offload queue.
1562 static void restart_offloadq(unsigned long data)
1564 struct sk_buff *skb;
1565 struct sge_qset *qs = (struct sge_qset *)data;
1566 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1567 const struct port_info *pi = netdev_priv(qs->netdev);
1568 struct adapter *adap = pi->adapter;
1570 spin_lock(&q->lock);
1571 again:reclaim_completed_tx(adap, q);
1573 while ((skb = skb_peek(&q->sendq)) != NULL) {
1574 unsigned int gen, pidx;
1575 unsigned int ndesc = skb->priority;
1577 if (unlikely(q->size - q->in_use < ndesc)) {
1578 set_bit(TXQ_OFLD, &qs->txq_stopped);
1579 smp_mb__after_clear_bit();
1581 if (should_restart_tx(q) &&
1582 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1592 if (q->pidx >= q->size) {
1596 __skb_unlink(skb, &q->sendq);
1597 spin_unlock(&q->lock);
1599 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1600 spin_lock(&q->lock);
1602 spin_unlock(&q->lock);
1605 set_bit(TXQ_RUNNING, &q->flags);
1606 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1609 t3_write_reg(adap, A_SG_KDOORBELL,
1610 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1614 * queue_set - return the queue set a packet should use
1617 * Maps a packet to the SGE queue set it should use. The desired queue
1618 * set is carried in bits 1-3 in the packet's priority.
1620 static inline int queue_set(const struct sk_buff *skb)
1622 return skb->priority >> 1;
1626 * is_ctrl_pkt - return whether an offload packet is a control packet
1629 * Determines whether an offload packet should use an OFLD or a CTRL
1630 * Tx queue. This is indicated by bit 0 in the packet's priority.
1632 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1634 return skb->priority & 1;
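/*
 * Example: an offload packet with skb->priority == 5 (binary 101) is sent
 * on the control queue (bit 0 set) of queue set 2 (bits 1-3).
 */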
1638 * t3_offload_tx - send an offload packet
1639 * @tdev: the offload device to send to
1642 * Sends an offload packet. We use the packet priority to select the
1643 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1644 * should be sent as regular or control, bits 1-3 select the queue set.
1646 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1648 struct adapter *adap = tdev2adap(tdev);
1649 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1651 if (unlikely(is_ctrl_pkt(skb)))
1652 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1654 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1658 * offload_enqueue - add an offload packet to an SGE offload receive queue
1659 * @q: the SGE response queue
1662 * Add a new offload packet to an SGE response queue's offload packet
1663 * queue. If the packet is the first on the queue it schedules the RX
1664 * softirq to process the queue.
1666 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1668 skb->next = skb->prev = NULL;
1670 q->rx_tail->next = skb;
1672 struct sge_qset *qs = rspq_to_qset(q);
1674 napi_schedule(&qs->napi);
1681 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1682 * @tdev: the offload device that will be receiving the packets
1683 * @q: the SGE response queue that assembled the bundle
1684 * @skbs: the partial bundle
1685 * @n: the number of packets in the bundle
1687 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1689 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1691 struct sk_buff *skbs[], int n)
1694 q->offload_bundles++;
1695 tdev->recv(tdev, skbs, n);
1700 * ofld_poll - NAPI handler for offload packets in interrupt mode
1701 * @dev: the network device doing the polling
1702 * @budget: polling budget
1704 * The NAPI handler for offload packets when a response queue is serviced
1705 * by the hard interrupt handler, i.e., when it's operating in non-polling
1706 * mode. Creates small packet batches and sends them through the offload
1707 * receive handler. Batches need to be of modest size as we do prefetches
1708 * on the packets in each.
1710 static int ofld_poll(struct napi_struct *napi, int budget)
1712 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1713 struct sge_rspq *q = &qs->rspq;
1714 struct adapter *adapter = qs->adap;
1717 while (work_done < budget) {
1718 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1721 spin_lock_irq(&q->lock);
1724 napi_complete(napi);
1725 spin_unlock_irq(&q->lock);
1730 q->rx_head = q->rx_tail = NULL;
1731 spin_unlock_irq(&q->lock);
1733 for (ngathered = 0; work_done < budget && head; work_done++) {
1734 prefetch(head->data);
1735 skbs[ngathered] = head;
1737 skbs[ngathered]->next = NULL;
1738 if (++ngathered == RX_BUNDLE_SIZE) {
1739 q->offload_bundles++;
1740 adapter->tdev.recv(&adapter->tdev, skbs,
1745 if (head) { /* splice remaining packets back onto Rx queue */
1746 spin_lock_irq(&q->lock);
1747 tail->next = q->rx_head;
1751 spin_unlock_irq(&q->lock);
1753 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1760 * rx_offload - process a received offload packet
1761 * @tdev: the offload device receiving the packet
1762 * @rq: the response queue that received the packet
1764 * @rx_gather: a gather list of packets if we are building a bundle
1765 * @gather_idx: index of the next available slot in the bundle
 * Process an ingress offload packet and add it to the offload ingress
1768 * queue. Returns the index of the next available slot in the bundle.
1770 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1771 struct sk_buff *skb, struct sk_buff *rx_gather[],
1772 unsigned int gather_idx)
1774 skb_reset_mac_header(skb);
1775 skb_reset_network_header(skb);
1776 skb_reset_transport_header(skb);
1779 rx_gather[gather_idx++] = skb;
1780 if (gather_idx == RX_BUNDLE_SIZE) {
1781 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1783 rq->offload_bundles++;
1786 offload_enqueue(rq, skb);
1792 * restart_tx - check whether to restart suspended Tx queues
1793 * @qs: the queue set to resume
1795 * Restarts suspended Tx queues of an SGE queue set if they have enough
1796 * free resources to resume operation.
1798 static void restart_tx(struct sge_qset *qs)
1800 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1801 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1802 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1803 qs->txq[TXQ_ETH].restarts++;
1804 if (netif_running(qs->netdev))
1805 netif_wake_queue(qs->netdev);
1808 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1809 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1810 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1811 qs->txq[TXQ_OFLD].restarts++;
1812 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1814 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1815 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1816 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1817 qs->txq[TXQ_CTRL].restarts++;
1818 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1823 * rx_eth - process an ingress ethernet packet
1824 * @adap: the adapter
1825 * @rq: the response queue that received the packet
1827 * @pad: amount of padding at the start of the buffer
 * Process an ingress Ethernet packet and deliver it to the stack.
1830 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1831 * if it was immediate data in a response.
1833 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1834 struct sk_buff *skb, int pad)
1836 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1837 struct port_info *pi;
1839 skb_pull(skb, sizeof(*p) + pad);
1840 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1841 skb->dev->last_rx = jiffies;
1842 pi = netdev_priv(skb->dev);
1843 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
1845 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1846 skb->ip_summed = CHECKSUM_UNNECESSARY;
1848 skb->ip_summed = CHECKSUM_NONE;
1850 if (unlikely(p->vlan_valid)) {
1851 struct vlan_group *grp = pi->vlan_grp;
1853 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1855 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1858 dev_kfree_skb_any(skb);
1859 } else if (rq->polling)
1860 netif_receive_skb(skb);
1866 * handle_rsp_cntrl_info - handles control information in a response
1867 * @qs: the queue set corresponding to the response
1868 * @flags: the response control flags
1870 * Handles the control information of an SGE response, such as GTS
1871 * indications and completion credits for the queue set's Tx queues.
1872 * HW coalesces credits, we don't do any extra SW coalescing.
1874 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1876 unsigned int credits;
1879 if (flags & F_RSPD_TXQ0_GTS)
1880 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1883 credits = G_RSPD_TXQ0_CR(flags);
1885 qs->txq[TXQ_ETH].processed += credits;
1887 credits = G_RSPD_TXQ2_CR(flags);
1889 qs->txq[TXQ_CTRL].processed += credits;
1892 if (flags & F_RSPD_TXQ1_GTS)
1893 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1895 credits = G_RSPD_TXQ1_CR(flags);
1897 qs->txq[TXQ_OFLD].processed += credits;
1901 * check_ring_db - check if we need to ring any doorbells
1902 * @adapter: the adapter
1903 * @qs: the queue set whose Tx queues are to be examined
1904 * @sleeping: indicates which Tx queue sent GTS
1906 * Checks if some of a queue set's Tx queues need to ring their doorbells
1907 * to resume transmission after idling while they still have unprocessed
1910 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1911 unsigned int sleeping)
1913 if (sleeping & F_RSPD_TXQ0_GTS) {
1914 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1916 if (txq->cleaned + txq->in_use != txq->processed &&
1917 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1918 set_bit(TXQ_RUNNING, &txq->flags);
1919 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1920 V_EGRCNTX(txq->cntxt_id));
1924 if (sleeping & F_RSPD_TXQ1_GTS) {
1925 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1927 if (txq->cleaned + txq->in_use != txq->processed &&
1928 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1929 set_bit(TXQ_RUNNING, &txq->flags);
1930 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1931 V_EGRCNTX(txq->cntxt_id));
1937 * is_new_response - check if a response is newly written
1938 * @r: the response descriptor
1939 * @q: the response queue
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
1944 static inline int is_new_response(const struct rsp_desc *r,
1945 const struct sge_rspq *q)
1947 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1950 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1951 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1952 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1953 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1954 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
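/*
 * RSPD_CTRL_MASK covers everything handle_rsp_cntrl_info() consumes:
 * the two GTS bits plus the three Tx completion-credit fields.
 */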
1956 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1957 #define NOMEM_INTR_DELAY 2500
1960 * process_responses - process responses from an SGE response queue
1961 * @adap: the adapter
1962 * @qs: the queue set to which the response queue belongs
1963 * @budget: how many responses can be processed in this round
1965 * Process responses from an SGE response queue up to the supplied budget.
1966 * Responses include received packets as well as credits and other events
1967 * for the queues that belong to the response queue's queue set.
1968 * A negative budget is effectively unlimited.
1970 * Additionally choose the interrupt holdoff time for the next interrupt
1971 * on this queue. If the system is under memory shortage use a fairly
1972 * long delay to help recovery.
1974 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1977 struct sge_rspq *q = &qs->rspq;
1978 struct rsp_desc *r = &q->desc[q->cidx];
1979 int budget_left = budget;
1980 unsigned int sleeping = 0;
1981 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1984 q->next_holdoff = q->holdoff_tmr;
1986 while (likely(budget_left && is_new_response(r, q))) {
1987 int eth, ethpad = 2;
1988 struct sk_buff *skb = NULL;
1989 u32 len, flags = ntohl(r->flags);
1990 __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1992 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1994 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1995 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1999 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2000 skb->data[0] = CPL_ASYNC_NOTIF;
2001 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2003 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2004 skb = get_imm_packet(r);
2005 if (unlikely(!skb)) {
2007 q->next_holdoff = NOMEM_INTR_DELAY;
2009 /* consume one credit since we tried */
2015 } else if ((len = ntohl(r->len_cq)) != 0) {
2018 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2019 if (fl->use_pages) {
2020 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2023 #if L1_CACHE_BYTES < 128
2024 prefetch(addr + L1_CACHE_BYTES);
2026 __refill_fl(adap, fl);
2028 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
2029 eth ? SGE_RX_DROP_THRES : 0);
2031 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2032 eth ? SGE_RX_DROP_THRES : 0);
2033 if (unlikely(!skb)) {
2037 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2040 if (++fl->cidx == fl->size)
2045 if (flags & RSPD_CTRL_MASK) {
2046 sleeping |= flags & RSPD_GTS_MASK;
2047 handle_rsp_cntrl_info(qs, flags);
2051 if (unlikely(++q->cidx == q->size)) {
2058 if (++q->credits >= (q->size / 4)) {
2059 refill_rspq(adap, q, q->credits);
2063 if (likely(skb != NULL)) {
2065 rx_eth(adap, q, skb, ethpad);
2068 /* Preserve the RSS info in csum & priority */
2070 skb->priority = rss_lo;
2071 ngathered = rx_offload(&adap->tdev, q, skb,
2079 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2081 check_ring_db(adap, qs, sleeping);
2083 smp_mb(); /* commit Tx queue .processed updates */
2084 if (unlikely(qs->txq_stopped != 0))
2087 budget -= budget_left;
2091 static inline int is_pure_response(const struct rsp_desc *r)
2093 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2095 return (n | r->len_cq) == 0;
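/*
 * A "pure" response carries no data at all: no async notification, no
 * immediate data and no free-list buffer (len_cq == 0); it only returns
 * credits and other control information.
 */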
2099 * napi_rx_handler - the NAPI handler for Rx processing
2100 * @napi: the napi instance
2101 * @budget: how many packets we can process in this round
2103 * Handler for new data events when using NAPI.
2105 static int napi_rx_handler(struct napi_struct *napi, int budget)
2107 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2108 struct adapter *adap = qs->adap;
2109 int work_done = process_responses(adap, qs, budget);
2111 if (likely(work_done < budget)) {
2112 napi_complete(napi);
2115 * Because we don't atomically flush the following
2116 * write it is possible that in very rare cases it can
2117 * reach the device in a way that races with a new
2118 * response being written plus an error interrupt
2119 * causing the NAPI interrupt handler below to return
2120 * unhandled status to the OS. To protect against
2121 * this would require flushing the write and doing
2122 * both the write and the flush with interrupts off.
2123 * Way too expensive and unjustifiable given the
2124 * rarity of the race.
2126 * The race cannot happen at all with MSI-X.
2128 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2129 V_NEWTIMER(qs->rspq.next_holdoff) |
2130 V_NEWINDEX(qs->rspq.cidx));
2136 * Returns true if the device is already scheduled for polling.
2138 static inline int napi_is_scheduled(struct napi_struct *napi)
2140 return test_bit(NAPI_STATE_SCHED, &napi->state);
2144 * process_pure_responses - process pure responses from a response queue
2145 * @adap: the adapter
2146 * @qs: the queue set owning the response queue
2147 * @r: the first pure response to process
2149 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
2151 * justify calling a softirq under NAPI, so we handle them specially in
2152 * the interrupt handler. The function is called with a pointer to a
2153 * response, which the caller must ensure is a valid pure response.
2155 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2157 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2160 struct sge_rspq *q = &qs->rspq;
2161 unsigned int sleeping = 0;
2164 u32 flags = ntohl(r->flags);
2167 if (unlikely(++q->cidx == q->size)) {
2174 if (flags & RSPD_CTRL_MASK) {
2175 sleeping |= flags & RSPD_GTS_MASK;
2176 handle_rsp_cntrl_info(qs, flags);
2180 if (++q->credits >= (q->size / 4)) {
2181 refill_rspq(adap, q, q->credits);
2184 } while (is_new_response(r, q) && is_pure_response(r));
2187 check_ring_db(adap, qs, sleeping);
2189 smp_mb(); /* commit Tx queue .processed updates */
2190 if (unlikely(qs->txq_stopped != 0))
2193 return is_new_response(r, q);
2197 * handle_responses - decide what to do with new responses in NAPI mode
2198 * @adap: the adapter
2199 * @q: the response queue
2201 * This is used by the NAPI interrupt handlers to decide what to do with
2202 * new SGE responses. If there are no new responses it returns -1. If
2203 * there are new responses and they are pure (i.e., non-data carrying)
2204 * it handles them straight in hard interrupt context as they are very
2205 * cheap and don't deliver any packets. Finally, if there are any data
2206 * signaling responses it schedules the NAPI handler. Returns 1 if it
2207 * schedules NAPI, 0 if all new responses were pure.
2209 * The caller must ascertain NAPI is not already running.
2211 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2213 struct sge_qset *qs = rspq_to_qset(q);
2214 struct rsp_desc *r = &q->desc[q->cidx];
2216 if (!is_new_response(r, q))
2218 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2219 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2220 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2223 napi_schedule(&qs->napi);
2228 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2229 * (i.e., response queue serviced in hard interrupt).
2231 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2233 struct sge_qset *qs = cookie;
2234 struct adapter *adap = qs->adap;
2235 struct sge_rspq *q = &qs->rspq;
2237 spin_lock(&q->lock);
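/*
 * A budget of -1 effectively means "no limit": process_responses() runs
 * until the queue has no new responses and returns the number handled,
 * so 0 here indicates an interrupt we did no work for.
 */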
2238 if (process_responses(adap, qs, -1) == 0)
2239 q->unhandled_irqs++;
2240 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2241 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2242 spin_unlock(&q->lock);
2247 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2248 * (i.e., response queue serviced by NAPI polling).
2250 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2252 struct sge_qset *qs = cookie;
2253 struct sge_rspq *q = &qs->rspq;
2255 spin_lock(&q->lock);
2257 if (handle_responses(qs->adap, q) < 0)
2258 q->unhandled_irqs++;
2259 spin_unlock(&q->lock);
2264 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2265 * SGE response queues as well as error and other async events as they all use
2266 * the same MSI vector. We use one SGE response queue per port in this mode
2267 * and protect all response queues with queue 0's lock.
2269 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2271 int new_packets = 0;
2272 struct adapter *adap = cookie;
2273 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2275 spin_lock(&q->lock);
2277 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2278 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2279 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2283 if (adap->params.nports == 2 &&
2284 process_responses(adap, &adap->sge.qs[1], -1)) {
2285 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2287 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2288 V_NEWTIMER(q1->next_holdoff) |
2289 V_NEWINDEX(q1->cidx));
2293 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2294 q->unhandled_irqs++;
2296 spin_unlock(&q->lock);
2300 static int rspq_check_napi(struct sge_qset *qs)
2302 struct sge_rspq *q = &qs->rspq;
2304 if (!napi_is_scheduled(&qs->napi) &&
2305 is_new_response(&q->desc[q->cidx], q)) {
2306 napi_schedule(&qs->napi);
2313 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2314 * by NAPI polling). Handles data events from SGE response queues as well as
2315 * error and other async events as they all use the same MSI vector. We use
2316 * one SGE response queue per port in this mode and protect all response
2317 * queues with queue 0's lock.
2319 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2322 struct adapter *adap = cookie;
2323 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2325 spin_lock(&q->lock);
2327 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2328 if (adap->params.nports == 2)
2329 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2330 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2331 q->unhandled_irqs++;
2333 spin_unlock(&q->lock);
2338 * A helper function that processes responses and issues GTS.
2340 static inline int process_responses_gts(struct adapter *adap,
2341 struct sge_rspq *rq)
2345 work = process_responses(adap, rspq_to_qset(rq), -1);
2346 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2347 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2352 * The legacy INTx interrupt handler. This needs to handle data events from
2353 * SGE response queues as well as error and other async events as they all use
2354 * the same interrupt pin. We use one SGE response queue per port in this mode
2355 * and protect all response queues with queue 0's lock.
2357 static irqreturn_t t3_intr(int irq, void *cookie)
2359 int work_done, w0, w1;
2360 struct adapter *adap = cookie;
2361 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2362 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2364 spin_lock(&q0->lock);
2366 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2367 w1 = adap->params.nports == 2 &&
2368 is_new_response(&q1->desc[q1->cidx], q1);
2370 if (likely(w0 | w1)) {
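/*
 * Clear the legacy interrupt (PL_CLI) before servicing so responses
 * arriving during processing re-assert it; the read-back flushes the
 * write.
 */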
2371 t3_write_reg(adap, A_PL_CLI, 0);
2372 t3_read_reg(adap, A_PL_CLI); /* flush */
2375 process_responses_gts(adap, q0);
2378 process_responses_gts(adap, q1);
2380 work_done = w0 | w1;
2382 work_done = t3_slow_intr_handler(adap);
2384 spin_unlock(&q0->lock);
2385 return IRQ_RETVAL(work_done != 0);
2389 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2390 * Handles data events from SGE response queues as well as error and other
2391 * async events as they all use the same interrupt pin. We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
2395 static irqreturn_t t3b_intr(int irq, void *cookie)
2398 struct adapter *adap = cookie;
2399 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2401 t3_write_reg(adap, A_PL_CLI, 0);
2402 map = t3_read_reg(adap, A_SG_DATA_INTR);
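/*
 * SG_DATA_INTR reports which response queues have new entries (one bit
 * per queue set) together with F_ERRINTR for async error events.
 */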
2404 if (unlikely(!map)) /* shared interrupt, most likely */
2407 spin_lock(&q0->lock);
2409 if (unlikely(map & F_ERRINTR))
2410 t3_slow_intr_handler(adap);
2412 if (likely(map & 1))
2413 process_responses_gts(adap, q0);
2416 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2418 spin_unlock(&q0->lock);
2423 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2424 * Handles data events from SGE response queues as well as error and other
2425 * async events as they all use the same interrupt pin. We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
2429 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2432 struct adapter *adap = cookie;
2433 struct sge_qset *qs0 = &adap->sge.qs[0];
2434 struct sge_rspq *q0 = &qs0->rspq;
2436 t3_write_reg(adap, A_PL_CLI, 0);
2437 map = t3_read_reg(adap, A_SG_DATA_INTR);
2439 if (unlikely(!map)) /* shared interrupt, most likely */
2442 spin_lock(&q0->lock);
2444 if (unlikely(map & F_ERRINTR))
2445 t3_slow_intr_handler(adap);
2447 if (likely(map & 1))
2448 napi_schedule(&qs0->napi);
2451 napi_schedule(&adap->sge.qs[1].napi);
2453 spin_unlock(&q0->lock);
2458 * t3_intr_handler - select the top-level interrupt handler
2459 * @adap: the adapter
2460 * @polling: whether using NAPI to service response queues
2462 * Selects the top-level interrupt handler based on the type of interrupts
 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
 * response queues.
 */
2466 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2468 if (adap->flags & USING_MSIX)
2469 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2470 if (adap->flags & USING_MSI)
2471 return polling ? t3_intr_msi_napi : t3_intr_msi;
2472 if (adap->params.rev > 0)
2473 return polling ? t3b_intr_napi : t3b_intr;
2477 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2478 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2479 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
F_HIRCQPARITYERROR)
2482 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
F_RSPQDISABLED)
2487 * t3_sge_err_intr_handler - SGE async event interrupt handler
2488 * @adapter: the adapter
2490 * Interrupt handler for SGE asynchronous (non-data) events.
2492 void t3_sge_err_intr_handler(struct adapter *adapter)
2494 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2496 if (status & SGE_PARERR)
2497 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2498 status & SGE_PARERR);
2499 if (status & SGE_FRAMINGERR)
2500 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2501 status & SGE_FRAMINGERR);
2503 if (status & F_RSPQCREDITOVERFOW)
2504 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2506 if (status & F_RSPQDISABLED) {
2507 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2510 "packet delivered to disabled response queue "
2511 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2514 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2515 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
status & F_HIPIODRBDROPERR ? "high" : "low");
2518 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2519 if (status & SGE_FATALERR)
2520 t3_fatal_err(adapter);
2524 * sge_timer_cb - perform periodic maintenance of an SGE qset
2525 * @data: the SGE queue set to maintain
2527 * Runs periodically from a timer to perform maintenance of an SGE queue
2528 * set. It performs two tasks:
2530 * a) Cleans up any completed Tx descriptors that may still be pending.
2531 * Normal descriptor cleanup happens when new packets are added to a Tx
2532 * queue so this timer is relatively infrequent and does any cleanup only
 * if the Tx queue has not seen any new packets in a while. We make a
 * best-effort attempt to reclaim descriptors: we don't wait around if
 * we cannot get a queue's lock (most likely because someone else is
 * queueing new packets and will also handle the cleanup).
 * Since control queues use immediate data exclusively we don't
2538 * bother cleaning them up here.
2540 * b) Replenishes Rx queues that have run out due to memory shortage.
2541 * Normally new Rx buffers are added when existing ones are consumed but
2542 * when out of memory a queue can become empty. We try to add only a few
 * buffers here; the queue will be replenished fully as these new buffers
2544 * are used up if memory shortage has subsided.
2546 static void sge_timer_cb(unsigned long data)
2549 struct sge_qset *qs = (struct sge_qset *)data;
2550 struct adapter *adap = qs->adap;
2552 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2553 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2554 spin_unlock(&qs->txq[TXQ_ETH].lock);
2556 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2557 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2558 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2560 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2561 &adap->sge.qs[0].rspq.lock;
2562 if (spin_trylock_irq(lock)) {
2563 if (!napi_is_scheduled(&qs->napi)) {
2564 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2566 if (qs->fl[0].credits < qs->fl[0].size)
2567 __refill_fl(adap, &qs->fl[0]);
2568 if (qs->fl[1].credits < qs->fl[1].size)
2569 __refill_fl(adap, &qs->fl[1]);
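/*
 * A set bit for this context in RSPQ_FL_STATUS means the response queue
 * starved (ran out of credits). If we have a credit to spare, return it
 * and clear the status bit so the queue can make progress again.
 */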
2571 if (status & (1 << qs->rspq.cntxt_id)) {
2573 if (qs->rspq.credits) {
2574 refill_rspq(adap, &qs->rspq, 1);
2576 qs->rspq.restarted++;
2577 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2578 1 << qs->rspq.cntxt_id);
2582 spin_unlock_irq(lock);
2584 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2588 * t3_update_qset_coalesce - update coalescing settings for a queue set
2589 * @qs: the SGE queue set
2590 * @p: new queue set parameters
2592 * Update the coalescing settings for an SGE queue set. Nothing is done
2593 * if the queue set is not initialized yet.
2595 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2598 qs->rspq.polling = p->polling;
2599 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
2603 * t3_sge_alloc_qset - initialize an SGE queue set
2604 * @adapter: the adapter
2605 * @id: the queue set id
2606 * @nports: how many Ethernet ports will be using this queue set
2607 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2608 * @p: configuration parameters for this queue set
2609 * @ntxq: number of Tx queues for the queue set
 * @dev: net device associated with this queue set
2612 * Allocate resources and initialize an SGE queue set. A queue set
2613 * comprises a response queue, two Rx free-buffer queues, and up to 3
2614 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2615 * queue, offload queue, and control queue.
2617 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2618 int irq_vec_idx, const struct qset_params *p,
2619 int ntxq, struct net_device *dev)
2621 int i, ret = -ENOMEM;
2622 struct sge_qset *q = &adapter->sge.qs[id];
2624 init_qset_cntxt(q, id);
2625 init_timer(&q->tx_reclaim_timer);
2626 q->tx_reclaim_timer.data = (unsigned long)q;
2627 q->tx_reclaim_timer.function = sge_timer_cb;
2629 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2630 sizeof(struct rx_desc),
2631 sizeof(struct rx_sw_desc),
2632 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2636 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2637 sizeof(struct rx_desc),
2638 sizeof(struct rx_sw_desc),
2639 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2643 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2644 sizeof(struct rsp_desc), 0,
2645 &q->rspq.phys_addr, NULL);
2649 for (i = 0; i < ntxq; ++i) {
2651 * The control queue always uses immediate data so does not
2652 * need to keep track of any sk_buffs.
2654 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2656 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2657 sizeof(struct tx_desc), sz,
2658 &q->txq[i].phys_addr,
2660 if (!q->txq[i].desc)
2664 q->txq[i].size = p->txq_size[i];
2665 spin_lock_init(&q->txq[i].lock);
2666 skb_queue_head_init(&q->txq[i].sendq);
2669 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2671 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2674 q->fl[0].gen = q->fl[1].gen = 1;
2675 q->fl[0].size = p->fl_size;
2676 q->fl[1].size = p->jumbo_size;
2679 q->rspq.size = p->rspq_size;
2680 spin_lock_init(&q->rspq.lock);
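/*
 * stop_thres is the descriptor count needed by a worst-case packet
 * (maximal scatter-gather list plus header flits) per port; the
 * Ethernet Tx queue is stopped when fewer descriptors remain.
 */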
2682 q->txq[TXQ_ETH].stop_thres = nports *
2683 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2685 #if FL0_PG_CHUNK_SIZE > 0
q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
2690 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
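/*
 * FL1 holds the large buffers: on offload-capable adapters they are
 * sized so the buffer plus struct skb_shared_info fits in 16KB,
 * otherwise a maximum-size frame plus the CPL header (and, likely, the
 * 2-byte packet shift used for IP alignment) is sufficient.
 */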
2691 q->fl[1].buf_size = is_offload(adapter) ?
2692 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2693 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2695 spin_lock_irq(&adapter->sge.reg_lock);
2697 /* FL threshold comparison uses < */
2698 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2699 q->rspq.phys_addr, q->rspq.size,
2700 q->fl[0].buf_size, 1, 0);
2704 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2705 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2706 q->fl[i].phys_addr, q->fl[i].size,
2707 q->fl[i].buf_size, p->cong_thres, 1,
2713 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2714 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2715 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2721 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2722 USE_GTS, SGE_CNTXT_OFLD, id,
2723 q->txq[TXQ_OFLD].phys_addr,
2724 q->txq[TXQ_OFLD].size, 0, 1, 0);
2730 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2732 q->txq[TXQ_CTRL].phys_addr,
2733 q->txq[TXQ_CTRL].size,
2734 q->txq[TXQ_CTRL].token, 1, 0);
2739 spin_unlock_irq(&adapter->sge.reg_lock);
2743 t3_update_qset_coalesce(q, p);
2745 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2746 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2747 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
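/*
 * Prime the response queue with an initial GTS write that arms its
 * holdoff timer; later GTS writes from the IRQ/NAPI handlers also
 * report the updated consumer index.
 */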
2749 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2750 V_NEWTIMER(q->rspq.holdoff_tmr));
2752 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2756 spin_unlock_irq(&adapter->sge.reg_lock);
2758 t3_free_qset(adapter, q);
2763 * t3_free_sge_resources - free SGE resources
2764 * @adap: the adapter
2766 * Frees resources used by the SGE queue sets.
2768 void t3_free_sge_resources(struct adapter *adap)
2772 for (i = 0; i < SGE_QSETS; ++i)
2773 t3_free_qset(adap, &adap->sge.qs[i]);
2777 * t3_sge_start - enable SGE
2778 * @adap: the adapter
 * Enables the SGE for DMAs. This is the last step in starting packet
 * transfers.
 */
2783 void t3_sge_start(struct adapter *adap)
2785 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2789 * t3_sge_stop - disable SGE operation
2790 * @adap: the adapter
 * Disables the DMA engine. This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context. In the latter
 * case it also disables any pending queue restart tasklets. Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait; however, the tasklets will have no effect
 * since the doorbells are disabled, and the driver will call this again
2798 * later from process context, at which time the tasklets will be stopped
2799 * if they are still running.
2801 void t3_sge_stop(struct adapter *adap)
2803 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2804 if (!in_interrupt()) {
2807 for (i = 0; i < SGE_QSETS; ++i) {
2808 struct sge_qset *qs = &adap->sge.qs[i];
2810 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2811 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2817 * t3_sge_init - initialize SGE
2818 * @adap: the adapter
2819 * @p: the SGE parameters
2821 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here; instead, the driver
2823 * top-level must request those individually. We also do not enable DMA
2824 * here, that should be done after the queues have been set up.
2826 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2828 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2830 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2831 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
2832 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2833 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2834 #if SGE_NUM_GENBITS == 1
ctrl |= F_EGRGENCTRL;
#endif
2837 if (adap->params.rev > 0) {
2838 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2839 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2841 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2842 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2843 V_LORCQDRBTHRSH(512));
2844 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2845 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2846 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2847 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
2848 adap->params.rev < T3_REV_C ? 1000 : 500);
2849 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2850 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2851 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2852 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2853 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2857 * t3_sge_prep - one-time SGE initialization
2858 * @adap: the associated adapter
2859 * @p: SGE parameters
2861 * Performs one-time initialization of SGE SW state. Includes determining
2862 * defaults for the assorted SGE parameters, which admins can change until
2863 * they are used to initialize the SGE.
2865 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
2869 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2870 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
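/*
 * max_pkt_size leaves room for the CPL header and struct skb_shared_info
 * within a 16KB receive buffer.
 */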
2872 for (i = 0; i < SGE_QSETS; ++i) {
2873 struct qset_params *q = p->qset + i;
2875 q->polling = adap->params.rev > 0;
2876 q->coalesce_usecs = 5;
2877 q->rspq_size = 1024;
2879 q->jumbo_size = 512;
2880 q->txq_size[TXQ_ETH] = 1024;
2881 q->txq_size[TXQ_OFLD] = 1024;
2882 q->txq_size[TXQ_CTRL] = 256;
2886 spin_lock_init(&adap->sge.reg_lock);
2890 * t3_get_desc - dump an SGE descriptor for debugging purposes
2891 * @qs: the queue set
 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2893 * @idx: the descriptor index in the queue
2894 * @data: where to dump the descriptor contents
2896 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2897 * size of the descriptor.
2899 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2900 unsigned char *data)
2906 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2908 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2909 return sizeof(struct tx_desc);
2913 if (!qs->rspq.desc || idx >= qs->rspq.size)
2915 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2916 return sizeof(struct rsp_desc);
2920 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2922 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2923 return sizeof(struct rx_desc);