2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
44 #include "firmware_exports.h"
48 #define SGE_RX_SM_BUF_SIZE 1536
50 #define SGE_RX_COPY_THRES 256
51 #define SGE_RX_PULL_LEN 128
54 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
55 * It must be a divisor of PAGE_SIZE. If set to 0, FL0 will use sk_buffs directly.
58 #define FL0_PG_CHUNK_SIZE 2048
59 #define FL0_PG_ORDER 0
60 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
61 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
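/* For example, with 4 KB pages FL1 allocates order-1 (8 KB) pages and uses each as a single 8 KB chunk; with 64 KB pages each order-0 page is carved into four 16 KB chunks. */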
63 #define SGE_RX_DROP_THRES 16
64 #define RX_RECLAIM_PERIOD (HZ/4)
67 * Max number of Rx buffers we replenish at a time.
69 #define MAX_RX_REFILL 16U
71 * Period of the Tx buffer reclaim timer. This timer does not need to run
72 * frequently as Tx buffers are usually reclaimed by new Tx packets.
74 #define TX_RECLAIM_PERIOD (HZ / 4)
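/* Per-call reclaim bounds: TX_RECLAIM_CHUNK limits the opportunistic reclaim done on the Tx path (see the reclaim_completed_tx() calls below), while the larger TX_RECLAIM_TIMER_CHUNK is meant for the periodic reclaim timer. */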
75 #define TX_RECLAIM_TIMER_CHUNK 64U
76 #define TX_RECLAIM_CHUNK 16U
78 /* WR size in bytes */
79 #define WR_LEN (WR_FLITS * 8)
82 * Types of Tx queues in each queue set. Order here matters, do not change.
84 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
86 /* Values for sge_txq.flags */
88 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
89 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
93 __be64 flit[TX_DESC_FLITS];
103 struct tx_sw_desc { /* SW state per Tx descriptor */
105 u8 eop; /* set if last descriptor for packet */
106 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
107 u8 fragidx; /* first page fragment associated with descriptor */
108 s8 sflit; /* start flit of first SGL entry in descriptor */
111 struct rx_sw_desc { /* SW state per Rx descriptor */
114 struct fl_pg_chunk pg_chunk;
116 DECLARE_PCI_UNMAP_ADDR(dma_addr);
119 struct rsp_desc { /* response queue descriptor */
120 struct rss_header rss_hdr;
128 * Holds unmapping information for Tx packets that need deferred unmapping.
129 * This structure lives at skb->head and must be allocated by callers.
131 struct deferred_unmap_info {
132 struct pci_dev *pdev;
133 dma_addr_t addr[MAX_SKB_FRAGS + 1];
137 * Maps a number of flits to the number of Tx descriptors that can hold them.
140 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
142 * HW allows up to 4 descriptors to be combined into a WR.
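 * For example, if WR_FLITS is 15, a 16-flit request needs 1 + (16 - 2) / 14 = 2 descriptors, which is what the second table below encodes.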
144 static u8 flit_desc_map[] = {
146 #if SGE_NUM_GENBITS == 1
147 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
148 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
149 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
150 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
151 #elif SGE_NUM_GENBITS == 2
152 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
153 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
154 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
155 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
156 #else
157 # error "SGE_NUM_GENBITS must be 1 or 2"
161 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
163 return container_of(q, struct sge_qset, fl[qidx]);
166 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
168 return container_of(q, struct sge_qset, rspq);
171 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
173 return container_of(q, struct sge_qset, txq[qidx]);
177 * refill_rspq - replenish an SGE response queue
178 * @adapter: the adapter
179 * @q: the response queue to replenish
180 * @credits: how many new responses to make available
182 * Replenishes a response queue by making the supplied number of responses available to HW.
185 static inline void refill_rspq(struct adapter *adapter,
186 const struct sge_rspq *q, unsigned int credits)
189 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
190 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
194 * need_skb_unmap - does the platform need unmapping of sk_buffs?
196 * Returns true if the platform needs sk_buff unmapping. The result is a
197 * compile-time constant, so the compiler optimizes away the unmapping code when it is unnecessary.
199 static inline int need_skb_unmap(void)
202 * This structure is used to tell if the platform needs buffer
203 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
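 * On platforms that do not need unmapping, DECLARE_PCI_UNMAP_ADDR() expands to nothing, the dummy struct is empty, and this function becomes a compile-time constant 0, letting the compiler drop the unmap paths entirely.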
206 DECLARE_PCI_UNMAP_ADDR(addr);
209 return sizeof(struct dummy) != 0;
213 * unmap_skb - unmap a packet main body and its page fragments
215 * @q: the Tx queue containing Tx descriptors for the packet
216 * @cidx: index of Tx descriptor
217 * @pdev: the PCI device
219 * Unmap the main body of an sk_buff and its page fragments, if any.
220 * Because of the fairly complicated structure of our SGLs and the desire
221 * to conserve space for metadata, the information necessary to unmap an
222 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
223 * descriptors (the physical addresses of the various data buffers), and
224 * the SW descriptor state (assorted indices). The send functions
225 * initialize the indices for the first packet descriptor so we can unmap
226 * the buffers held in the first Tx descriptor here, and we have enough
227 * information at this point to set the state for the next Tx descriptor.
229 * Note that it is possible to clean up the first descriptor of a packet
230 * before the send routines have written the next descriptors, but this
231 * race does not cause any problem. We just end up writing the unmapping
232 * info for the descriptor first.
234 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
235 unsigned int cidx, struct pci_dev *pdev)
237 const struct sg_ent *sgp;
238 struct tx_sw_desc *d = &q->sdesc[cidx];
239 int nfrags, frag_idx, curflit, j = d->addr_idx;
241 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
242 frag_idx = d->fragidx;
244 if (frag_idx == 0 && skb_headlen(skb)) {
245 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
246 skb_headlen(skb), PCI_DMA_TODEVICE);
250 curflit = d->sflit + 1 + j;
251 nfrags = skb_shinfo(skb)->nr_frags;
253 while (frag_idx < nfrags && curflit < WR_FLITS) {
254 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
255 skb_shinfo(skb)->frags[frag_idx].size,
266 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
267 d = cidx + 1 == q->size ? q->sdesc : d + 1;
268 d->fragidx = frag_idx;
270 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
275 * free_tx_desc - reclaims Tx descriptors and their buffers
276 * @adapter: the adapter
277 * @q: the Tx queue to reclaim descriptors from
278 * @n: the number of descriptors to reclaim
280 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
281 * Tx buffers. Called with the Tx queue lock held.
283 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
286 struct tx_sw_desc *d;
287 struct pci_dev *pdev = adapter->pdev;
288 unsigned int cidx = q->cidx;
290 const int need_unmap = need_skb_unmap() &&
291 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
295 if (d->skb) { /* an SGL is present */
297 unmap_skb(d->skb, q, cidx, pdev);
302 if (++cidx == q->size) {
311 * reclaim_completed_tx - reclaims completed Tx descriptors
312 * @adapter: the adapter
313 * @q: the Tx queue to reclaim completed descriptors from
314 * @chunk: maximum number of descriptors to reclaim
316 * Reclaims Tx descriptors that the SGE has indicated it has processed,
317 * and frees the associated buffers if possible. Called with the Tx queue locked.
320 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
324 unsigned int reclaim = q->processed - q->cleaned;
326 reclaim = min(chunk, reclaim);
328 free_tx_desc(adapter, q, reclaim);
329 q->cleaned += reclaim;
330 q->in_use -= reclaim;
332 return q->processed - q->cleaned;
336 * should_restart_tx - are there enough resources to restart a Tx queue?
339 * Checks if there are enough descriptors to restart a suspended Tx queue.
341 static inline int should_restart_tx(const struct sge_txq *q)
343 unsigned int r = q->processed - q->cleaned;
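/* r descriptors have been processed by HW but not yet cleaned; restart only if, once they are reclaimed, less than half the ring remains in use. */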
345 return q->in_use - r < (q->size >> 1);
348 static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d)
351 if (d->pg_chunk.page)
352 put_page(d->pg_chunk.page);
353 d->pg_chunk.page = NULL;
361 * free_rx_bufs - free the Rx buffers on an SGE free list
362 * @pdev: the PCI device associated with the adapter
363 * @rxq: the SGE free list to clean up
365 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
366 * this queue should be stopped before calling this function.
368 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
370 unsigned int cidx = q->cidx;
372 while (q->credits--) {
373 struct rx_sw_desc *d = &q->sdesc[cidx];
375 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
376 q->buf_size, PCI_DMA_FROMDEVICE);
378 if (++cidx == q->size)
382 if (q->pg_chunk.page) {
383 __free_pages(q->pg_chunk.page, q->order);
384 q->pg_chunk.page = NULL;
389 * add_one_rx_buf - add a packet buffer to a free-buffer list
390 * @va: buffer start VA
391 * @len: the buffer length
392 * @d: the HW Rx descriptor to write
393 * @sd: the SW Rx descriptor to write
394 * @gen: the generation bit value
395 * @pdev: the PCI device associated with the adapter
397 * Add a buffer of the given length to the supplied HW and SW Rx descriptors.
400 static inline int add_one_rx_buf(void *va, unsigned int len,
401 struct rx_desc *d, struct rx_sw_desc *sd,
402 unsigned int gen, struct pci_dev *pdev)
406 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
407 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
410 pci_unmap_addr_set(sd, dma_addr, mapping);
412 d->addr_lo = cpu_to_be32(mapping);
413 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
415 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
416 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
420 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
423 if (!q->pg_chunk.page) {
424 q->pg_chunk.page = alloc_pages(gfp, order);
425 if (unlikely(!q->pg_chunk.page))
427 q->pg_chunk.va = page_address(q->pg_chunk.page);
428 q->pg_chunk.offset = 0;
430 sd->pg_chunk = q->pg_chunk;
432 q->pg_chunk.offset += q->buf_size;
433 if (q->pg_chunk.offset == (PAGE_SIZE << order))
434 q->pg_chunk.page = NULL;
436 q->pg_chunk.va += q->buf_size;
437 get_page(q->pg_chunk.page);
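/* Tell HW about newly added free-list buffers only once the pending count reaches a quarter of the list's current credits, keeping doorbell writes infrequent. */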
442 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
444 if (q->pend_cred >= q->credits / 4) {
446 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
451 * refill_fl - refill an SGE free-buffer list
452 * @adapter: the adapter
453 * @q: the free-list to refill
454 * @n: the number of new buffers to allocate
455 * @gfp: the gfp flags for allocating new buffers
457 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
458 * allocated with the supplied gfp flags. The caller must ensure that
459 * @n does not exceed the queue's capacity.
461 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
464 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
465 struct rx_desc *d = &q->desc[q->pidx];
466 unsigned int count = 0;
472 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
473 nomem: q->alloc_failed++;
476 buf_start = sd->pg_chunk.va;
478 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
484 buf_start = skb->data;
487 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
490 clear_rx_desc(q, sd);
496 if (++q->pidx == q->size) {
506 q->pend_cred += count;
512 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
514 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
515 GFP_ATOMIC | __GFP_COMP);
519 * recycle_rx_buf - recycle a receive buffer
520 * @adapter: the adapter
521 * @q: the SGE free list
522 * @idx: index of buffer to recycle
524 * Recycles the specified buffer on the given free list by adding it at
525 * the next available slot on the list.
527 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
530 struct rx_desc *from = &q->desc[idx];
531 struct rx_desc *to = &q->desc[q->pidx];
533 q->sdesc[q->pidx] = q->sdesc[idx];
534 to->addr_lo = from->addr_lo; /* already big endian */
535 to->addr_hi = from->addr_hi; /* likewise */
537 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
538 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
540 if (++q->pidx == q->size) {
551 * alloc_ring - allocate resources for an SGE descriptor ring
552 * @pdev: the PCI device
553 * @nelem: the number of descriptors
554 * @elem_size: the size of each descriptor
555 * @sw_size: the size of the SW state associated with each ring element
556 * @phys: the physical address of the allocated ring
557 * @metadata: address of the array holding the SW state for the ring
559 * Allocates resources for an SGE descriptor ring, such as Tx queues,
560 * free buffer lists, or response queues. Each SGE ring requires
561 * space for its HW descriptors plus, optionally, space for the SW state
562 * associated with each HW entry (the metadata). The function returns
563 * three values: the virtual address for the HW ring (the return value
564 * of the function), the physical address of the HW ring, and the address of the SW ring.
567 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
568 size_t sw_size, dma_addr_t * phys, void *metadata)
570 size_t len = nelem * elem_size;
572 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
576 if (sw_size && metadata) {
577 s = kcalloc(nelem, sw_size, GFP_KERNEL);
580 dma_free_coherent(&pdev->dev, len, p, *phys);
583 *(void **)metadata = s;
590 * t3_reset_qset - reset a sge qset
593 * Reset the qset structure.
594 * The NAPI structure is preserved in the event of
595 * the qset's reincarnation, for example during EEH recovery.
597 static void t3_reset_qset(struct sge_qset *q)
600 !(q->adap->flags & NAPI_INIT)) {
601 memset(q, 0, sizeof(*q));
606 memset(&q->rspq, 0, sizeof(q->rspq));
607 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
608 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
610 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
611 q->rx_reclaim_timer.function = NULL;
612 q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
617 * t3_free_qset - free the resources of an SGE queue set
618 * @adapter: the adapter owning the queue set
621 * Release the HW and SW resources associated with an SGE queue set, such
622 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
623 * queue set must be quiesced prior to calling this.
625 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
628 struct pci_dev *pdev = adapter->pdev;
630 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
632 spin_lock_irq(&adapter->sge.reg_lock);
633 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
634 spin_unlock_irq(&adapter->sge.reg_lock);
635 free_rx_bufs(pdev, &q->fl[i]);
636 kfree(q->fl[i].sdesc);
637 dma_free_coherent(&pdev->dev,
639 sizeof(struct rx_desc), q->fl[i].desc,
643 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
644 if (q->txq[i].desc) {
645 spin_lock_irq(&adapter->sge.reg_lock);
646 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
647 spin_unlock_irq(&adapter->sge.reg_lock);
648 if (q->txq[i].sdesc) {
649 free_tx_desc(adapter, &q->txq[i],
651 kfree(q->txq[i].sdesc);
653 dma_free_coherent(&pdev->dev,
655 sizeof(struct tx_desc),
656 q->txq[i].desc, q->txq[i].phys_addr);
657 __skb_queue_purge(&q->txq[i].sendq);
661 spin_lock_irq(&adapter->sge.reg_lock);
662 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
663 spin_unlock_irq(&adapter->sge.reg_lock);
664 dma_free_coherent(&pdev->dev,
665 q->rspq.size * sizeof(struct rsp_desc),
666 q->rspq.desc, q->rspq.phys_addr);
673 * init_qset_cntxt - initialize an SGE queue set context info
675 * @id: the queue set id
677 * Initializes the TIDs and context ids for the queues of a queue set.
679 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
681 qs->rspq.cntxt_id = id;
682 qs->fl[0].cntxt_id = 2 * id;
683 qs->fl[1].cntxt_id = 2 * id + 1;
684 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
685 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
686 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
687 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
688 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
692 * sgl_len - calculates the size of an SGL of the given capacity
693 * @n: the number of SGL entries
695 * Calculates the number of flits needed for a scatter/gather list that
696 * can hold the given number of entries.
698 static inline unsigned int sgl_len(unsigned int n)
700 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
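/* Each sg_ent packs two 64-bit addresses and two 32-bit lengths into 3 flits; a trailing odd entry takes 2 flits, e.g. n = 3 -> 3 + 2 = 5 flits. */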
701 return (3 * n) / 2 + (n & 1);
705 * flits_to_desc - returns the number of Tx descriptors for the given flits
706 * @n: the number of flits
708 * Calculates the number of Tx descriptors needed for the supplied number of flits.
711 static inline unsigned int flits_to_desc(unsigned int n)
713 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
714 return flit_desc_map[n];
718 * get_packet - return the next ingress packet buffer from a free list
719 * @adap: the adapter that received the packet
720 * @fl: the SGE free list holding the packet
721 * @len: the packet length including any SGE padding
722 * @drop_thres: # of remaining buffers before we start dropping packets
724 * Get the next packet from a free list and complete setup of the
725 * sk_buff. If the packet is small we make a copy and recycle the
726 * original buffer, otherwise we use the original buffer itself. If a
727 * positive drop threshold is supplied packets are dropped and their
728 * buffers recycled if (a) the number of remaining buffers is under the
729 * threshold and the packet is too big to copy, or (b) the packet should
730 * be copied but there is no memory for the copy.
732 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
733 unsigned int len, unsigned int drop_thres)
735 struct sk_buff *skb = NULL;
736 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
738 prefetch(sd->skb->data);
741 if (len <= SGE_RX_COPY_THRES) {
742 skb = alloc_skb(len, GFP_ATOMIC);
743 if (likely(skb != NULL)) {
745 pci_dma_sync_single_for_cpu(adap->pdev,
746 pci_unmap_addr(sd, dma_addr), len,
748 memcpy(skb->data, sd->skb->data, len);
749 pci_dma_sync_single_for_device(adap->pdev,
750 pci_unmap_addr(sd, dma_addr), len,
752 } else if (!drop_thres)
755 recycle_rx_buf(adap, fl, fl->cidx);
759 if (unlikely(fl->credits < drop_thres) &&
760 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
761 GFP_ATOMIC | __GFP_COMP) == 0)
765 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
766 fl->buf_size, PCI_DMA_FROMDEVICE);
769 __refill_fl(adap, fl);
774 * get_packet_pg - return the next ingress packet buffer from a free list
775 * @adap: the adapter that received the packet
776 * @fl: the SGE free list holding the packet
777 * @len: the packet length including any SGE padding
778 * @drop_thres: # of remaining buffers before we start dropping packets
780 * Get the next packet from a free list populated with page chunks.
781 * If the packet is small we make a copy and recycle the original buffer,
782 * otherwise we attach the original buffer as a page fragment to a fresh
783 * sk_buff. If a positive drop threshold is supplied packets are dropped
784 * and their buffers recycled if (a) the number of remaining buffers is
785 * under the threshold and the packet is too big to copy, or (b) there's no system memory.
788 * Note: this function is similar to @get_packet but deals with Rx buffers
789 * that are page chunks rather than sk_buffs.
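 * For large packets only the first SGE_RX_PULL_LEN bytes are copied into the new sk_buff's linear area (enough for the protocol headers); the rest of the page chunk is attached as a page fragment.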
791 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
792 struct sge_rspq *q, unsigned int len,
793 unsigned int drop_thres)
795 struct sk_buff *newskb, *skb;
796 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
798 newskb = skb = q->pg_skb;
800 if (!skb && (len <= SGE_RX_COPY_THRES)) {
801 newskb = alloc_skb(len, GFP_ATOMIC);
802 if (likely(newskb != NULL)) {
803 __skb_put(newskb, len);
804 pci_dma_sync_single_for_cpu(adap->pdev,
805 pci_unmap_addr(sd, dma_addr), len,
807 memcpy(newskb->data, sd->pg_chunk.va, len);
808 pci_dma_sync_single_for_device(adap->pdev,
809 pci_unmap_addr(sd, dma_addr), len,
811 } else if (!drop_thres)
815 recycle_rx_buf(adap, fl, fl->cidx);
820 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
824 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
825 if (unlikely(!newskb)) {
831 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
832 fl->buf_size, PCI_DMA_FROMDEVICE);
834 __skb_put(newskb, SGE_RX_PULL_LEN);
835 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
836 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
837 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
838 len - SGE_RX_PULL_LEN);
840 newskb->data_len = len - SGE_RX_PULL_LEN;
841 newskb->truesize += newskb->data_len;
843 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
845 sd->pg_chunk.offset, len);
847 newskb->data_len += len;
848 newskb->truesize += len;
853 * We do not refill FLs here, we let the caller do it to overlap a
860 * get_imm_packet - return the next ingress packet buffer from a response
861 * @resp: the response descriptor containing the packet data
863 * Return a packet containing the immediate data of the given response.
865 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
867 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
870 __skb_put(skb, IMMED_PKT_SIZE);
871 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
877 * calc_tx_descs - calculate the number of Tx descriptors for a packet
880 * Returns the number of Tx descriptors needed for the given Ethernet
881 * packet. Ethernet packets require addition of WR and CPL headers.
883 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
887 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
890 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
891 if (skb_shinfo(skb)->gso_size)
893 return flits_to_desc(flits);
897 * make_sgl - populate a scatter/gather list for a packet
899 * @sgp: the SGL to populate
900 * @start: start address of skb main body data to include in the SGL
901 * @len: length of skb main body data to include in the SGL
902 * @pdev: the PCI device
904 * Generates a scatter/gather list for the buffers that make up a packet
905 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
908 static inline unsigned int make_sgl(const struct sk_buff *skb,
909 struct sg_ent *sgp, unsigned char *start,
910 unsigned int len, struct pci_dev *pdev)
913 unsigned int i, j = 0, nfrags;
916 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
917 sgp->len[0] = cpu_to_be32(len);
918 sgp->addr[0] = cpu_to_be64(mapping);
922 nfrags = skb_shinfo(skb)->nr_frags;
923 for (i = 0; i < nfrags; i++) {
924 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
926 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
927 frag->size, PCI_DMA_TODEVICE);
928 sgp->len[j] = cpu_to_be32(frag->size);
929 sgp->addr[j] = cpu_to_be64(mapping);
936 return ((nfrags + (len != 0)) * 3) / 2 + j;
940 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
944 * Ring the doorbell if a Tx queue is asleep. There is a natural race
945 * where the HW may go to sleep just after we check; in that case the
946 * interrupt handler will detect the outstanding Tx packet
947 * and ring the doorbell for us.
949 * When GTS is disabled we unconditionally ring the doorbell.
951 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
954 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
955 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
956 set_bit(TXQ_LAST_PKT_DB, &q->flags);
957 t3_write_reg(adap, A_SG_KDOORBELL,
958 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
961 wmb(); /* write descriptors before telling HW */
962 t3_write_reg(adap, A_SG_KDOORBELL,
963 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
967 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
969 #if SGE_NUM_GENBITS == 2
970 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
975 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
976 * @ndesc: number of Tx descriptors spanned by the SGL
977 * @skb: the packet corresponding to the WR
978 * @d: first Tx descriptor to be written
979 * @pidx: index of above descriptors
980 * @q: the SGE Tx queue
982 * @flits: number of flits to the start of the SGL in the first descriptor
983 * @sgl_flits: the SGL size in flits
984 * @gen: the Tx descriptor generation
985 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
986 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
988 * Write a work request header and an associated SGL. If the SGL is
989 * small enough to fit into one Tx descriptor it has already been written
990 * and we just need to write the WR header. Otherwise we distribute the
991 * SGL across the number of descriptors it spans.
993 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
994 struct tx_desc *d, unsigned int pidx,
995 const struct sge_txq *q,
996 const struct sg_ent *sgl,
997 unsigned int flits, unsigned int sgl_flits,
998 unsigned int gen, __be32 wr_hi,
1001 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1002 struct tx_sw_desc *sd = &q->sdesc[pidx];
1005 if (need_skb_unmap()) {
1011 if (likely(ndesc == 1)) {
1013 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1014 V_WR_SGLSFLT(flits)) | wr_hi;
1016 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1017 V_WR_GEN(gen)) | wr_lo;
1020 unsigned int ogen = gen;
1021 const u64 *fp = (const u64 *)sgl;
1022 struct work_request_hdr *wp = wrp;
1024 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1025 V_WR_SGLSFLT(flits)) | wr_hi;
1028 unsigned int avail = WR_FLITS - flits;
1030 if (avail > sgl_flits)
1032 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1042 if (++pidx == q->size) {
1050 wrp = (struct work_request_hdr *)d;
1051 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1052 V_WR_SGLSFLT(1)) | wr_hi;
1053 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1055 V_WR_GEN(gen)) | wr_lo;
1060 wrp->wr_hi |= htonl(F_WR_EOP);
1062 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1063 wr_gen2((struct tx_desc *)wp, ogen);
1064 WARN_ON(ndesc != 0);
1069 * write_tx_pkt_wr - write a TX_PKT work request
1070 * @adap: the adapter
1071 * @skb: the packet to send
1072 * @pi: the egress interface
1073 * @pidx: index of the first Tx descriptor to write
1074 * @gen: the generation value to use
1076 * @ndesc: number of descriptors the packet will occupy
1077 * @compl: the value of the COMPL bit to use
1079 * Generate a TX_PKT work request to send the supplied packet.
1081 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1082 const struct port_info *pi,
1083 unsigned int pidx, unsigned int gen,
1084 struct sge_txq *q, unsigned int ndesc,
1087 unsigned int flits, sgl_flits, cntrl, tso_info;
1088 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1089 struct tx_desc *d = &q->desc[pidx];
1090 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1092 cpl->len = htonl(skb->len | 0x80000000);
1093 cntrl = V_TXPKT_INTF(pi->port_id);
1095 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1096 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1098 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1101 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1104 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1105 hdr->cntrl = htonl(cntrl);
1106 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1107 CPL_ETH_II : CPL_ETH_II_VLAN;
1108 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1109 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1110 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1111 hdr->lso_info = htonl(tso_info);
1114 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1115 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1116 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1117 cpl->cntrl = htonl(cntrl);
1119 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1120 q->sdesc[pidx].skb = NULL;
1122 skb_copy_from_linear_data(skb, &d->flit[2],
1125 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1127 flits = (skb->len + 7) / 8 + 2;
1128 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1129 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1130 | F_WR_SOP | F_WR_EOP | compl);
1132 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1133 V_WR_TID(q->token));
1142 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1143 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1145 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1146 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1147 htonl(V_WR_TID(q->token)));
1150 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1151 struct sge_qset *qs, struct sge_txq *q)
1153 netif_tx_stop_queue(txq);
1154 set_bit(TXQ_ETH, &qs->txq_stopped);
1159 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1161 * @dev: the egress net device
1163 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1165 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1168 unsigned int ndesc, pidx, credits, gen, compl;
1169 const struct port_info *pi = netdev_priv(dev);
1170 struct adapter *adap = pi->adapter;
1171 struct netdev_queue *txq;
1172 struct sge_qset *qs;
1176 * The chip min packet length is 9 octets but play safe and reject
1177 * anything shorter than an Ethernet header.
1179 if (unlikely(skb->len < ETH_HLEN)) {
1181 return NETDEV_TX_OK;
1184 qidx = skb_get_queue_mapping(skb);
1186 q = &qs->txq[TXQ_ETH];
1187 txq = netdev_get_tx_queue(dev, qidx);
1189 spin_lock(&q->lock);
1190 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1192 credits = q->size - q->in_use;
1193 ndesc = calc_tx_descs(skb);
1195 if (unlikely(credits < ndesc)) {
1196 t3_stop_tx_queue(txq, qs, q);
1197 dev_err(&adap->pdev->dev,
1198 "%s: Tx ring %u full while queue awake!\n",
1199 dev->name, q->cntxt_id & 7);
1200 spin_unlock(&q->lock);
1201 return NETDEV_TX_BUSY;
1205 if (unlikely(credits - ndesc < q->stop_thres)) {
1206 t3_stop_tx_queue(txq, qs, q);
1208 if (should_restart_tx(q) &&
1209 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1211 netif_tx_wake_queue(txq);
1216 q->unacked += ndesc;
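/* Request a WR completion whenever bit 3 of the running descriptor count is set, so egress credit updates come back periodically rather than for every packet. */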
1217 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1221 if (q->pidx >= q->size) {
1226 /* update port statistics */
1227 if (skb->ip_summed == CHECKSUM_COMPLETE)
1228 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1229 if (skb_shinfo(skb)->gso_size)
1230 qs->port_stats[SGE_PSTAT_TSO]++;
1231 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1232 qs->port_stats[SGE_PSTAT_VLANINS]++;
1234 dev->trans_start = jiffies;
1235 spin_unlock(&q->lock);
1238 * We do not use Tx completion interrupts to free DMAd Tx packets.
1239 * This is good for performance but means that we rely on new Tx
1240 * packets arriving to run the destructors of completed packets,
1241 * which open up space in their sockets' send queues. Sometimes
1242 * we do not get such new packets causing Tx to stall. A single
1243 * UDP transmitter is a good example of this situation. We have
1244 * a clean up timer that periodically reclaims completed packets
1245 * but it doesn't run often enough (nor do we want it to) to prevent
1246 * lengthy stalls. A solution to this problem is to run the
1247 * destructor early, after the packet is queued but before it's DMAd.
1248 * A downside is that we lie to socket memory accounting, but the amount
1249 * of extra memory is reasonable (limited by the number of Tx
1250 * descriptors), the packets do actually get freed quickly by new
1251 * packets almost always, and for protocols like TCP that wait for
1252 * acks to really free up the data the extra memory is even less.
1253 * On the positive side we run the destructors on the sending CPU
1254 * rather than on a potentially different completing CPU, usually a
1255 * good thing. We also run them without holding our Tx queue lock,
1256 * unlike what reclaim_completed_tx() would otherwise do.
1258 * Run the destructor before telling the DMA engine about the packet
1259 * to make sure it doesn't complete and get freed prematurely.
1261 if (likely(!skb_shared(skb)))
1264 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1265 check_ring_tx_db(adap, q);
1266 return NETDEV_TX_OK;
1270 * write_imm - write a packet into a Tx descriptor as immediate data
1271 * @d: the Tx descriptor to write
1273 * @len: the length of packet data to write as immediate data
1274 * @gen: the generation bit value to write
1276 * Writes a packet as immediate data into a Tx descriptor. The packet
1277 * contains a work request at its beginning. We must write the packet
1278 * carefully so the SGE doesn't read it accidentally before it's written
1281 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1282 unsigned int len, unsigned int gen)
1284 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1285 struct work_request_hdr *to = (struct work_request_hdr *)d;
1287 if (likely(!skb->data_len))
1288 memcpy(&to[1], &from[1], len - sizeof(*from));
1290 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1292 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1293 V_WR_BCNTLFLT(len & 7));
1295 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1296 V_WR_LEN((len + 7) / 8));
1302 * check_desc_avail - check descriptor availability on a send queue
1303 * @adap: the adapter
1304 * @q: the send queue
1305 * @skb: the packet needing the descriptors
1306 * @ndesc: the number of Tx descriptors needed
1307 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1309 * Checks if the requested number of Tx descriptors is available on an
1310 * SGE send queue. If the queue is already suspended or not enough
1311 * descriptors are available the packet is queued for later transmission.
1312 * Must be called with the Tx queue locked.
1314 * Returns 0 if enough descriptors are available, 1 if there aren't
1315 * enough descriptors and the packet has been queued, and 2 if the caller
1316 * needs to retry because there weren't enough descriptors at the
1317 * beginning of the call but some freed up in the mean time.
1319 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1320 struct sk_buff *skb, unsigned int ndesc,
1323 if (unlikely(!skb_queue_empty(&q->sendq))) {
1324 addq_exit:__skb_queue_tail(&q->sendq, skb);
1327 if (unlikely(q->size - q->in_use < ndesc)) {
1328 struct sge_qset *qs = txq_to_qset(q, qid);
1330 set_bit(qid, &qs->txq_stopped);
1331 smp_mb__after_clear_bit();
1333 if (should_restart_tx(q) &&
1334 test_and_clear_bit(qid, &qs->txq_stopped))
1344 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1345 * @q: the SGE control Tx queue
1347 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1348 * that send only immediate data (presently just the control queues) and
1349 * thus do not have any sk_buffs to release.
1351 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1353 unsigned int reclaim = q->processed - q->cleaned;
1355 q->in_use -= reclaim;
1356 q->cleaned += reclaim;
1359 static inline int immediate(const struct sk_buff *skb)
1361 return skb->len <= WR_LEN;
1365 * ctrl_xmit - send a packet through an SGE control Tx queue
1366 * @adap: the adapter
1367 * @q: the control queue
1370 * Send a packet through an SGE control Tx queue. Packets sent through
1371 * a control queue must fit entirely as immediate data in a single Tx
1372 * descriptor and have no page fragments.
1374 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1375 struct sk_buff *skb)
1378 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1380 if (unlikely(!immediate(skb))) {
1383 return NET_XMIT_SUCCESS;
1386 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1387 wrp->wr_lo = htonl(V_WR_TID(q->token));
1389 spin_lock(&q->lock);
1390 again:reclaim_completed_tx_imm(q);
1392 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1393 if (unlikely(ret)) {
1395 spin_unlock(&q->lock);
1401 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1404 if (++q->pidx >= q->size) {
1408 spin_unlock(&q->lock);
1410 t3_write_reg(adap, A_SG_KDOORBELL,
1411 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1412 return NET_XMIT_SUCCESS;
1416 * restart_ctrlq - restart a suspended control queue
1417 * @qs: the queue set containing the control queue
1419 * Resumes transmission on a suspended Tx control queue.
1421 static void restart_ctrlq(unsigned long data)
1423 struct sk_buff *skb;
1424 struct sge_qset *qs = (struct sge_qset *)data;
1425 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1427 spin_lock(&q->lock);
1428 again:reclaim_completed_tx_imm(q);
1430 while (q->in_use < q->size &&
1431 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1433 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1435 if (++q->pidx >= q->size) {
1442 if (!skb_queue_empty(&q->sendq)) {
1443 set_bit(TXQ_CTRL, &qs->txq_stopped);
1444 smp_mb__after_clear_bit();
1446 if (should_restart_tx(q) &&
1447 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1452 spin_unlock(&q->lock);
1454 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1455 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1459 * Send a management message through control queue 0
1461 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1465 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1472 * deferred_unmap_destructor - unmap a packet when it is freed
1475 * This is the packet destructor used for Tx packets that need to remain
1476 * mapped until they are freed rather than until their Tx descriptors are freed.
1479 static void deferred_unmap_destructor(struct sk_buff *skb)
1482 const dma_addr_t *p;
1483 const struct skb_shared_info *si;
1484 const struct deferred_unmap_info *dui;
1486 dui = (struct deferred_unmap_info *)skb->head;
1489 if (skb->tail - skb->transport_header)
1490 pci_unmap_single(dui->pdev, *p++,
1491 skb->tail - skb->transport_header,
1494 si = skb_shinfo(skb);
1495 for (i = 0; i < si->nr_frags; i++)
1496 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1500 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1501 const struct sg_ent *sgl, int sgl_flits)
1504 struct deferred_unmap_info *dui;
1506 dui = (struct deferred_unmap_info *)skb->head;
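/* Walk the SGL the way the HW laid it out: each full 3-flit sg_ent supplies two DMA addresses, and a trailing 2-flit entry supplies one. Record them in order for deferred_unmap_destructor(). */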
1508 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1509 *p++ = be64_to_cpu(sgl->addr[0]);
1510 *p++ = be64_to_cpu(sgl->addr[1]);
1513 *p = be64_to_cpu(sgl->addr[0]);
1517 * write_ofld_wr - write an offload work request
1518 * @adap: the adapter
1519 * @skb: the packet to send
1521 * @pidx: index of the first Tx descriptor to write
1522 * @gen: the generation value to use
1523 * @ndesc: number of descriptors the packet will occupy
1525 * Write an offload work request to send the supplied packet. The packet
1526 * data already carry the work request with most fields populated.
1528 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1529 struct sge_txq *q, unsigned int pidx,
1530 unsigned int gen, unsigned int ndesc)
1532 unsigned int sgl_flits, flits;
1533 struct work_request_hdr *from;
1534 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1535 struct tx_desc *d = &q->desc[pidx];
1537 if (immediate(skb)) {
1538 q->sdesc[pidx].skb = NULL;
1539 write_imm(d, skb, skb->len, gen);
1543 /* Only TX_DATA builds SGLs */
1545 from = (struct work_request_hdr *)skb->data;
1546 memcpy(&d->flit[1], &from[1],
1547 skb_transport_offset(skb) - sizeof(*from));
1549 flits = skb_transport_offset(skb) / 8;
1550 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1551 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1552 skb->tail - skb->transport_header,
1554 if (need_skb_unmap()) {
1555 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1556 skb->destructor = deferred_unmap_destructor;
1559 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1560 gen, from->wr_hi, from->wr_lo);
1564 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1567 * Returns the number of Tx descriptors needed for the given offload
1568 * packet. These packets are already fully constructed.
1570 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1572 unsigned int flits, cnt;
1574 if (skb->len <= WR_LEN)
1575 return 1; /* packet fits as immediate data */
1577 flits = skb_transport_offset(skb) / 8; /* headers */
1578 cnt = skb_shinfo(skb)->nr_frags;
1579 if (skb->tail != skb->transport_header)
1581 return flits_to_desc(flits + sgl_len(cnt));
1585 * ofld_xmit - send a packet through an offload queue
1586 * @adap: the adapter
1587 * @q: the Tx offload queue
1590 * Send an offload packet through an SGE offload queue.
1592 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1593 struct sk_buff *skb)
1596 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1598 spin_lock(&q->lock);
1599 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1601 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1602 if (unlikely(ret)) {
1604 skb->priority = ndesc; /* save for restart */
1605 spin_unlock(&q->lock);
1615 if (q->pidx >= q->size) {
1619 spin_unlock(&q->lock);
1621 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1622 check_ring_tx_db(adap, q);
1623 return NET_XMIT_SUCCESS;
1627 * restart_offloadq - restart a suspended offload queue
1628 * @qs: the queue set containing the offload queue
1630 * Resumes transmission on a suspended Tx offload queue.
1632 static void restart_offloadq(unsigned long data)
1634 struct sk_buff *skb;
1635 struct sge_qset *qs = (struct sge_qset *)data;
1636 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1637 const struct port_info *pi = netdev_priv(qs->netdev);
1638 struct adapter *adap = pi->adapter;
1640 spin_lock(&q->lock);
1641 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1643 while ((skb = skb_peek(&q->sendq)) != NULL) {
1644 unsigned int gen, pidx;
1645 unsigned int ndesc = skb->priority;
1647 if (unlikely(q->size - q->in_use < ndesc)) {
1648 set_bit(TXQ_OFLD, &qs->txq_stopped);
1649 smp_mb__after_clear_bit();
1651 if (should_restart_tx(q) &&
1652 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1662 if (q->pidx >= q->size) {
1666 __skb_unlink(skb, &q->sendq);
1667 spin_unlock(&q->lock);
1669 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1670 spin_lock(&q->lock);
1672 spin_unlock(&q->lock);
1675 set_bit(TXQ_RUNNING, &q->flags);
1676 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1679 t3_write_reg(adap, A_SG_KDOORBELL,
1680 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1684 * queue_set - return the queue set a packet should use
1687 * Maps a packet to the SGE queue set it should use. The desired queue
1688 * set is carried in bits 1-3 in the packet's priority.
1690 static inline int queue_set(const struct sk_buff *skb)
1692 return skb->priority >> 1;
1696 * is_ctrl_pkt - return whether an offload packet is a control packet
1699 * Determines whether an offload packet should use an OFLD or a CTRL
1700 * Tx queue. This is indicated by bit 0 in the packet's priority.
1702 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1704 return skb->priority & 1;
1708 * t3_offload_tx - send an offload packet
1709 * @tdev: the offload device to send to
1712 * Sends an offload packet. We use the packet priority to select the
1713 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1714 * should be sent as regular or control, bits 1-3 select the queue set.
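 * For example, a priority of 5 (binary 101) selects queue set 2 and the CTRL queue of that set.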
1716 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1718 struct adapter *adap = tdev2adap(tdev);
1719 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1721 if (unlikely(is_ctrl_pkt(skb)))
1722 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1724 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1728 * offload_enqueue - add an offload packet to an SGE offload receive queue
1729 * @q: the SGE response queue
1732 * Add a new offload packet to an SGE response queue's offload packet
1733 * queue. If the packet is the first on the queue it schedules the RX
1734 * softirq to process the queue.
1736 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1738 int was_empty = skb_queue_empty(&q->rx_queue);
1740 __skb_queue_tail(&q->rx_queue, skb);
1743 struct sge_qset *qs = rspq_to_qset(q);
1745 napi_schedule(&qs->napi);
1750 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1751 * @tdev: the offload device that will be receiving the packets
1752 * @q: the SGE response queue that assembled the bundle
1753 * @skbs: the partial bundle
1754 * @n: the number of packets in the bundle
1756 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1758 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1760 struct sk_buff *skbs[], int n)
1763 q->offload_bundles++;
1764 tdev->recv(tdev, skbs, n);
1769 * ofld_poll - NAPI handler for offload packets in interrupt mode
1770 * @dev: the network device doing the polling
1771 * @budget: polling budget
1773 * The NAPI handler for offload packets when a response queue is serviced
1774 * by the hard interrupt handler, i.e., when it's operating in non-polling
1775 * mode. Creates small packet batches and sends them through the offload
1776 * receive handler. Batches need to be of modest size as we do prefetches
1777 * on the packets in each.
1779 static int ofld_poll(struct napi_struct *napi, int budget)
1781 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1782 struct sge_rspq *q = &qs->rspq;
1783 struct adapter *adapter = qs->adap;
1786 while (work_done < budget) {
1787 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1788 struct sk_buff_head queue;
1791 spin_lock_irq(&q->lock);
1792 __skb_queue_head_init(&queue);
1793 skb_queue_splice_init(&q->rx_queue, &queue);
1794 if (skb_queue_empty(&queue)) {
1795 napi_complete(napi);
1796 spin_unlock_irq(&q->lock);
1799 spin_unlock_irq(&q->lock);
1802 skb_queue_walk_safe(&queue, skb, tmp) {
1803 if (work_done >= budget)
1807 __skb_unlink(skb, &queue);
1808 prefetch(skb->data);
1809 skbs[ngathered] = skb;
1810 if (++ngathered == RX_BUNDLE_SIZE) {
1811 q->offload_bundles++;
1812 adapter->tdev.recv(&adapter->tdev, skbs,
1817 if (!skb_queue_empty(&queue)) {
1818 /* splice remaining packets back onto Rx queue */
1819 spin_lock_irq(&q->lock);
1820 skb_queue_splice(&queue, &q->rx_queue);
1821 spin_unlock_irq(&q->lock);
1823 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1830 * rx_offload - process a received offload packet
1831 * @tdev: the offload device receiving the packet
1832 * @rq: the response queue that received the packet
1834 * @rx_gather: a gather list of packets if we are building a bundle
1835 * @gather_idx: index of the next available slot in the bundle
1837 * Process an ingress offload packet and add it to the offload ingress
1838 * queue. Returns the index of the next available slot in the bundle.
1840 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1841 struct sk_buff *skb, struct sk_buff *rx_gather[],
1842 unsigned int gather_idx)
1844 skb_reset_mac_header(skb);
1845 skb_reset_network_header(skb);
1846 skb_reset_transport_header(skb);
1849 rx_gather[gather_idx++] = skb;
1850 if (gather_idx == RX_BUNDLE_SIZE) {
1851 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1853 rq->offload_bundles++;
1856 offload_enqueue(rq, skb);
1862 * restart_tx - check whether to restart suspended Tx queues
1863 * @qs: the queue set to resume
1865 * Restarts suspended Tx queues of an SGE queue set if they have enough
1866 * free resources to resume operation.
1868 static void restart_tx(struct sge_qset *qs)
1870 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1871 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1872 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1873 qs->txq[TXQ_ETH].restarts++;
1874 if (netif_running(qs->netdev))
1875 netif_tx_wake_queue(qs->tx_q);
1878 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1879 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1880 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1881 qs->txq[TXQ_OFLD].restarts++;
1882 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1884 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1885 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1886 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1887 qs->txq[TXQ_CTRL].restarts++;
1888 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1893 * cxgb3_arp_process - process an ARP request probing a private IP address
1894 * @adapter: the adapter
1895 * @skb: the skbuff containing the ARP request
1897 * Check if the ARP request is probing the private IP address
1898 * dedicated to iSCSI, generate an ARP reply if so.
1900 static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1902 struct net_device *dev = skb->dev;
1903 struct port_info *pi;
1905 unsigned char *arp_ptr;
1912 skb_reset_network_header(skb);
1915 if (arp->ar_op != htons(ARPOP_REQUEST))
1918 arp_ptr = (unsigned char *)(arp + 1);
1920 arp_ptr += dev->addr_len;
1921 memcpy(&sip, arp_ptr, sizeof(sip));
1922 arp_ptr += sizeof(sip);
1923 arp_ptr += dev->addr_len;
1924 memcpy(&tip, arp_ptr, sizeof(tip));
1926 pi = netdev_priv(dev);
1927 if (tip != pi->iscsi_ipv4addr)
1930 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1931 dev->dev_addr, sha);
1935 static inline int is_arp(struct sk_buff *skb)
1937 return skb->protocol == htons(ETH_P_ARP);
1941 * rx_eth - process an ingress Ethernet packet
1942 * @adap: the adapter
1943 * @rq: the response queue that received the packet
1945 * @pad: amount of padding at the start of the buffer
1947 * Process an ingress Ethernet packet and deliver it to the stack.
1948 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1949 * if it was immediate data in a response.
1951 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1952 struct sk_buff *skb, int pad, int lro)
1954 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1955 struct sge_qset *qs = rspq_to_qset(rq);
1956 struct port_info *pi;
1958 skb_pull(skb, sizeof(*p) + pad);
1959 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1960 pi = netdev_priv(skb->dev);
1961 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
1963 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1964 skb->ip_summed = CHECKSUM_UNNECESSARY;
1966 skb->ip_summed = CHECKSUM_NONE;
1967 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
1969 if (unlikely(p->vlan_valid)) {
1970 struct vlan_group *grp = pi->vlan_grp;
1972 qs->port_stats[SGE_PSTAT_VLANEX]++;
1975 vlan_gro_receive(&qs->napi, grp,
1976 ntohs(p->vlan), skb);
1978 if (unlikely(pi->iscsi_ipv4addr &&
1980 unsigned short vtag = ntohs(p->vlan) &
1982 skb->dev = vlan_group_get_device(grp,
1984 cxgb3_arp_process(adap, skb);
1986 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1990 dev_kfree_skb_any(skb);
1991 } else if (rq->polling) {
1993 napi_gro_receive(&qs->napi, skb);
1995 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
1996 cxgb3_arp_process(adap, skb);
1997 netif_receive_skb(skb);
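/* LRO/GRO aggregation is attempted only for responses whose RSS hash type indicates a 4-tuple (TCP/IP) classification. */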
2003 static inline int is_eth_tcp(u32 rss)
2005 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2009 * lro_add_page - add a page chunk to an LRO session
2010 * @adap: the adapter
2011 * @qs: the associated queue set
2012 * @fl: the free list containing the page chunk to add
2013 * @len: packet length
2014 * @complete: Indicates the last fragment of a frame
2016 * Add a received packet contained in a page chunk to an existing LRO session.
2019 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2020 struct sge_fl *fl, int len, int complete)
2022 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2023 struct cpl_rx_pkt *cpl;
2024 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
2025 int nr_frags = qs->lro_frag_tbl.nr_frags;
2026 int frag_len = qs->lro_frag_tbl.len;
2030 offset = 2 + sizeof(struct cpl_rx_pkt);
2031 qs->lro_va = cpl = sd->pg_chunk.va + 2;
2037 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2038 fl->buf_size, PCI_DMA_FROMDEVICE);
2040 prefetch(&qs->lro_frag_tbl);
2042 rx_frag += nr_frags;
2043 rx_frag->page = sd->pg_chunk.page;
2044 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2045 rx_frag->size = len;
2047 qs->lro_frag_tbl.nr_frags++;
2048 qs->lro_frag_tbl.len = frag_len;
2053 qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
2056 if (unlikely(cpl->vlan_valid)) {
2057 struct net_device *dev = qs->netdev;
2058 struct port_info *pi = netdev_priv(dev);
2059 struct vlan_group *grp = pi->vlan_grp;
2061 if (likely(grp != NULL)) {
2062 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
2067 napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
2070 qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
2074 * handle_rsp_cntrl_info - handles control information in a response
2075 * @qs: the queue set corresponding to the response
2076 * @flags: the response control flags
2078 * Handles the control information of an SGE response, such as GTS
2079 * indications and completion credits for the queue set's Tx queues.
2080 * HW coalesces credits, we don't do any extra SW coalescing.
2082 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2084 unsigned int credits;
2087 if (flags & F_RSPD_TXQ0_GTS)
2088 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2091 credits = G_RSPD_TXQ0_CR(flags);
2093 qs->txq[TXQ_ETH].processed += credits;
2095 credits = G_RSPD_TXQ2_CR(flags);
2097 qs->txq[TXQ_CTRL].processed += credits;
2100 if (flags & F_RSPD_TXQ1_GTS)
2101 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2103 credits = G_RSPD_TXQ1_CR(flags);
2105 qs->txq[TXQ_OFLD].processed += credits;
2109 * check_ring_db - check if we need to ring any doorbells
2110 * @adapter: the adapter
2111 * @qs: the queue set whose Tx queues are to be examined
2112 * @sleeping: indicates which Tx queue sent GTS
2114 * Checks if some of a queue set's Tx queues need to ring their doorbells
2115 * to resume transmission after idling while they still have unprocessed
2118 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2119 unsigned int sleeping)
2121 if (sleeping & F_RSPD_TXQ0_GTS) {
2122 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2124 if (txq->cleaned + txq->in_use != txq->processed &&
2125 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2126 set_bit(TXQ_RUNNING, &txq->flags);
2127 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2128 V_EGRCNTX(txq->cntxt_id));
2132 if (sleeping & F_RSPD_TXQ1_GTS) {
2133 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2135 if (txq->cleaned + txq->in_use != txq->processed &&
2136 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2137 set_bit(TXQ_RUNNING, &txq->flags);
2138 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2139 V_EGRCNTX(txq->cntxt_id));
2145 * is_new_response - check if a response is newly written
2146 * @r: the response descriptor
2147 * @q: the response queue
2149 * Returns true if a response descriptor contains a yet unprocessed response.
2152 static inline int is_new_response(const struct rsp_desc *r,
2153 const struct sge_rspq *q)
2155 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2158 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2161 q->rx_recycle_buf = 0;
2164 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2165 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2166 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2167 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2168 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2170 /* How long to delay the next interrupt in case of memory shortage, in units of 0.1 us (2500 = 250 us). */
2171 #define NOMEM_INTR_DELAY 2500
2174 * process_responses - process responses from an SGE response queue
2175 * @adap: the adapter
2176 * @qs: the queue set to which the response queue belongs
2177 * @budget: how many responses can be processed in this round
2179 * Process responses from an SGE response queue up to the supplied budget.
2180 * Responses include received packets as well as credits and other events
2181 * for the queues that belong to the response queue's queue set.
2182 * A negative budget is effectively unlimited.
2184 * Additionally choose the interrupt holdoff time for the next interrupt
2185 * on this queue. If the system is under memory shortage, use a fairly
2186 * long delay to help recovery.
2188 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2191 struct sge_rspq *q = &qs->rspq;
2192 struct rsp_desc *r = &q->desc[q->cidx];
2193 int budget_left = budget;
2194 unsigned int sleeping = 0;
2195 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2198 q->next_holdoff = q->holdoff_tmr;
2200 while (likely(budget_left && is_new_response(r, q))) {
2201 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2202 struct sk_buff *skb = NULL;
2203 u32 len, flags = ntohl(r->flags);
2204 __be32 rss_hi = *(const __be32 *)r,
2205 rss_lo = r->rss_hdr.rss_hash_val;
2207 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2209 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2210 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2214 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2215 skb->data[0] = CPL_ASYNC_NOTIF;
2216 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2218 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2219 skb = get_imm_packet(r);
2220 if (unlikely(!skb)) {
2222 q->next_holdoff = NOMEM_INTR_DELAY;
2224 /* consume one credit since we tried */
2230 } else if ((len = ntohl(r->len_cq)) != 0) {
2233 lro &= eth && is_eth_tcp(rss_hi);
2235 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2236 if (fl->use_pages) {
2237 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2240 #if L1_CACHE_BYTES < 128
2241 prefetch(addr + L1_CACHE_BYTES);
2243 __refill_fl(adap, fl);
2245 lro_add_page(adap, qs, fl,
2247 flags & F_RSPD_EOP);
2251 skb = get_packet_pg(adap, fl, q,
2254 SGE_RX_DROP_THRES : 0);
2257 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2258 eth ? SGE_RX_DROP_THRES : 0);
2259 if (unlikely(!skb)) {
2263 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2266 if (++fl->cidx == fl->size)
2271 if (flags & RSPD_CTRL_MASK) {
2272 sleeping |= flags & RSPD_GTS_MASK;
2273 handle_rsp_cntrl_info(qs, flags);
2277 if (unlikely(++q->cidx == q->size)) {
2284 if (++q->credits >= (q->size / 4)) {
2285 refill_rspq(adap, q, q->credits);
2289 packet_complete = flags &
2290 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2291 F_RSPD_ASYNC_NOTIF);
2293 if (skb != NULL && packet_complete) {
2295 rx_eth(adap, q, skb, ethpad, lro);
2298 /* Preserve the RSS info in csum & priority */
2300 skb->priority = rss_lo;
2301 ngathered = rx_offload(&adap->tdev, q, skb,
2306 if (flags & F_RSPD_EOP)
2307 clear_rspq_bufstate(q);
2312 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2315 check_ring_db(adap, qs, sleeping);
2317 smp_mb(); /* commit Tx queue .processed updates */
2318 if (unlikely(qs->txq_stopped != 0))
2321 budget -= budget_left;
2325 static inline int is_pure_response(const struct rsp_desc *r)
2327 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2329 return (n | r->len_cq) == 0;
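/*
 * A "pure" response carries no packet data at all: no async notification,
 * no immediate data and no free-list buffer, so both the tested flag bits
 * and len_cq must be zero, which is what the single OR-and-compare above
 * checks.
 */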
2333 * napi_rx_handler - the NAPI handler for Rx processing
2334 * @napi: the napi instance
2335 * @budget: how many packets we can process in this round
2337 * Handler for new data events when using NAPI.
2339 static int napi_rx_handler(struct napi_struct *napi, int budget)
2341 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2342 struct adapter *adap = qs->adap;
2343 int work_done = process_responses(adap, qs, budget);
2345 if (likely(work_done < budget)) {
2346 napi_complete(napi);
2349 * Because we don't atomically flush the following
2350 * write, it is possible that in very rare cases it can
2351 * reach the device in a way that races with a new
2352 * response being written plus an error interrupt
2353 * causing the NAPI interrupt handler below to return
2354 * unhandled status to the OS. Protecting against
2355 * this would require flushing the write and doing
2356 * both the write and the flush with interrupts off.
2357 * Way too expensive and unjustifiable given the
2358 * rarity of the race.
2360 * The race cannot happen at all with MSI-X.
2362 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2363 V_NEWTIMER(qs->rspq.next_holdoff) |
2364 V_NEWINDEX(qs->rspq.cidx));
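/*
 * Note on the NAPI contract used above: completing with work_done < budget
 * (napi_complete() plus the GTS write that re-enables this queue's
 * interrupt) tells the core the queue is idle; returning the full budget
 * instead leaves the queue on the poll list and no GTS is issued.
 */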
2370 * Returns true if the device is already scheduled for polling.
2372 static inline int napi_is_scheduled(struct napi_struct *napi)
2374 return test_bit(NAPI_STATE_SCHED, &napi->state);
2378 * process_pure_responses - process pure responses from a response queue
2379 * @adap: the adapter
2380 * @qs: the queue set owning the response queue
2381 * @r: the first pure response to process
2383 * A simpler version of process_responses() that handles only pure (i.e.,
2384 * non data-carrying) responses. Such responses are too lightweight to
2385 * justify calling a softirq under NAPI, so we handle them specially in
2386 * the interrupt handler. The function is called with a pointer to a
2387 * response, which the caller must ensure is a valid pure response.
2389 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2391 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2394 struct sge_rspq *q = &qs->rspq;
2395 unsigned int sleeping = 0;
2398 u32 flags = ntohl(r->flags);
2401 if (unlikely(++q->cidx == q->size)) {
2408 if (flags & RSPD_CTRL_MASK) {
2409 sleeping |= flags & RSPD_GTS_MASK;
2410 handle_rsp_cntrl_info(qs, flags);
2414 if (++q->credits >= (q->size / 4)) {
2415 refill_rspq(adap, q, q->credits);
2418 } while (is_new_response(r, q) && is_pure_response(r));
2421 check_ring_db(adap, qs, sleeping);
2423 smp_mb(); /* commit Tx queue .processed updates */
2424 if (unlikely(qs->txq_stopped != 0))
2427 return is_new_response(r, q);
2431 * handle_responses - decide what to do with new responses in NAPI mode
2432 * @adap: the adapter
2433 * @q: the response queue
2435 * This is used by the NAPI interrupt handlers to decide what to do with
2436 * new SGE responses. If there are no new responses it returns -1. If
2437 * there are new responses and they are pure (i.e., non-data carrying)
2438 * it handles them straight in hard interrupt context as they are very
2439 * cheap and don't deliver any packets. Finally, if there are any data
2440 * signaling responses it schedules the NAPI handler. Returns 1 if it
2441 * schedules NAPI, 0 if all new responses were pure.
2443 * The caller must ascertain NAPI is not already running.
2445 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2447 struct sge_qset *qs = rspq_to_qset(q);
2448 struct rsp_desc *r = &q->desc[q->cidx];
2450 if (!is_new_response(r, q))
2452 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2453 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2454 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2457 napi_schedule(&qs->napi);
2462 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2463 * (i.e., response queue serviced in hard interrupt).
2465 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2467 struct sge_qset *qs = cookie;
2468 struct adapter *adap = qs->adap;
2469 struct sge_rspq *q = &qs->rspq;
2471 spin_lock(&q->lock);
2472 if (process_responses(adap, qs, -1) == 0)
2473 q->unhandled_irqs++;
2474 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2475 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2476 spin_unlock(&q->lock);
2481 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2482 * (i.e., response queue serviced by NAPI polling).
2484 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2486 struct sge_qset *qs = cookie;
2487 struct sge_rspq *q = &qs->rspq;
2489 spin_lock(&q->lock);
2491 if (handle_responses(qs->adap, q) < 0)
2492 q->unhandled_irqs++;
2493 spin_unlock(&q->lock);
2498 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2499 * SGE response queues as well as error and other async events as they all use
2500 * the same MSI vector. We use one SGE response queue per port in this mode
2501 * and protect all response queues with queue 0's lock.
2503 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2505 int new_packets = 0;
2506 struct adapter *adap = cookie;
2507 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2509 spin_lock(&q->lock);
2511 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2512 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2513 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2517 if (adap->params.nports == 2 &&
2518 process_responses(adap, &adap->sge.qs[1], -1)) {
2519 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2521 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2522 V_NEWTIMER(q1->next_holdoff) |
2523 V_NEWINDEX(q1->cidx));
2527 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2528 q->unhandled_irqs++;
2530 spin_unlock(&q->lock);
2534 static int rspq_check_napi(struct sge_qset *qs)
2536 struct sge_rspq *q = &qs->rspq;
2538 if (!napi_is_scheduled(&qs->napi) &&
2539 is_new_response(&q->desc[q->cidx], q)) {
2540 napi_schedule(&qs->napi);
2547 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2548 * by NAPI polling). Handles data events from SGE response queues as well as
2549 * error and other async events as they all use the same MSI vector. We use
2550 * one SGE response queue per port in this mode and protect all response
2551 * queues with queue 0's lock.
2553 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2556 struct adapter *adap = cookie;
2557 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2559 spin_lock(&q->lock);
2561 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2562 if (adap->params.nports == 2)
2563 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2564 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2565 q->unhandled_irqs++;
2567 spin_unlock(&q->lock);
2572 * A helper function that processes responses and issues GTS.
2574 static inline int process_responses_gts(struct adapter *adap,
2575 struct sge_rspq *rq)
2579 work = process_responses(adap, rspq_to_qset(rq), -1);
2580 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2581 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2586 * The legacy INTx interrupt handler. This needs to handle data events from
2587 * SGE response queues as well as error and other async events as they all use
2588 * the same interrupt pin. We use one SGE response queue per port in this mode
2589 * and protect all response queues with queue 0's lock.
2591 static irqreturn_t t3_intr(int irq, void *cookie)
2593 int work_done, w0, w1;
2594 struct adapter *adap = cookie;
2595 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2596 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2598 spin_lock(&q0->lock);
2600 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2601 w1 = adap->params.nports == 2 &&
2602 is_new_response(&q1->desc[q1->cidx], q1);
2604 if (likely(w0 | w1)) {
2605 t3_write_reg(adap, A_PL_CLI, 0);
2606 t3_read_reg(adap, A_PL_CLI); /* flush */
2609 process_responses_gts(adap, q0);
2612 process_responses_gts(adap, q1);
2614 work_done = w0 | w1;
2616 work_done = t3_slow_intr_handler(adap);
2618 spin_unlock(&q0->lock);
2619 return IRQ_RETVAL(work_done != 0);
2623 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2624 * Handles data events from SGE response queues as well as error and other
2625 * async events as they all use the same interrupt pin. We use one SGE
2626 * response queue per port in this mode and protect all response queues with queue 0's lock.
2629 static irqreturn_t t3b_intr(int irq, void *cookie)
2632 struct adapter *adap = cookie;
2633 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2635 t3_write_reg(adap, A_PL_CLI, 0);
2636 map = t3_read_reg(adap, A_SG_DATA_INTR);
2638 if (unlikely(!map)) /* shared interrupt, most likely */
2641 spin_lock(&q0->lock);
2643 if (unlikely(map & F_ERRINTR))
2644 t3_slow_intr_handler(adap);
2646 if (likely(map & 1))
2647 process_responses_gts(adap, q0);
2650 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2652 spin_unlock(&q0->lock);
2657 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2658 * Handles data events from SGE response queues as well as error and other
2659 * async events as they all use the same interrupt pin. We use one SGE
2660 * response queue per port in this mode and protect all response queues with queue 0's lock.
2663 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2666 struct adapter *adap = cookie;
2667 struct sge_qset *qs0 = &adap->sge.qs[0];
2668 struct sge_rspq *q0 = &qs0->rspq;
2670 t3_write_reg(adap, A_PL_CLI, 0);
2671 map = t3_read_reg(adap, A_SG_DATA_INTR);
2673 if (unlikely(!map)) /* shared interrupt, most likely */
2676 spin_lock(&q0->lock);
2678 if (unlikely(map & F_ERRINTR))
2679 t3_slow_intr_handler(adap);
2681 if (likely(map & 1))
2682 napi_schedule(&qs0->napi);
2685 napi_schedule(&adap->sge.qs[1].napi);
2687 spin_unlock(&q0->lock);
2692 * t3_intr_handler - select the top-level interrupt handler
2693 * @adap: the adapter
2694 * @polling: whether using NAPI to service response queues
2696 * Selects the top-level interrupt handler based on the type of interrupts
2697 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2700 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2702 if (adap->flags & USING_MSIX)
2703 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2704 if (adap->flags & USING_MSI)
2705 return polling ? t3_intr_msi_napi : t3_intr_msi;
2706 if (adap->params.rev > 0)
2707 return polling ? t3b_intr_napi : t3b_intr;
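/*
 * Usage sketch (illustrative, not part of this file): the adapter bring-up
 * path is expected to hand the selected handler straight to request_irq(),
 * with a cookie matching what the handler dereferences -- a struct sge_qset *
 * for the MSI-X handlers, the struct adapter * otherwise.  For the MSI/INTx
 * case, something along these lines:
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 */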
2711 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2712 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2713 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2714 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2716 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2717 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2721 * t3_sge_err_intr_handler - SGE async event interrupt handler
2722 * @adapter: the adapter
2724 * Interrupt handler for SGE asynchronous (non-data) events.
2726 void t3_sge_err_intr_handler(struct adapter *adapter)
2728 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2731 if (status & SGE_PARERR)
2732 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2733 status & SGE_PARERR);
2734 if (status & SGE_FRAMINGERR)
2735 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2736 status & SGE_FRAMINGERR);
2738 if (status & F_RSPQCREDITOVERFOW)
2739 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2741 if (status & F_RSPQDISABLED) {
2742 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2745 "packet delivered to disabled response queue "
2746 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2749 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2750 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2751 status & F_HIPIODRBDROPERR ? "high" : "lo");
2753 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2754 if (status & SGE_FATALERR)
2755 t3_fatal_err(adapter);
2759 * sge_timer_tx - perform periodic maintenance of an SGE qset
2760 * @data: the SGE queue set to maintain
2762 * Runs periodically from a timer to perform maintenance of an SGE queue
2763 * set. It performs a single task:
2765 * Cleans up any completed Tx descriptors that may still be pending.
2766 * Normal descriptor cleanup happens when new packets are added to a Tx
2767 * queue so this timer is relatively infrequent and does any cleanup only
2768 * if the Tx queue has not seen any new packets in a while. We make a
2769 * best effort attempt to reclaim descriptors, in that we don't wait
2770 * around if we cannot get a queue's lock (which most likely is because
2771 * someone else is queueing new packets and so will also handle the clean
2772 * up). Since control queues use immediate data exclusively we don't
2773 * bother cleaning them up here.
2776 static void sge_timer_tx(unsigned long data)
2778 struct sge_qset *qs = (struct sge_qset *)data;
2779 struct port_info *pi = netdev_priv(qs->netdev);
2780 struct adapter *adap = pi->adapter;
2781 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2782 unsigned long next_period;
2784 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2785 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2786 TX_RECLAIM_TIMER_CHUNK);
2787 spin_unlock(&qs->txq[TXQ_ETH].lock);
2789 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2790 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2791 TX_RECLAIM_TIMER_CHUNK);
2792 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2795 next_period = TX_RECLAIM_PERIOD >>
2796 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2797 TX_RECLAIM_TIMER_CHUNK);
2798 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
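/*
 * The adaptive period above divides TX_RECLAIM_PERIOD by
 * 2^(max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / TX_RECLAIM_TIMER_CHUNK): once a
 * queue reports at least a full TX_RECLAIM_TIMER_CHUNK (64) descriptors the
 * timer reschedules itself more aggressively, otherwise it stays at the
 * full TX_RECLAIM_PERIOD of HZ / 4.
 */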
2802 * sge_timer_rx - perform periodic maintenance of an SGE qset
2803 * @data: the SGE queue set to maintain
2805 * a) Replenishes Rx queues that have run out due to memory shortage.
2806 * Normally new Rx buffers are added when existing ones are consumed but
2807 * when out of memory a queue can become empty. We try to add only a few
2808 * buffers here; the queue will be replenished fully as these new buffers
2809 * are used up if memory shortage has subsided.
2811 * b) Returns coalesced response queue credits in case a response queue is starved.
2815 static void sge_timer_rx(unsigned long data)
2818 struct sge_qset *qs = (struct sge_qset *)data;
2819 struct port_info *pi = netdev_priv(qs->netdev);
2820 struct adapter *adap = pi->adapter;
2823 lock = adap->params.rev > 0 ?
2824 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2826 if (!spin_trylock_irq(lock))
2829 if (napi_is_scheduled(&qs->napi))
2832 if (adap->params.rev < 4) {
2833 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2835 if (status & (1 << qs->rspq.cntxt_id)) {
2837 if (qs->rspq.credits) {
2839 refill_rspq(adap, &qs->rspq, 1);
2840 qs->rspq.restarted++;
2841 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2842 1 << qs->rspq.cntxt_id);
2847 if (qs->fl[0].credits < qs->fl[0].size)
2848 __refill_fl(adap, &qs->fl[0]);
2849 if (qs->fl[1].credits < qs->fl[1].size)
2850 __refill_fl(adap, &qs->fl[1]);
2853 spin_unlock_irq(lock);
2855 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2859 * t3_update_qset_coalesce - update coalescing settings for a queue set
2860 * @qs: the SGE queue set
2861 * @p: new queue set parameters
2863 * Update the coalescing settings for an SGE queue set. Nothing is done
2864 * if the queue set is not initialized yet.
2866 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2868 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2869 qs->rspq.polling = p->polling;
2870 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
2874 * t3_sge_alloc_qset - initialize an SGE queue set
2875 * @adapter: the adapter
2876 * @id: the queue set id
2877 * @nports: how many Ethernet ports will be using this queue set
2878 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2879 * @p: configuration parameters for this queue set
2880 * @ntxq: number of Tx queues for the queue set
2881 * @netdev: net device associated with this queue set
2882 * @netdevq: net device TX queue associated with this queue set
2884 * Allocate resources and initialize an SGE queue set. A queue set
2885 * comprises a response queue, two Rx free-buffer queues, and up to 3
2886 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2887 * queue, offload queue, and control queue.
2889 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2890 int irq_vec_idx, const struct qset_params *p,
2891 int ntxq, struct net_device *dev,
2892 struct netdev_queue *netdevq)
2894 int i, avail, ret = -ENOMEM;
2895 struct sge_qset *q = &adapter->sge.qs[id];
2897 init_qset_cntxt(q, id);
2898 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
2899 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
2901 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2902 sizeof(struct rx_desc),
2903 sizeof(struct rx_sw_desc),
2904 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2908 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2909 sizeof(struct rx_desc),
2910 sizeof(struct rx_sw_desc),
2911 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2915 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2916 sizeof(struct rsp_desc), 0,
2917 &q->rspq.phys_addr, NULL);
2921 for (i = 0; i < ntxq; ++i) {
2923 * The control queue always uses immediate data so does not
2924 * need to keep track of any sk_buffs.
2926 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2928 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2929 sizeof(struct tx_desc), sz,
2930 &q->txq[i].phys_addr,
2932 if (!q->txq[i].desc)
2936 q->txq[i].size = p->txq_size[i];
2937 spin_lock_init(&q->txq[i].lock);
2938 skb_queue_head_init(&q->txq[i].sendq);
2941 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2943 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2946 q->fl[0].gen = q->fl[1].gen = 1;
2947 q->fl[0].size = p->fl_size;
2948 q->fl[1].size = p->jumbo_size;
2951 q->rspq.size = p->rspq_size;
2952 spin_lock_init(&q->rspq.lock);
2953 skb_queue_head_init(&q->rspq.rx_queue);
2955 q->txq[TXQ_ETH].stop_thres = nports *
2956 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
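/*
 * Sizing sketch: stop_thres approximates the worst case for one packet --
 * an SGL covering MAX_SKB_FRAGS page fragments plus the linear part, plus
 * a few flits of work-request/CPL header (the "+ 3") -- converted to Tx
 * descriptors and scaled by the number of ports feeding this queue set, so
 * the Ethernet Tx queue is stopped before it can run dry mid-packet.
 */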
2958 #if FL0_PG_CHUNK_SIZE > 0
2959 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
2961 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2963 #if FL1_PG_CHUNK_SIZE > 0
2964 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2966 q->fl[1].buf_size = is_offload(adapter) ?
2967 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2968 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2971 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2972 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2973 q->fl[0].order = FL0_PG_ORDER;
2974 q->fl[1].order = FL1_PG_ORDER;
2976 spin_lock_irq(&adapter->sge.reg_lock);
2978 /* FL threshold comparison uses < */
2979 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2980 q->rspq.phys_addr, q->rspq.size,
2981 q->fl[0].buf_size, 1, 0);
2985 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2986 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2987 q->fl[i].phys_addr, q->fl[i].size,
2988 q->fl[i].buf_size, p->cong_thres, 1,
2994 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2995 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2996 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3002 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3003 USE_GTS, SGE_CNTXT_OFLD, id,
3004 q->txq[TXQ_OFLD].phys_addr,
3005 q->txq[TXQ_OFLD].size, 0, 1, 0);
3011 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3013 q->txq[TXQ_CTRL].phys_addr,
3014 q->txq[TXQ_CTRL].size,
3015 q->txq[TXQ_CTRL].token, 1, 0);
3020 spin_unlock_irq(&adapter->sge.reg_lock);
3025 t3_update_qset_coalesce(q, p);
3027 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3028 GFP_KERNEL | __GFP_COMP);
3030 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3033 if (avail < q->fl[0].size)
3034 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3037 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3038 GFP_KERNEL | __GFP_COMP);
3039 if (avail < q->fl[1].size)
3040 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3042 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3044 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3045 V_NEWTIMER(q->rspq.holdoff_tmr));
3047 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3048 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3053 spin_unlock_irq(&adapter->sge.reg_lock);
3055 t3_free_qset(adapter, q);
3060 * t3_stop_sge_timers - stop SGE timer callbacks
3061 * @adap: the adapter
3063 * Stops each SGE queue set's timer callbacks.
3065 void t3_stop_sge_timers(struct adapter *adap)
3069 for (i = 0; i < SGE_QSETS; ++i) {
3070 struct sge_qset *q = &adap->sge.qs[i];
3072 if (q->tx_reclaim_timer.function)
3073 del_timer_sync(&q->tx_reclaim_timer);
3074 if (q->rx_reclaim_timer.function)
3075 del_timer_sync(&q->rx_reclaim_timer);
3080 * t3_free_sge_resources - free SGE resources
3081 * @adap: the adapter
3083 * Frees resources used by the SGE queue sets.
3085 void t3_free_sge_resources(struct adapter *adap)
3089 for (i = 0; i < SGE_QSETS; ++i)
3090 t3_free_qset(adap, &adap->sge.qs[i]);
3094 * t3_sge_start - enable SGE
3095 * @adap: the adapter
3097 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
3100 void t3_sge_start(struct adapter *adap)
3102 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3106 * t3_sge_stop - disable SGE operation
3107 * @adap: the adapter
3109 * Disables the DMA engine. This can be called in emergencies (e.g.,
3110 * from error interrupts) or from normal process context. In the latter
3111 * case it also disables any pending queue restart tasklets. Note that
3112 * if it is called in interrupt context it cannot disable the restart
3113 * tasklets as it cannot wait; however, the tasklets will have no effect
3114 * since the doorbells are disabled and the driver will call this again
3115 * later from process context, at which time the tasklets will be stopped
3116 * if they are still running.
3118 void t3_sge_stop(struct adapter *adap)
3120 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3121 if (!in_interrupt()) {
3124 for (i = 0; i < SGE_QSETS; ++i) {
3125 struct sge_qset *qs = &adap->sge.qs[i];
3127 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3128 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3134 * t3_sge_init - initialize SGE
3135 * @adap: the adapter
3136 * @p: the SGE parameters
3138 * Performs SGE initialization needed every time after a chip reset.
3139 * We do not initialize any of the queue sets here; instead the driver
3140 * top-level must request those individually. We also do not enable DMA
3141 * here; that should be done after the queues have been set up.
3143 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3145 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3147 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3148 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3149 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3150 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3151 #if SGE_NUM_GENBITS == 1
3152 ctrl |= F_EGRGENCTRL;
3154 if (adap->params.rev > 0) {
3155 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3156 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3158 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3159 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3160 V_LORCQDRBTHRSH(512));
3161 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3162 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3163 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3164 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3165 adap->params.rev < T3_REV_C ? 1000 : 500);
3166 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3167 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3168 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3169 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3170 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3174 * t3_sge_prep - one-time SGE initialization
3175 * @adap: the associated adapter
3176 * @p: SGE parameters
3178 * Performs one-time initialization of SGE SW state. Includes determining
3179 * defaults for the assorted SGE parameters, which admins can change until
3180 * they are used to initialize the SGE.
3182 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3186 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3187 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3189 for (i = 0; i < SGE_QSETS; ++i) {
3190 struct qset_params *q = p->qset + i;
3192 q->polling = adap->params.rev > 0;
3193 q->coalesce_usecs = 5;
3194 q->rspq_size = 1024;
3196 q->jumbo_size = 512;
3197 q->txq_size[TXQ_ETH] = 1024;
3198 q->txq_size[TXQ_OFLD] = 1024;
3199 q->txq_size[TXQ_CTRL] = 256;
3203 spin_lock_init(&adap->sge.reg_lock);
3207 * t3_get_desc - dump an SGE descriptor for debugging purposes
3208 * @qs: the queue set
3209 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
3210 * @idx: the descriptor index in the queue
3211 * @data: where to dump the descriptor contents
3213 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3214 * size of the descriptor.
3216 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3217 unsigned char *data)
3223 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3225 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3226 return sizeof(struct tx_desc);
3230 if (!qs->rspq.desc || idx >= qs->rspq.size)
3232 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3233 return sizeof(struct rsp_desc);
3237 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3239 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3240 return sizeof(struct rx_desc);
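/*
 * Usage sketch (illustrative, not part of the driver): a debug path could
 * dump descriptor idx of a queue set's Ethernet Tx queue along these lines:
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(qs, 0, idx, buf);
 *
 *	if (len > 0)
 *		print_hex_dump(KERN_DEBUG, "txd: ", DUMP_PREFIX_OFFSET,
 *			       16, 1, buf, len, 0);
 */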