/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

#define BE_VENDOR_ID		0x19a2
#define BE2_DEVICE_ID_1		0x0211
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}

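/* Enable or disable host interrupt delivery by toggling the hostintr bit
 * in the membar control register. A request that would not change the
 * current state is flagged with a warning instead of being written back.
 */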
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled && enable) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else if (enabled && !enable) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else {
		printk(KERN_WARNING DRV_NAME
			": bad value in membar_int_ctrl reg=0x%x\n", reg);
		return;
	}
	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}

static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (netif_running(netdev)) {
		status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
				adapter->pmac_id);
		if (status)
			return status;

		status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id);
	}

	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space in linux buffers */
	dev_stats->rx_dropped = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;
	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

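/* Query the current link state from the firmware and start/stop the tx
 * queue on an up/down transition; invoked periodically from be_worker. */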
static void be_link_status_update(struct be_adapter *adapter)
{
	struct be_link_info *prev = &adapter->link;
	struct be_link_info now = { 0 };
	struct net_device *netdev = adapter->netdev;

	be_cmd_link_status_query(&adapter->ctrl, &now);

	/* If link came up or went down */
	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
			prev->speed == PHY_LINK_SPEED_ZERO)) {
		if (now.speed == PHY_LINK_SPEED_ZERO) {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		} else {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		}
	}
	*prev = now;
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	u32 eqd;
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;

	/* Update once a second */
	if (((jiffies - stats->rx_fps_jiffies) < HZ) || rx_eq->enable_aic == 0)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((jiffies - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = jiffies;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;

	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);

	return &adapter->stats.net_stats;
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;

	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;

	/* Update tx rate once in two seconds */
	if ((jiffies - stats->be_tx_jiffies) > 2 * HZ) {
		u32 r;
		r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) /
			((u32) (jiffies - stats->be_tx_jiffies) / HZ);
		r = (r / 1000000);		/* M bytes/s */
		stats->be_tx_rate = (r * 8);	/* M bits/s */
		stats->be_tx_jiffies = jiffies;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = 0;

	while (skb) {
		if (skb->len > skb->data_len)
			cnt++;
		cnt += skb_shinfo(skb)->nr_frags;
		skb = skb_shinfo(skb)->frag_list;
	}
	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

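/* Map the skb for DMA and build one WRB per mapped piece (the linear
 * head, then each page fragment), preceded by a header WRB and possibly
 * followed by a zero-length dummy WRB that keeps the WRB count even.
 * Returns the number of payload bytes mapped. */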
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	while (skb) {
		if (skb->len > skb->data_len) {
			int len = skb->len - skb->data_len;
			busaddr = pci_map_single(pdev, skb->data, len,
					PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, len);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += len;
		}

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[i];
			busaddr = pci_map_page(pdev, frag->page,
					frag->page_offset,
					frag->size, PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, frag->size);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += frag->size;
		}

		skb = skb_shinfo(skb)->frag_list;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);

	netdev->trans_start = jiffies;

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * if there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	be_eq_notify(ctrl, rx_eq->q.id, false, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}

static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];
	int i = 0;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, NULL, 0, true);
		return;
	}

	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
		if (++i == 32) {
			be_cmd_mcast_mac_set(&adapter->ctrl,
				adapter->if_handle, &mac_addr[0][0], i, false);
			i = 0;
		}
	}

	if (i) {
		/* reset the promiscuous mode also. */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, &mac_addr[0][0], i, false);
	}
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
	} else {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
		be_set_multicast_filter(netdev);
	}
}

static void be_rx_rate_update(struct be_adapter *adapter, u32 pktsize,
		u16 numfrags)
{
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	u32 rate;

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;

	/* Update the rate once in two seconds */
	if ((jiffies - stats->be_rx_jiffies) < 2 * HZ)
		return;

	rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) /
		((u32) (jiffies - stats->be_rx_jiffies) / HZ);
	rate = (rate / 1000000);	/* MB/Sec */
	stats->be_rx_rate = (rate * 8); /* Mega Bits/Sec */
	stats->be_rx_jiffies = jiffies;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;
	u32 pktsize, hdr_len, curr_frag_len;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	pktsize -= curr_frag_len; /* account for above copied frag */
	for (i = 1; i < num_rcvd; i++) {
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(pktsize, rx_frag_size);

		skb_shinfo(skb)->frags[i].page = page_info->page;
		skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
		skb_shinfo(skb)->frags[i].size = curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb_shinfo(skb)->nr_frags++;
		pktsize -= curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}

done:
	be_rx_rate_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when LRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;
	int l4_cksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (l4_cksm && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	adapter->netdev->last_rx = jiffies;

	return;
}

/* Process the RX completion indicated by rxcp when LRO is enabled */
static void be_rx_compl_process_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	remaining = pkt_size;
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		rx_frags[i].page = page_info->page;
		rx_frags[i].page_offset = page_info->page_offset;
		rx_frags[i].size = curr_frag_len;
		remaining -= curr_frag_len;

		index_inc(&rxq_idx, rxq->len);

		memset(page_info, 0, sizeof(*page_info));
	}

	if (likely(!vlanf)) {
		lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
			pkt_size, NULL, 0);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
			rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
			vid, NULL, 0);
	}

	be_rx_rate_update(adapter, pkt_size, num_rcvd);
	return;
}

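/* A completion entry is valid only while its 'valid' dword is non-zero;
 * it is byte-swapped in place and the valid dword is then cleared so the
 * entry is not processed a second time when the ring wraps. */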
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

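/* GFP_ATOMIC because rx buffers may be replenished from NAPI context;
 * __GFP_COMP is presumably needed so that the per-fragment get_page()/
 * put_page() refcounting works on order > 0 allocations. */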
static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(&adapter->ctrl, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *
be_tx_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_q_clean(struct be_adapter *adapter)
{
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	u16 last_index;
	bool dummy_wrb;

	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		last_index = txq->tail;
		index_adv(&last_index,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, last_index);
	}
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* No more tx completions can be rcvd now; clean up if there are
	 * any pending completions or pending tx requests */
	be_tx_q_clean(adapter);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(&adapter->ctrl, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

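/* An EQ entry is valid while its evt word is non-zero; the resource id
 * encoded in it identifies the completion queue that has work pending. */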
static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
{
	struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
	u32 evt = entry->evt;

	if (!evt)
		return false;

	evt = le32_to_cpu(evt);
	*rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
	entry->evt = 0;
	queue_tail_inc(&eq_obj->q);
	return true;
}

static int event_handle(struct be_ctrl_info *ctrl,
			struct be_eq_obj *eq_obj)
{
	u16 rid = 0, num = 0;

	while (event_get(eq_obj, &rid))
		num++;

	/* We can see an interrupt and no event */
	be_eq_notify(ctrl, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int rx, tx;

	tx = event_handle(ctrl, &adapter->tx_eq);
	rx = event_handle(ctrl, &adapter->rx_eq);

	if (rx || tx)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->tx_eq);

	return IRQ_HANDLED;
}

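/* LRO is attempted only for error-free TCP frames, and only when
 * aggregation is enabled (max_rx_coal > 1). */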
static inline bool do_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
		false : true;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_lro(adapter, rxcp))
			be_rx_compl_process_lro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);
	}

	lro_flush_all(&adapter->rx_obj.lro_mgr);

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* For TX we don't honour budget; consume everything */
int be_poll_tx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *tx_cq = &tx_obj->cq;
	struct be_queue_info *txq = &tx_obj->q;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(adapter))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	/* As Tx wrbs have been freed up, wake up netdev queue if
	 * it was stopped due to lack of tx wrbs.
	 */
	if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
		netif_wake_queue(adapter->netdev);
	}

	napi_complete(napi);

	be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);

	drvr_stats(adapter)->be_tx_events++;
	drvr_stats(adapter)->be_tx_compl += num_cmpl;

	return 1;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	be_link_status_update(adapter);

	status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	be_rx_eqd_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

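/* EQ ids appear to be allocated in blocks of 8 per PCI function, so the
 * function-relative id (eq_id - 8 * pci_func) indexes the MSI-X table. */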
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[eq_id -
			8 * adapter->ctrl.pci_func].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	int status, vec;

	sprintf(tx_eq->desc, "%s-tx", netdev->name);
	vec = be_msix_vec_get(adapter, tx_eq->q.id);
	status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
	if (status)
		goto err;

	sprintf(rx_eq->desc, "%s-rx", netdev->name);
	vec = be_msix_vec_get(adapter, rx_eq->q.id);
	status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
	if (status) { /* Free TX IRQ */
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		free_irq(vec, adapter);
		goto err;
	}
	return 0;
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
		adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return status;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id);
	free_irq(vec, adapter);
	vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id);
	free_irq(vec, adapter);
done:
	adapter->isr_registered = false;
}

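/* Bring-up order: create the interface handle, program VLANs and flow
 * control, create tx then rx rings, post rx buffers, enable NAPI and
 * register irqs, and only then enable interrupts and arm the event and
 * completion queues (which are created unarmed). */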
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(ctrl, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(ctrl, true);

	/* The evt queues are created in the unarmed state; arm them */
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

	/* The compl queues are created in the unarmed state; arm them */
	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
	be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);

	be_link_status_update(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(ctrl, adapter->if_handle);
do_none:
	return status;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link.speed = PHY_LINK_SPEED_ZERO;

	be_intr_set(ctrl, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(ctrl, adapter->if_handle);
	return 0;
}

static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
			void **ip_hdr, void **tcpudp_hdr,
			u64 *hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;

	prefetch(va);
	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			veh = (struct vlan_ethhdr *)va;
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;

			ll_hlen += VLAN_HLEN;
		} else {
			return -1;
		}
	}
	*hdr_flags = LRO_IPV4;
	iph = (struct iphdr *)(va + ll_hlen);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;
	*hdr_flags |= LRO_TCP;
	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);

	return 0;
}

static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
{
	struct net_lro_mgr *lro_mgr;

	lro_mgr = &adapter->rx_obj.lro_mgr;
	lro_mgr->dev = netdev;
	lro_mgr->features = LRO_F_NAPI;
	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
	lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
	lro_mgr->get_frag_header = be_get_frag_header;
	lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	be_lro_init(adapter, netdev);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	if (ctrl->csr)
		iounmap(ctrl->csr);
	if (ctrl->db)
		iounmap(ctrl->db);
	if (ctrl->pcicfg)
		iounmap(ctrl->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->ctrl.csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->ctrl.db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->ctrl.pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

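/* The mailbox evidently needs to be 16-byte aligned: the backing buffer
 * is allocated 16 bytes larger than required and the aligned address
 * within it is what actually gets used (see PTR_ALIGN below). */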
/* Initialize the mbox required to send cmds to BE */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status;
	u32 val;

	status = be_map_pci_bars(adapter);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		be_unmap_pci_bars(adapter);
		return -1;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->cmd_lock);

	val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
		MEMBAR_CTRL_INT_CTRL_PFUNC_MASK;
	return 0;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_hw_up(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int status;

	status = be_cmd_POST(ctrl);
	if (status)
		return status;

	status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num);
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	struct be_ctrl_info *ctrl;
	u8 mac[ETH_ALEN];

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	ctrl = &adapter->ctrl;
	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_hw_up(adapter);
	if (status)
		goto stats_clean;

	status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		goto stats_clean;
	memcpy(netdev->dev_addr, mac, ETH_ALEN);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

	status = register_netdev(netdev);
	if (status != 0)
		goto stats_clean;

	dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
	return 0;

stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(adapter->netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096
		&& rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}
	/* Ensure rx_frag_size is aligned to cache line */
	if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
		printk(KERN_WARNING DRV_NAME
			" : Bad module param rx_frag_size. Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);