/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

#define IPOIB_OP_RECV	(1ul << 31)

static DEFINE_MUTEX(pkey_mutex);
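/*
 * Address handle (AH) management: AHs are reference counted, and a
 * released AH is only destroyed once the hardware can no longer be
 * using it, i.e. once priv->tx_tail has passed the AH's last_send
 * counter.  Until then it sits on priv->dead_ahs and is reaped later.
 */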
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
		ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
		ib_destroy_ah(ah->ah);
		kfree(ah);
	} else {
		spin_lock_irqsave(&priv->lock, flags);
		list_add_tail(&ah->list, &priv->dead_ahs);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}
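/*
 * Post a receive work request for ring slot @id: a single scatter/gather
 * entry covering the pre-mapped receive buffer.  On failure the buffer
 * is unmapped and its skb freed, leaving the slot empty.
 */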
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sge list;
	struct ib_recv_wr param;
	struct ib_recv_wr *bad_wr;
	int ret;

	list.addr     = priv->rx_ring[id].mapping;
	list.length   = IPOIB_BUF_SIZE;
	list.lkey     = priv->mr->lkey;

	param.next    = NULL;
	param.wr_id   = id | IPOIB_OP_RECV;
	param.sg_list = &list;
	param.num_sge = 1;

	ret = ib_post_recv(priv->qp, &param, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		dma_unmap_single(priv->ca->dma_device,
				 priv->rx_ring[id].mapping,
				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}
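/* Allocate and DMA-map a fresh receive skb for ring slot @id. */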
static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
	if (!skb)
		return -ENOMEM;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	addr = dma_map_single(priv->ca->dma_device,
			      skb->data, IPOIB_BUF_SIZE,
			      DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(addr))) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	priv->rx_ring[id].skb     = skb;
	priv->rx_ring[id].mapping = addr;

	return 0;
}
static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
		if (ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}
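/*
 * Completion handling: receive work requests are tagged by setting the
 * IPOIB_OP_RECV bit in the wr_id, so a completion's wr_id tells us both
 * whether it was a send or a receive and which ring slot it belongs to.
 */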
static void ipoib_ib_handle_wc(struct net_device *dev,
			       struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;

	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
		       wr_id, wc->opcode, wc->status);

	if (wr_id & IPOIB_OP_RECV) {
		wr_id &= ~IPOIB_OP_RECV;

		if (wr_id < IPOIB_RX_RING_SIZE) {
			struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
			dma_addr_t      addr = priv->rx_ring[wr_id].mapping;

			if (unlikely(wc->status != IB_WC_SUCCESS)) {
				if (wc->status != IB_WC_WR_FLUSH_ERR)
					ipoib_warn(priv, "failed recv event "
						   "(status=%d, wrid=%d vend_err %x)\n",
						   wc->status, wr_id, wc->vendor_err);
				dma_unmap_single(priv->ca->dma_device, addr,
						 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				priv->rx_ring[wr_id].skb = NULL;
				return;
			}

			/*
			 * If we can't allocate a new RX buffer, dump
			 * this packet and reuse the old buffer.
			 */
			if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
				++priv->stats.rx_dropped;
				goto repost;
			}

			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
				       wc->byte_len, wc->slid);

			dma_unmap_single(priv->ca->dma_device, addr,
					 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, wc->byte_len);
			skb_pull(skb, IB_GRH_BYTES);

			if (wc->slid != priv->local_lid ||
			    wc->src_qp != priv->qp->qp_num) {
				skb->protocol = ((struct ipoib_header *) skb->data)->proto;
				skb->mac.raw = skb->data;
				skb_pull(skb, IPOIB_ENCAP_LEN);

				dev->last_rx = jiffies;
				++priv->stats.rx_packets;
				priv->stats.rx_bytes += skb->len;

				skb->dev = dev;
				/* XXX get correct PACKET_ type here */
				skb->pkt_type = PACKET_HOST;
				netif_rx_ni(skb);
			} else {
				ipoib_dbg_data(priv, "dropping loopback packet\n");
				dev_kfree_skb_any(skb);
			}

		repost:
			if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
				ipoib_warn(priv, "ipoib_ib_post_receive failed "
					   "for buf %d\n", wr_id);
		} else
			ipoib_warn(priv, "completion event with wrid %d\n",
				   wr_id);

	} else {
		struct ipoib_tx_buf *tx_req;
		unsigned long flags;

		if (wr_id >= IPOIB_TX_RING_SIZE) {
			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
				   wr_id, IPOIB_TX_RING_SIZE);
			return;
		}

		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);

		tx_req = &priv->tx_ring[wr_id];

		dma_unmap_single(priv->ca->dma_device,
				 pci_unmap_addr(tx_req, mapping),
				 tx_req->skb->len,
				 DMA_TO_DEVICE);

		++priv->stats.tx_packets;
		priv->stats.tx_bytes += tx_req->skb->len;

		dev_kfree_skb_any(tx_req->skb);

		spin_lock_irqsave(&priv->tx_lock, flags);
		++priv->tx_tail;
		if (netif_queue_stopped(dev) &&
		    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
			netif_wake_queue(dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);

		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed send event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
	}
}
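/*
 * CQ event handler: re-arm the completion notification first, then poll
 * until the CQ returns fewer than IPOIB_NUM_WC entries, so completions
 * that arrive while we are polling are not missed.
 */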
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *) dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_ib_handle_wc(dev, priv->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    dma_addr_t addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr            = addr;
	priv->tx_sge.length          = len;

	priv->tx_wr.wr_id            = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah         = address;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
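/*
 * Transmit path: map the skb, record it in the TX ring, then post a UD
 * send work request to the QP.  TX ring slots are indexed with
 * tx_head & (IPOIB_TX_RING_SIZE - 1), so the ring size must be a power
 * of two.
 */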
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	dma_addr_t addr;

	if (skb->len > dev->mtu + INFINIBAND_ALEN) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, dev->mtu + INFINIBAND_ALEN);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
	tx_req->skb = skb;
	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
			      DMA_TO_DEVICE);
	pci_unmap_addr_set(tx_req, mapping, addr);

	if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
			       address->ah, qpn, addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		dma_unmap_single(priv->ca->dma_device, addr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;

		if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
	}
}
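/*
 * Walk the dead AH list and destroy any address handle whose last send
 * has already completed (tx_tail has caught up with last_send); entries
 * the hardware may still be using are left for a later pass.
 */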
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->lock);
	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			list_add_tail(&ah->list, &remove_list);
		}
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(ah, tah, &remove_list, list) {
		ipoib_dbg(priv, "Reaping ah %p\n", ah->ah);
		ib_destroy_ah(ah->ah);
		kfree(ah);
	}
}
void ipoib_reap_ah(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
}
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

	return 0;
}
int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}
int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}
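/*
 * Stop the data path: move the QP to the error state so outstanding work
 * requests complete with a flush error, wait (with a timeout) for the TX
 * and RX rings to drain, then reset the QP and reap any remaining AHs.
 */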
int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	/*
	 * Move our QP to the error state and then reinitialize when
	 * all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * Assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(IPOIB_TX_RING_SIZE - 1)];
				dma_unmap_single(priv->ca->dma_device,
						 pci_unmap_addr(tx_req, mapping),
						 tx_req->skb->len,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
			}

			for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
				if (priv->rx_ring[i].skb) {
					dma_unmap_single(priv->ca->dma_device,
							 pci_unmap_addr(&priv->rx_ring[i],
									mapping),
							 IPOIB_BUF_SIZE,
							 DMA_FROM_DEVICE);
					dev_kfree_skb_any(priv->rx_ring[i].skb);
					priv->rx_ring[i].skb = NULL;
				}

			goto timeout;
		}

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	return 0;
}
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}
void ipoib_ib_dev_flush(void *_dev)
{
	struct net_device *dev = (struct net_device *)_dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		return;

	ipoib_dbg(priv, "flushing\n");

	ipoib_ib_dev_down(dev, 0);

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_ib_dev_up(dev);

	mutex_lock(&priv->vlan_mutex);

	/* Flush any child interfaces too */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		ipoib_ib_dev_flush(&cpriv->dev);

	mutex_unlock(&priv->vlan_mutex);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}
/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
void ipoib_pkey_poll(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table */
	/* and set the interface pkey assignment flag                   */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}