/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

#define IPOIB_OP_RECV	(1ul << 31)

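/*
 * Editorial note: wr_ids posted from this file are plain ring indices for
 * sends, and ring indices with bit 31 (IPOIB_OP_RECV) set for receives.
 * ipoib_ib_handle_wc() below uses that bit to route a completion to the
 * RX or TX handler, and the RX handler masks it off to recover the index.
 */
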
static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sge list;
	struct ib_recv_wr param;
	struct ib_recv_wr *bad_wr;
	int ret;

	list.addr     = priv->rx_ring[id].mapping;
	list.length   = IPOIB_BUF_SIZE;
	list.lkey     = priv->mr->lkey;

	param.next    = NULL;
	param.wr_id   = id | IPOIB_OP_RECV;
	param.sg_list = &list;
	param.num_sge = 1;

	ret = ib_post_recv(priv->qp, &param, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	u64 addr;

	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
	if (!skb)
		return -ENOMEM;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
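	/*
	 * Editorial note on the arithmetic: 4 reserved bytes + 40 (GRH) +
	 * 4 (IPoIB encapsulation header) = 48 bytes in front of the IP
	 * header, and 48 is a multiple of 16, so the IP header lands on a
	 * 16-byte boundary.
	 */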
	skb_reserve(skb, 4);

	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
				 DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	priv->rx_ring[id].skb     = skb;
	priv->rx_ring[id].mapping = addr;

	return 0;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 addr;

	ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
		       wr_id, wc->opcode, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;
	addr = priv->rx_ring[wr_id].mapping;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ib_dma_unmap_single(priv->ca, addr,
				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
		++priv->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

	skb_put(skb, wc->byte_len);
	skb_pull(skb, IB_GRH_BYTES);

	if (wc->slid != priv->local_lid ||
	    wc->src_qp != priv->qp->qp_num) {
		skb->protocol = ((struct ipoib_header *) skb->data)->proto;
		skb->mac.raw = skb->data;
		skb_pull(skb, IPOIB_ENCAP_LEN);

		dev->last_rx = jiffies;
		++priv->stats.rx_packets;
		priv->stats.rx_bytes += skb->len;

		skb->dev = dev;
		/* XXX get correct PACKET_ type here */
		skb->pkt_type = PACKET_HOST;
		netif_rx_ni(skb);
	} else {
		ipoib_dbg_data(priv, "dropping loopback packet\n");
		dev_kfree_skb_any(skb);
	}

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
		       wr_id, wc->opcode, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping,
			    tx_req->skb->len, DMA_TO_DEVICE);

	++priv->stats.tx_packets;
	priv->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++priv->tx_tail;
	if (netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
		netif_wake_queue(dev);
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

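/*
 * Editorial note: tx_head and tx_tail are free-running counters, so
 * tx_head - tx_tail is the number of sends still outstanding even across
 * wraparound.  ipoib_send() stops the queue when that difference reaches
 * ipoib_sendq_size; it is woken here once it drops to half the ring,
 * giving simple hysteresis on the TX path.
 */
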
static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
{
	if (wc->wr_id & IPOIB_OP_RECV)
		ipoib_ib_handle_rx_wc(dev, wc);
	else
		ipoib_ib_handle_tx_wc(dev, wc);
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *) dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_ib_handle_wc(dev, priv->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}

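/*
 * Editorial note: the CQ is re-armed with ib_req_notify_cq() before
 * polling, and polling repeats as long as full batches of IPOIB_NUM_WC
 * entries come back.  A completion arriving after the re-arm is either
 * picked up by a later poll iteration or raises a fresh event, so none
 * are missed.
 */
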
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr   = addr;
	priv->tx_sge.length = len;

	priv->tx_wr.wr_id            = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah         = address;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, dev->mtu + INFINIBAND_ALEN);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}
	tx_req->mapping = addr;

	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn, addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;

		if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
	}
}

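/*
 * Editorial note: ring slots are indexed as tx_head & (ipoib_sendq_size - 1),
 * which relies on ipoib_sendq_size being a power of two (the module
 * parameter is presumably rounded up elsewhere in the driver).
 */
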
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

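/*
 * Editorial note: the signed difference (int) tx_tail - (int) ah->last_send
 * is wraparound-safe, so an address handle is destroyed only after the send
 * that last referenced it has completed.
 */
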
void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	/*
	 * Move our QP to the error state and then reinitialize it when all
	 * work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ib_dma_unmap_single(priv->ca,
						    tx_req->mapping,
						    tx_req->skb->len,
						    DMA_TO_DEVICE);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ib_dma_unmap_single(priv->ca,
						    rx_req->mapping,
						    IPOIB_BUF_SIZE,
						    DMA_FROM_DEVICE);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

void ipoib_ib_dev_flush(struct work_struct *work)
{
	struct ipoib_dev_priv *cpriv, *priv =
		container_of(work, struct ipoib_dev_priv, flush_task);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	ipoib_dbg(priv, "flushing\n");

	ipoib_ib_dev_down(dev, 0);

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}

	mutex_lock(&priv->vlan_mutex);

	/* Flush any child interfaces too */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		ipoib_ib_dev_flush(&cpriv->flush_task);

	mutex_unlock(&priv->vlan_mutex);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key assignment
 * mechanism. It uses the same approach implemented for the multicast group
 * join. The single goal of this implementation is to quickly address
 * Bug #2507. This implementation will probably be removed when the P_Key
 * change async notification is available.
 */

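/*
 * Editorial summary of the flow below: ipoib_pkey_dev_delay_open() checks
 * the port's P_Key table once; if the interface's P_Key is not present yet
 * it arms ipoib_pkey_poll() to re-check every second (guarded by pkey_mutex
 * and IPOIB_PKEY_STOP) until the P_Key appears and the interface can be
 * brought up.
 */
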
void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_task, HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assignment flag                           */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_task, HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}