/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
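/*
 * Passive connections are tracked on an LRU list: RX_UPDATE_TIME is how
 * often a busy connection is moved back to the list head, RX_TIMEOUT is
 * how long an idle connection may stay there before the stale task tears
 * it down, and RX_DELAY is that task's polling interval.  RX_UPDATE_MASK
 * throttles the timestamp update to roughly one receive in four.
 */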
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff

static struct ib_recv_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
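/*
 * Repost a receive buffer on the shared receive queue.  The buffer index
 * is encoded in the work request id together with IPOIB_CM_OP_SRQ so the
 * completion handler can tell connected-mode completions apart.
 */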
static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	priv->cm.srq_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (--i; i >= 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
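/*
 * The drain logic posts a receive WR with a well-known wr_id to
 * rx_drain_qp, which ipoib_cm_dev_open() put into the error state, so the
 * WR completes with a "flush" status on the same CQ as the SRQ receives.
 * Seeing that completion is the signal that the QPs on the flush list are
 * done and can be moved to the reap list.
 */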
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_recv_wr *bad_wr;

	/* rx_drain_qp receive queue depth is 1, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	if (ib_post_recv(priv->cm.rx_drain_qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post rx_drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->cq, /* does not matter, we never send anything */
		.recv_cq = priv->cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	return ib_create_qp(priv->pd, &attr);
}
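/*
 * Walk the passive-side QP through INIT and RTR using the attributes the
 * CM derived from the REQ; this QP never sends, so RTS is not needed.
 */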
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}
	return 0;
}
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.target_ack_delay = 20; /* FIXME */
	rep.srq = 1;
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}
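/*
 * Passive-side connection setup: a REQ from a remote node creates a
 * dedicated RC receive QP fed from the SRQ, moves it to RTR with a random
 * starting PSN, answers with a REP, and puts the connection on the
 * passive_ids LRU list so the stale task can eventually reap it.
 */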
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		goto err_rep;
	}

	cm_id->context = p;
	p->jiffies = jiffies;
	p->state = IPOIB_CM_RX_LIVE;
	spin_lock_irq(&priv->lock);
	if (list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	list_add(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	return 0;

err_rep:
err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
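/*
 * Receive completion handler for connected mode.  A freshly allocated
 * replacement buffer is posted for the completed slot; the received skb
 * keeps only as many fragment pages as the payload needs, and any unused
 * pages are handed over to the replacement skb by skb_put_frags().
 */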
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~IPOIB_CM_OP_SRQ)) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->cm.srq_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++priv->stats.rx_dropped;
		goto repost;
	}

	if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
		p = wc->qp->qp_context;
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++priv->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++priv->stats.rx_packets;
	priv->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_cm_post_receive failed "
			   "for buf %d\n", wr_id);
}
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr = addr;
	priv->tx_sge.length = len;

	priv->tx_wr.wr_id = wr_id;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		}
	}
}
static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
				  struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++priv->stats.tx_packets;
	priv->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
	    tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		netif_wake_queue(dev);
	}

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		/* queue would be re-started anyway when TX is destroyed,
		 * but it makes sense to do it ASAP here. */
		if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
			netif_wake_queue(dev);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}
static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
	struct ipoib_cm_tx *tx = tx_ptr;
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}
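/*
 * Bring up the passive side of connected mode: create the drain QP used by
 * ipoib_cm_start_rx_drain(), put it straight into the error state, and
 * start listening for REQs on a service ID derived from our UD QP number.
 */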
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr qp_init_attr = {
		.send_cq = priv->cq, /* does not matter, we never send anything */
		.recv_cq = priv->cq,
		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.cap.max_recv_wr = 1,
		.cap.max_recv_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_UC,
	};
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.rx_drain_qp = ib_create_qp(priv->pd, &qp_init_attr);
	if (IS_ERR(priv->cm.rx_drain_qp)) {
		printk(KERN_WARNING "%s: failed to create drain QP\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.rx_drain_qp);
		return ret;
	}

	/*
	 * We put the QP in error state directly. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	ret = ib_modify_qp(priv->cm.rx_drain_qp, &ipoib_cm_err_attr, IB_QP_STATE);
	if (ret) {
		ipoib_warn(priv, "failed to modify drain QP to error: %d\n", ret);
		goto err_qp;
	}

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		priv->cm.id = NULL;
		goto err_qp;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;
err_qp:
	ib_destroy_qp(priv->cm.rx_drain_qp);
	return ret;
}
void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p, *n;
	unsigned long begin;
	LIST_HEAD(list);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list, &list);
			list_splice_init(&priv->cm.rx_error_list, &list);
			list_splice_init(&priv->cm.rx_drain_list, &list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		spin_lock_irq(&priv->lock);
	}

	list_splice_init(&priv->cm.rx_reap_list, &list);

	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(p, n, &list, list) {
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
	}

	ib_destroy_qp(priv->cm.rx_drain_qp);
	cancel_delayed_work(&priv->cm.stale_task);
}
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
			   p->mtu, priv->dev->mtu);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {};
	attr.recv_cq = priv->cq;
	attr.srq = priv->cm.srq;
	attr.cap.max_send_wr = ipoib_sendq_size;
	attr.cap.max_send_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;

	return ib_create_qp(priv->pd, &attr);
}
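/*
 * Active-side REQ: the IETF-style service ID encodes the remote UD QP
 * number, and the private data advertises our own UD QPN and receive
 * buffer size so the peer can validate the MTU in its REP handler.
 */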
static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof data;
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= 1;
	return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			     GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
			     ipoib_sendq_size + 1, 0);
	if (IS_ERR(p->cq)) {
		ret = PTR_ERR(p->cq);
		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
		goto err_cq;
	}

	ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
		goto err_req_notify;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to INIT: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
err_req_notify:
	ib_destroy_cq(p->cq);
err_cq:
	p->cq = NULL;
	kfree(p->tx_ring);
	p->tx_ring = NULL;
err_tx:
	return ret;
}
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->qp)
		ib_destroy_qp(p->qp);

	if (p->cq)
		ib_destroy_cq(p->cq);

	if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
		netif_wake_queue(p->dev);

	if (p->tx_ring) {
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
			ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
					    DMA_TO_DEVICE);
			dev_kfree_skb_any(tx_req->skb);
			++p->tx_tail;
		}

		kfree(p->tx_ring);
	}

	kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
	}
}
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.rx_reap_task);
	struct ipoib_cm_rx *p, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(p, n, &list, list) {
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
	}
}
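/*
 * The passive_ids list is kept in LRU order, so the stale task can walk it
 * from the tail and stop at the first connection that has seen traffic
 * within IPOIB_CM_RX_TIMEOUT; older ones are moved to the error state so
 * the drain/reap machinery tears them down.
 */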
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);
		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}
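/*
 * Per-device initialization: one SRQ feeds every connected-mode RX QP, and
 * each SRQ entry is described by IPOIB_CM_RX_SG scatter/gather elements --
 * a linear header buffer followed by full pages for the rest of the
 * payload.  Setting the first hardware address byte to IPOIB_FLAGS_RC
 * advertises connected-mode support to peers.
 */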
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = IPOIB_CM_RX_SG
		}
	};
	int ret, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		ret = PTR_ERR(priv->cm.srq);
		priv->cm.srq = NULL;
		return ret;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ipoib_cm_dev_cleanup(dev);
		return -ENOMEM;
	}

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
					   priv->cm.srq_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -ENOMEM;
		}
		if (ipoib_cm_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_cm_post_receive failed for buf %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -EIO;
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->cm.srq_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      priv->cm.srq_ring[i].mapping);
			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
			priv->cm.srq_ring[i].skb = NULL;
		}

	kfree(priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}