/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct work_struct timeout_work;
	struct work_struct cleanup_work;
	wait_queue_head_t wait;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;

	u64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

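/*
 * Drop the caller's reference, wait until every other reference has been
 * released, then free the address handle and the receive context.
 */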
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	atomic_dec(&rmpp_recv->refcount);
	wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

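/*
 * Tear down every in-progress RMPP receive for an agent: cancel the delayed
 * timeout/cleanup work, flush the port workqueue, then free each receive
 * context (and its partially coalesced MAD if the transfer never completed).
 */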
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

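/*
 * A receive timed out waiting for the next segment: drop the partially
 * assembled transfer and free its MADs.
 */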
static void recv_timeout_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	/* TODO: send abort. */
	rmpp_wc = rmpp_recv->rmpp_wc;
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

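/*
 * Allocate and initialize a receive context for a new RMPP transfer, keyed
 * off the first segment's work completion and MAD header.
 */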
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_waitqueue_head(&rmpp_recv->wait);
	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		wake_up(&rmpp_recv->wait);
}

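/*
 * An in-progress RMPP receive is identified by the transaction ID, source
 * QP, source LID, management class, class version, and method of its
 * segments.  The caller must hold agent->lock.
 */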
static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

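/* Offset of the class data area within the MAD; it varies by management class. */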
static int data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return offsetof(struct ib_sa_mad, data);
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return offsetof(struct ib_vendor_mad, data);
	else
		return offsetof(struct ib_rmpp_mad, data);
}

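/*
 * Build an ACK from the received data MAD, acknowledging the highest
 * in-order segment received so far and advertising the current window.
 */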
static void format_ack(struct ib_rmpp_mad *ack,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	unsigned long flags;

	memcpy(&ack->mad_hdr, &data->mad_hdr,
	       data_offset(data->mad_hdr.mgmt_class));

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	int hdr_len, ret;

	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack((struct ib_rmpp_mad *) msg->mad,
		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		ib_free_send_mad(msg);
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

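/*
 * Find the buffer after which a segment should be inserted so the segment
 * list stays sorted by segment number; returns NULL if the segment is
 * already present.
 */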
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > data_size || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

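/*
 * All segments have arrived: ACK the transfer, set the total MAD length,
 * and schedule cleanup of the receive context.
 */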
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

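/*
 * Copy a received MAD, which may span multiple RMPP segments, into a single
 * contiguous buffer supplied by the caller.
 */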
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
{
	struct ib_mad_recv_buf *seg_buf;
	struct ib_rmpp_mad *rmpp_mad;
	void *data;
	int size, len, offset;
	u8 flags;

	len = mad_recv_wc->mad_len;
	if (len <= sizeof(struct ib_mad)) {
		memcpy(buf, mad_recv_wc->recv_buf.mad, len);
		return;
	}

	offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);

	list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
		rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
		flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);

		if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
			data = rmpp_mad;
			size = sizeof(*rmpp_mad);
		} else {
			data = (void *) rmpp_mad + offset;
			if (flags & IB_MGMT_RMPP_FLAG_LAST)
				size = len;
			else
				size = sizeof(*rmpp_mad) - offset;
		}

		memcpy(buf, data, size);
		len -= size;
		buf += size;
	}
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);

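/*
 * Handle a DATA segment other than the first: insert it into the segment
 * list, advance the in-order segment count, and either complete the
 * transfer, ACK to advance the receive window, or drop invalid segments.
 */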
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

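/*
 * Handle the first DATA segment of a transfer: create and insert a new
 * receive context (deferring to continue_rmpp() for a duplicate first MAD),
 * then either complete a single-segment transfer or ACK the segment and
 * start the receive timeout.
 */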
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
{
	return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
	       (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
	       (mad_send_wr->seg_num - 1);
}

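/*
 * Format and post the next outgoing segment.  The first segment advertises
 * the total payload length and the last the final segment's payload length;
 * segments after the first use a two-entry gather list covering the MAD
 * headers and the next chunk of payload.
 */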
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		rmpp_mad->rmpp_hdr.paylen_newwin =
			cpu_to_be32(mad_send_wr->total_seg *
				    (sizeof(struct ib_rmpp_mad) -
				     offsetof(struct ib_rmpp_mad, data)));
		mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
	} else {
		mad_send_wr->send_wr.num_sge = 2;
		mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
		mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
		mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
						 mad_send_wr->data_offset;
		mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
	}

	if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		rmpp_mad->rmpp_hdr.paylen_newwin =
			cpu_to_be32(sizeof(struct ib_rmpp_mad) -
				    offsetof(struct ib_rmpp_mad, data) -
				    mad_send_wr->pad);
	}

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);
	mad_send_wr->seg_num++;
	return ib_send_mad(mad_send_wr);
}

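/*
 * Process an incoming ACK for an outstanding send: record the newly
 * acknowledged segments and window, complete the send if the final segment
 * was ACKed and no response is expected, or send the next segments allowed
 * by the new window.
 */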
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status)
		return;

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
	if (!mad_send_wr)
		goto out;	/* Unmatched ACK */

	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->total_seg)
		goto out;	/* Bad ACK */

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		mad_send_wr->last_ack = seg_num;
		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.wr_id = mad_send_wr->wr_id;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
					     send_wr.wr.ud.timeout_ms);
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

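/*
 * Entry point for received MADs: returns the work completion when a MAD is
 * ready to be handed to the client, or NULL if it was consumed by RMPP
 * processing.
 */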
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
		goto out;

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
			return start_rmpp(agent, mad_recv_wc);
		else
			return continue_rmpp(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
	case IB_MGMT_RMPP_TYPE_ABORT:
		/* TODO: process_rmpp_nack(agent, mad_recv_wc); */
		break;
	default:
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

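/*
 * Entry point for outgoing MADs: if the send is RMPP-active, set up the
 * segmentation state and post the first segment; otherwise leave it to the
 * caller as an ordinary MAD.
 */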
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int i, total_len, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;

	if (mad_send_wr->send_wr.num_sge > 1)
		return -EINVAL;		/* TODO: support num_sge > 1 */

	mad_send_wr->seg_num = 1;
	mad_send_wr->newwin = 1;
	mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);

	total_len = 0;
	for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
		total_len += mad_send_wr->send_wr.sg_list[i].length;

	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

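/*
 * Handle the send completion for a single segment: free internally
 * generated ACKs, finish the send if it is done or canceled, or post the
 * next segment allowed by the current window.
 */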
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_mad_send_buf *msg;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		msg = (struct ib_mad_send_buf *) (unsigned long)
		      mad_send_wc->wr_id;
		ib_free_send_mad(msg);
		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
	}

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num > mad_send_wr->newwin ||
	    mad_send_wr->seg_num > mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

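/* A send timed out waiting for an ACK: resend starting with the segment after the last one ACKed. */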
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;
	return IB_RMPP_RESULT_CONSUMED;
}