/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
        RMPP_STATE_ACTIVE,
        RMPP_STATE_TIMEOUT,
        RMPP_STATE_COMPLETE
};

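/*
 * Reassembly context for one inbound RMPP transfer.  An entry lives on
 * the agent's rmpp_list and is keyed by the sender's transaction ID,
 * source QP, source LID, management class, class version, and method.
 */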
struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
        struct work_struct timeout_work;
        struct work_struct cleanup_work;
        wait_queue_head_t wait;
        enum rmpp_state state;
        spinlock_t lock;
        atomic_t refcount;

        struct ib_ah *ah;
        struct ib_mad_recv_wc *rmpp_wc;
        struct ib_mad_recv_buf *cur_seg_buf;
        int last_ack;
        int seg_num;
        int newwin;

        u64 tid;
        u32 src_qp;
        u16 slid;
        u8 mgmt_class;
        u8 class_version;
        u8 method;
};

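/*
 * Release the creation reference, wait until no other thread still
 * holds the context, then free it.
 */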
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        atomic_dec(&rmpp_recv->refcount);
        wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
        ib_destroy_ah(rmpp_recv->ah);
        kfree(rmpp_recv);
}

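/*
 * Flush all in-progress receives for an agent being torn down: cancel
 * the delayed work items, drain the port work queue, then free any
 * partially assembled MADs.  Completed transfers have already passed
 * ownership of their receive buffers to the client.
 */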
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
        struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                cancel_delayed_work(&rmpp_recv->timeout_work);
                cancel_delayed_work(&rmpp_recv->cleanup_work);
        }
        spin_unlock_irqrestore(&agent->lock, flags);

        flush_workqueue(agent->qp_info->port_priv->wq);

        list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
                                 &agent->rmpp_list, list) {
                list_del(&rmpp_recv->list);
                if (rmpp_recv->state != RMPP_STATE_COMPLETE)
                        ib_free_recv_mad(rmpp_recv->rmpp_wc);
                destroy_rmpp_recv(rmpp_recv);
        }
}

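/*
 * No new segment arrived before the reassembly timeout expired: drop
 * the transfer and free whatever segments were received.
 */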
static void recv_timeout_handler(void *data)
{
        struct mad_rmpp_recv *rmpp_recv = data;
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        rmpp_recv->state = RMPP_STATE_TIMEOUT;
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

        /* TODO: send abort. */
        rmpp_wc = rmpp_recv->rmpp_wc;
        destroy_rmpp_recv(rmpp_recv);
        ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
        struct mad_rmpp_recv *rmpp_recv = data;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
        destroy_rmpp_recv(rmpp_recv);
}

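/*
 * Allocate a reassembly context for a new transfer, using the first
 * segment's work completion to create an address handle for ACKs and
 * to record the fields that identify the sender.
 */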
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr;

        rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
        if (!rmpp_recv)
                return NULL;

        rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
                                             mad_recv_wc->wc,
                                             mad_recv_wc->recv_buf.grh,
                                             agent->agent.port_num);
        if (IS_ERR(rmpp_recv->ah))
                goto error;

        rmpp_recv->agent = agent;
        init_waitqueue_head(&rmpp_recv->wait);
        INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
        INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);

        rmpp_recv->rmpp_wc = mad_recv_wc;
        rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
        rmpp_recv->newwin = 1;
        rmpp_recv->seg_num = 1;
        rmpp_recv->last_ack = 0;

        mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
        rmpp_recv->tid = mad_hdr->tid;
        rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
        rmpp_recv->slid = mad_recv_wc->wc->slid;
        rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
        rmpp_recv->class_version = mad_hdr->class_version;
        rmpp_recv->method = mad_hdr->method;
        return rmpp_recv;

error:  kfree(rmpp_recv);
        return NULL;
}

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        if (atomic_dec_and_test(&rmpp_recv->refcount))
                wake_up(&rmpp_recv->wait);
}

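/* Caller must hold agent->lock. */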
static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid == mad_hdr->tid &&
                    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
                    rmpp_recv->slid == mad_recv_wc->wc->slid &&
                    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
                    rmpp_recv->class_version == mad_hdr->class_version &&
                    rmpp_recv->method == mad_hdr->method)
                        return rmpp_recv;
        }
        return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv)
                atomic_inc(&rmpp_recv->refcount);
        spin_unlock_irqrestore(&agent->lock, flags);
        return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct mad_rmpp_recv *rmpp_recv)
{
        struct mad_rmpp_recv *cur_rmpp_recv;

        cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
        if (!cur_rmpp_recv)
                list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

        return cur_rmpp_recv;
}

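/*
 * The RMPP data payload starts after the class header, whose size
 * differs for SA and range-2 vendor classes.
 */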
static int data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return offsetof(struct ib_sa_mad, data);
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return offsetof(struct ib_vendor_mad, data);
        else
                return offsetof(struct ib_rmpp_mad, data);
}

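/*
 * Turn a copy of the received segment's headers into an ACK: toggle
 * the response bit in the method, set the ACK type, and report the
 * current segment number and window under the receive lock.
 */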
static void format_ack(struct ib_rmpp_mad *ack,
                       struct ib_rmpp_mad *data,
                       struct mad_rmpp_recv *rmpp_recv)
{
        unsigned long flags;

        memcpy(&ack->mad_hdr, &data->mad_hdr,
               data_offset(data->mad_hdr.mgmt_class));

        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        rmpp_recv->last_ack = rmpp_recv->seg_num;
        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

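/*
 * Allocate, format, and post an RMPP ACK back to the sender for the
 * current receive window.
 */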
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
                     struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_send_wr *bad_send_wr;
        int hdr_len, ret;

        hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
                                 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
                                 GFP_KERNEL);
        if (IS_ERR(msg))
                return;

        format_ack((struct ib_rmpp_mad *) msg->mad,
                   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
        ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
                               &bad_send_wr);
        if (ret)
                ib_free_send_mad(msg);
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
                                                    struct ib_mad_recv_buf *seg)
{
        if (seg->list.next == rmpp_list)
                return NULL;

        return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

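/*
 * Advertise a receive window of 1/8 of the receive queue depth, with a
 * minimum of one segment.
 */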
static inline int window_size(struct ib_mad_agent_private *agent)
{
        return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

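/*
 * Segments are kept sorted by segment number.  Scanning in reverse,
 * return the received segment that seg_num should be inserted after,
 * or NULL if seg_num is already present (a duplicate).
 */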
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
                                                  int seg_num)
{
        struct ib_mad_recv_buf *seg_buf;
        int cur_seg_num;

        list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
                cur_seg_num = get_seg_num(seg_buf);
                if (seg_num > cur_seg_num)
                        return seg_buf;
                if (seg_num == cur_seg_num)
                        break;
        }
        return NULL;
}

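/*
 * Walk forward from the newly inserted segment, extending the
 * contiguously received prefix while the segment numbers remain
 * consecutive.
 */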
static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
                           struct ib_mad_recv_buf *new_buf)
{
        struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

        while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
                rmpp_recv->cur_seg_buf = new_buf;
                rmpp_recv->seg_num++;
                new_buf = get_next_seg(rmpp_list, new_buf);
        }
}

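/*
 * Compute the total length of the reassembled MAD: the class header
 * plus one full data payload per segment, minus the padding reported
 * in the last segment's PayloadLength field.
 */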
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *rmpp_mad;
        int hdr_size, data_size, pad;

        rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

        hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
        data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
        pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
        if (pad > data_size || pad < 0)
                pad = 0;

        return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

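/*
 * All segments have arrived.  ACK the transfer and return the receive
 * work completion to the caller; the context stays on the list briefly
 * so that retransmitted segments can still be ACKed.
 */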
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_mad_recv_wc *rmpp_wc;

        ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
        if (rmpp_recv->seg_num > 1)
                cancel_delayed_work(&rmpp_recv->timeout_work);

        rmpp_wc = rmpp_recv->rmpp_wc;
        rmpp_wc->mad_len = get_mad_len(rmpp_recv);
        /* 10 seconds until we can find the packet lifetime */
        queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
                           &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
        return rmpp_wc;
}

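/*
 * Copy a reassembled MAD into a single contiguous buffer supplied by
 * the caller: the full first segment, including headers, followed by
 * the data portion of each remaining segment.
 */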
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
{
        struct ib_mad_recv_buf *seg_buf;
        struct ib_rmpp_mad *rmpp_mad;
        void *data;
        int size, len, offset;
        u8 flags;

        len = mad_recv_wc->mad_len;
        if (len <= sizeof(struct ib_mad)) {
                memcpy(buf, mad_recv_wc->recv_buf.mad, len);
                return;
        }

        offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);

        list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
                rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
                flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);

                if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
                        data = rmpp_mad;
                        size = sizeof(*rmpp_mad);
                } else {
                        data = (void *) rmpp_mad + offset;
                        if (flags & IB_MGMT_RMPP_FLAG_LAST)
                                size = len;
                        else
                                size = sizeof(*rmpp_mad) - offset;
                }

                memcpy(buf, data, size);
                len -= size;
                buf += size;
        }
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);

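/*
 * Handle a segment other than the first: locate the transfer it
 * belongs to, drop out-of-window segments, re-ACK duplicates, and
 * insert the segment at its position in the segment list.  Returns the
 * completed receive work completion once the last segment is in place,
 * or NULL while reassembly is still in progress.
 */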
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
              struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_recv_buf *prev_buf;
        struct ib_mad_recv_wc *done_wc;
        int seg_num;
        unsigned long flags;

        rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv)
                goto drop1;

        seg_num = get_seg_num(&mad_recv_wc->recv_buf);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
            (seg_num > rmpp_recv->newwin))
                goto drop3;

        if ((seg_num <= rmpp_recv->last_ack) ||
            (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
                spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                ack_recv(rmpp_recv, mad_recv_wc);
                goto drop2;
        }

        prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
        if (!prev_buf)
                goto drop3;

        done_wc = NULL;
        list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
        if (rmpp_recv->cur_seg_buf == prev_buf) {
                update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
                if (get_last_flag(rmpp_recv->cur_seg_buf)) {
                        rmpp_recv->state = RMPP_STATE_COMPLETE;
                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                        done_wc = complete_rmpp(rmpp_recv);
                        goto out;
                } else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
                        rmpp_recv->newwin += window_size(agent);
                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                        ack_recv(rmpp_recv, mad_recv_wc);
                        goto out;
                }
        }
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
        deref_rmpp_recv(rmpp_recv);
        return done_wc;

drop3:  spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:  deref_rmpp_recv(rmpp_recv);
drop1:  ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

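/*
 * Handle the first segment of a transfer.  If a context already exists
 * for this sender, the segment is a duplicate and is treated as a
 * continuation; otherwise a new context is inserted, and the transfer
 * either completes immediately (single-segment MAD) or is ACKed and
 * armed with a reassembly timeout.
 */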
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
           struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv) {
                ib_free_recv_mad(mad_recv_wc);
                return NULL;
        }

        spin_lock_irqsave(&agent->lock, flags);
        if (insert_rmpp_recv(agent, rmpp_recv)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* duplicate first MAD */
                destroy_rmpp_recv(rmpp_recv);
                return continue_rmpp(agent, mad_recv_wc);
        }
        atomic_inc(&rmpp_recv->refcount);

        if (get_last_flag(&mad_recv_wc->recv_buf)) {
                rmpp_recv->state = RMPP_STATE_COMPLETE;
                spin_unlock_irqrestore(&agent->lock, flags);
                complete_rmpp(rmpp_recv);
        } else {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* 40 seconds until we can find the packet lifetimes */
                queue_delayed_work(agent->qp_info->port_priv->wq,
                                   &rmpp_recv->timeout_work,
                                   msecs_to_jiffies(40000));
                rmpp_recv->newwin += window_size(agent);
                ack_recv(rmpp_recv, mad_recv_wc);
                mad_recv_wc = NULL;
        }
        deref_rmpp_recv(rmpp_recv);
        return mad_recv_wc;
}

static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
{
        return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
               (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
               (mad_send_wr->seg_num - 1);
}

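/*
 * Post the next unsent segment.  The first segment is sent from the
 * original buffer in a single SGE; later segments reuse the headers
 * via one SGE and point a second SGE at the next slice of payload.
 * The first and last segments carry the total and residual payload
 * lengths, respectively, in paylen_newwin.
 */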
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int timeout;

        rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);

        if (mad_send_wr->seg_num == 1) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
                rmpp_mad->rmpp_hdr.paylen_newwin =
                        cpu_to_be32(mad_send_wr->total_seg *
                                    (sizeof(struct ib_rmpp_mad) -
                                       offsetof(struct ib_rmpp_mad, data)));
                mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
        } else {
                mad_send_wr->send_wr.num_sge = 2;
                mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
                mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
                mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
                                                 mad_send_wr->data_offset;
                mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
        }

        if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
                rmpp_mad->rmpp_hdr.paylen_newwin =
                        cpu_to_be32(sizeof(struct ib_rmpp_mad) -
                                    offsetof(struct ib_rmpp_mad, data) -
                                    mad_send_wr->pad);
        }

        /* 2 seconds for an ACK until we can find the packet lifetime */
        timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
        if (!timeout || timeout > 2000)
                mad_send_wr->timeout = msecs_to_jiffies(2000);
        mad_send_wr->seg_num++;
        return ib_send_mad(mad_send_wr);
}

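/*
 * Process an ACK for an outbound transfer: record the new window and
 * last acknowledged segment, complete the send once the final segment
 * has been ACKed and no response is expected, or push out the next
 * segment if the window has opened.
 */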
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
                             struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_rmpp_mad *rmpp_mad;
        unsigned long flags;
        int seg_num, newwin, ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (rmpp_mad->rmpp_hdr.rmpp_status)
                return;

        seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
        newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
        if (!mad_send_wr)
                goto out;       /* Unmatched ACK */

        if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        if (seg_num > mad_send_wr->total_seg)
                goto out;       /* Bad ACK */

        if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
                goto out;       /* Old ACK */

        if (seg_num > mad_send_wr->last_ack) {
                mad_send_wr->last_ack = seg_num;
                mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
        }
        mad_send_wr->newwin = newwin;
        if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
                /* If no response is expected, the ACK completes the send */
                if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
                        struct ib_mad_send_wc wc;

                        ib_mark_mad_done(mad_send_wr);
                        spin_unlock_irqrestore(&agent->lock, flags);

                        wc.status = IB_WC_SUCCESS;
                        wc.vendor_err = 0;
                        wc.wr_id = mad_send_wr->wr_id;
                        ib_mad_complete_send_wr(mad_send_wr, &wc);
                        return;
                }
                if (mad_send_wr->refcount == 1)
                        ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
                                             send_wr.wr.ud.timeout_ms);
        } else if (mad_send_wr->refcount == 1 &&
                   mad_send_wr->seg_num < mad_send_wr->newwin &&
                   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
                /* Send failure will just result in a timeout/retry */
                ret = send_next_seg(mad_send_wr);
                if (ret)
                        goto out;

                mad_send_wr->refcount++;
                list_del(&mad_send_wr->agent_list);
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_send_wr->mad_agent_priv->send_list);
        }
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

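/*
 * Entry point for inbound MADs.  Non-RMPP MADs are returned to the
 * caller untouched.  RMPP DATA segments feed the reassembly logic,
 * ACKs update the matching send, and anything else (including unknown
 * RMPP versions) is dropped.
 */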
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
                        struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
                return mad_recv_wc;

        if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
                goto out;

        switch (rmpp_mad->rmpp_hdr.rmpp_type) {
        case IB_MGMT_RMPP_TYPE_DATA:
                if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
                        return start_rmpp(agent, mad_recv_wc);
                else
                        return continue_rmpp(agent, mad_recv_wc);
        case IB_MGMT_RMPP_TYPE_ACK:
                process_rmpp_ack(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_STOP:
        case IB_MGMT_RMPP_TYPE_ABORT:
                /* TODO: process_rmpp_nack(agent, mad_recv_wc); */
                break;
        default:
                break;
        }
out:
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

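/*
 * Entry point for outbound MADs.  For an RMPP DATA transfer, compute
 * the segment count and padding from the request, then post the first
 * segment; the remaining segments are driven by send completions and
 * ACKs.
 */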
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int i, total_len, ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED;

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
                return IB_RMPP_RESULT_INTERNAL;

        if (mad_send_wr->send_wr.num_sge > 1)
                return -EINVAL;         /* TODO: support num_sge > 1 */

        mad_send_wr->seg_num = 1;
        mad_send_wr->newwin = 1;
        mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);

        total_len = 0;
        for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
                total_len += mad_send_wr->send_wr.sg_list[i].length;

        mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
                        (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
        mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
                           be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

        /* We need to wait for the final ACK even if there isn't a response */
        mad_send_wr->refcount += (mad_send_wr->timeout == 0);
        ret = send_next_seg(mad_send_wr);
        if (!ret)
                return IB_RMPP_RESULT_CONSUMED;
        return ret;
}

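/*
 * Handle a send completion for an RMPP MAD.  Locally generated ACKs
 * are simply freed; for DATA segments, send the next segment while the
 * window allows, otherwise wait for an ACK (or for the response once
 * all segments have been ACKed).
 */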
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_rmpp_mad *rmpp_mad;
        struct ib_mad_send_buf *msg;
        int ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
                msg = (struct ib_mad_send_buf *) (unsigned long)
                      mad_send_wc->wr_id;
                ib_free_send_mad(msg);
                return IB_RMPP_RESULT_INTERNAL;  /* ACK, STOP, or ABORT */
        }

        if (mad_send_wc->status != IB_WC_SUCCESS ||
            mad_send_wr->status != IB_WC_SUCCESS)
                return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

        if (!mad_send_wr->timeout)
                return IB_RMPP_RESULT_PROCESSED; /* Response received */

        if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
                mad_send_wr->timeout =
                        msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
                return IB_RMPP_RESULT_PROCESSED; /* Send done */
        }

        if (mad_send_wr->seg_num > mad_send_wr->newwin ||
            mad_send_wr->seg_num > mad_send_wr->total_seg)
                return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

        ret = send_next_seg(mad_send_wr);
        if (ret) {
                mad_send_wc->status = IB_WC_GENERAL_ERR;
                return IB_RMPP_RESULT_PROCESSED;
        }
        return IB_RMPP_RESULT_CONSUMED;
}

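/*
 * Retry an outbound transfer after a timeout by resending from the
 * first unacknowledged segment.
 */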
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (mad_send_wr->last_ack == mad_send_wr->total_seg)
                return IB_RMPP_RESULT_PROCESSED;

        mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
        ret = send_next_seg(mad_send_wr);
        if (ret)
                return IB_RMPP_RESULT_PROCESSED;

        return IB_RMPP_RESULT_CONSUMED;
}