net/irda/irttp.c
1 /*********************************************************************
2  *                
3  * Filename:      irttp.c
4  * Version:       1.2
5  * Description:   Tiny Transport Protocol (TTP) implementation
6  * Status:        Stable
7  * Author:        Dag Brattli <dagb@cs.uit.no>
8  * Created at:    Sun Aug 31 20:14:31 1997
9  * Modified at:   Wed Jan  5 11:31:27 2000
10  * Modified by:   Dag Brattli <dagb@cs.uit.no>
11  * 
12  *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>, 
13  *     All Rights Reserved.
14  *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15  *     
16  *     This program is free software; you can redistribute it and/or 
17  *     modify it under the terms of the GNU General Public License as 
18  *     published by the Free Software Foundation; either version 2 of 
19  *     the License, or (at your option) any later version.
20  *
21  *     Neither Dag Brattli nor University of Tromsø admit liability nor
22  *     provide warranty for any of this software. This material is 
23  *     provided "AS-IS" and at no charge.
24  *
25  ********************************************************************/
26
27 #include <linux/config.h>
28 #include <linux/skbuff.h>
29 #include <linux/init.h>
30 #include <linux/seq_file.h>
31
32 #include <asm/byteorder.h>
33 #include <asm/unaligned.h>
34
35 #include <net/irda/irda.h>
36 #include <net/irda/irlap.h>
37 #include <net/irda/irlmp.h>
38 #include <net/irda/parameters.h>
39 #include <net/irda/irttp.h>
40
41 static struct irttp_cb *irttp;
42
43 static void __irttp_close_tsap(struct tsap_cb *self);
44
45 static int irttp_data_indication(void *instance, void *sap, 
46                                  struct sk_buff *skb);
47 static int irttp_udata_indication(void *instance, void *sap, 
48                                   struct sk_buff *skb);
49 static void irttp_disconnect_indication(void *instance, void *sap,  
50                                         LM_REASON reason, struct sk_buff *);
51 static void irttp_connect_indication(void *instance, void *sap, 
52                                      struct qos_info *qos, __u32 max_sdu_size,
53                                      __u8 header_size, struct sk_buff *skb);
54 static void irttp_connect_confirm(void *instance, void *sap, 
55                                   struct qos_info *qos, __u32 max_sdu_size, 
56                                   __u8 header_size, struct sk_buff *skb);
57 static void irttp_run_tx_queue(struct tsap_cb *self);
58 static void irttp_run_rx_queue(struct tsap_cb *self);
59
60 static void irttp_flush_queues(struct tsap_cb *self);
61 static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
62 static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
63 static void irttp_todo_expired(unsigned long data);
64 static int irttp_param_max_sdu_size(void *instance, irda_param_t *param, 
65                                     int get);
66
67 static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
68 static void irttp_status_indication(void *instance,
69                                     LINK_STATUS link, LOCK_STATUS lock);
70
71 /* Information for parsing parameters in IrTTP */
72 static pi_minor_info_t pi_minor_call_table[] = {
73         { NULL, 0 },                                             /* 0x00 */
74         { irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
75 };
76 static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
77 static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
78
79 /************************ GLOBAL PROCEDURES ************************/
80
81 /*
82  * Function irttp_init (void)
83  *
84  *    Initialize the IrTTP layer. Called by module initialization code
85  *
86  */
87 int __init irttp_init(void)
88 {
89         irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL);
90         if (irttp == NULL)
91                 return -ENOMEM;
92         memset(irttp, 0, sizeof(struct irttp_cb));
93
94         irttp->magic = TTP_MAGIC;
95
96         irttp->tsaps = hashbin_new(HB_LOCK);
97         if (!irttp->tsaps) {
98                 IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
99                            __FUNCTION__);
100                 kfree(irttp);
101                 return -ENOMEM;
102         }
103
104         return 0;
105 }
106
107 /*
108  * Function irttp_cleanup (void)
109  *
110  *    Called by module destruction/cleanup code
111  *
112  */
113 void __exit irttp_cleanup(void) 
114 {
115         /* Check for main structure */
116         IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
117
118         /*
119          *  Delete hashbin and close all TSAP instances in it
120          */
121         hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);
122
123         irttp->magic = 0;
124
125         /* De-allocate main structure */
126         kfree(irttp);
127
128         irttp = NULL;
129 }
130
131 /*************************** SUBROUTINES ***************************/
132
133 /*
134  * Function irttp_start_todo_timer (self, timeout)
135  *
136  *    Start todo timer.
137  *
138  * Made it more efficient and insensitive to race conditions - Jean II
139  */
140 static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
141 {
142         /* Set new value for timer */
143         mod_timer(&self->todo_timer, jiffies + timeout);
144 }
145
146 /*
147  * Function irttp_todo_expired (data)
148  *
149  *    Todo timer has expired!
150  *
151  * One of the restrictions of the timer is that it is run only on the timer
152  * interrupt, which runs every 10ms. This means that even if you set the timer
153  * with a delay of 0, it may take up to 10ms before it's run.
154  * So, to minimise latency and keep cache fresh, we try to avoid using
155  * it as much as possible.
156  * Note : we can't use tasklets, because they can't be asynchronously
157  * killed (need user context), and we can't guarantee that here...
158  * Jean II
159  */
160 static void irttp_todo_expired(unsigned long data)
161 {
162         struct tsap_cb *self = (struct tsap_cb *) data;
163
164         /* Check that we still exist */
165         if (!self || self->magic != TTP_TSAP_MAGIC)
166                 return;
167
168         IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self);
169
170         /* Try to make some progress, especially on Tx side - Jean II */
171         irttp_run_rx_queue(self);
172         irttp_run_tx_queue(self);
173
174         /* Check if time for disconnect */
175         if (test_bit(0, &self->disconnect_pend)) {
176                 /* Check if it's possible to disconnect yet */
177                 if (skb_queue_empty(&self->tx_queue)) {
178                         /* Make sure disconnect is not pending anymore */
179                         clear_bit(0, &self->disconnect_pend);   /* FALSE */
180
181                         /* Note : self->disconnect_skb may be NULL */
182                         irttp_disconnect_request(self, self->disconnect_skb,
183                                                  P_NORMAL);
184                         self->disconnect_skb = NULL;
185                 } else {
186                         /* Try again later */
187                         irttp_start_todo_timer(self, HZ/10);
188
189                         /* No reason to try and close now */
190                         return;
191                 }
192         }
193
194         /* Check if it's closing time */
195         if (self->close_pend)
196                 /* Finish cleanup */
197                 irttp_close_tsap(self);
198 }
199
200 /*
201  * Function irttp_flush_queues (self)
202  *
203  *     Flushes (removes all frames from) the tx, rx and fragment queues
204  */
205 void irttp_flush_queues(struct tsap_cb *self)
206 {
207         struct sk_buff* skb;
208
209         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
210
211         IRDA_ASSERT(self != NULL, return;);
212         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
213
214         /* Deallocate frames waiting to be sent */
215         while ((skb = skb_dequeue(&self->tx_queue)) != NULL)
216                 dev_kfree_skb(skb);
217
218         /* Deallocate received frames */
219         while ((skb = skb_dequeue(&self->rx_queue)) != NULL)
220                 dev_kfree_skb(skb);
221
222         /* Deallocate received fragments */
223         while ((skb = skb_dequeue(&self->rx_fragments)) != NULL)
224                 dev_kfree_skb(skb);
225 }
226
227 /*
228  * Function irttp_reassemble (self)
229  *
230  *    Makes a new (contiguous) skb of all the fragments in the fragment
231  *    queue
232  *
233  */
234 static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
235 {
236         struct sk_buff *skb, *frag;
237         int n = 0;  /* Fragment index */
238
239         IRDA_ASSERT(self != NULL, return NULL;);
240         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
241
242         IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __FUNCTION__,
243                    self->rx_sdu_size);
244
245         skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
246         if (!skb)
247                 return NULL;
248
249         /*
250          * Need to reserve space for the TTP header in case this skb
251          * needs to be requeued should delivery fail
252          */
253         skb_reserve(skb, TTP_HEADER);
254         skb_put(skb, self->rx_sdu_size);
255
256         /*
257          *  Copy all fragments to a new buffer
258          */
259         while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
260                 memcpy(skb->data+n, frag->data, frag->len);
261                 n += frag->len;
262
263                 dev_kfree_skb(frag);
264         }
265
266         IRDA_DEBUG(2,
267                    "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
268                    __FUNCTION__, n, self->rx_sdu_size, self->rx_max_sdu_size);
269         /* Note : irttp_run_rx_queue() calculates self->rx_sdu_size
270          * by summing the size of all fragments, so we should always
271          * have n == self->rx_sdu_size, except in cases where we
272          * dropped the last fragment (when self->rx_sdu_size exceeds
273          * self->rx_max_sdu_size), where n < self->rx_sdu_size.
274          * Jean II */
275         IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;);
276
277         /* Set the new length */
278         skb_trim(skb, n);
279
280         self->rx_sdu_size = 0;
281
282         return skb;
283 }
284
285 /*
286  * Function irttp_fragment_skb (skb)
287  *
288  *    Fragments a frame and queues all the fragments for transmission
289  *
290  */
291 static inline void irttp_fragment_skb(struct tsap_cb *self,
292                                       struct sk_buff *skb)
293 {
294         struct sk_buff *frag;
295         __u8 *frame;
296
297         IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
298
299         IRDA_ASSERT(self != NULL, return;);
300         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
301         IRDA_ASSERT(skb != NULL, return;);
302
303         /*
304          *  Split frame into a number of segments
305          */
306         while (skb->len > self->max_seg_size) {
307                 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__);
308
309                 /* Make new segment */
310                 frag = dev_alloc_skb(self->max_seg_size+self->max_header_size);
311                 if (!frag)
312                         return;
313
314                 skb_reserve(frag, self->max_header_size);
315
316                 /* Copy data from the original skb into this fragment. */
317                 memcpy(skb_put(frag, self->max_seg_size), skb->data,
318                        self->max_seg_size);
319
320                 /* Insert TTP header, with the more bit set */
321                 frame = skb_push(frag, TTP_HEADER);
322                 frame[0] = TTP_MORE;
323
324                 /* Hide the copied data from the original skb */
325                 skb_pull(skb, self->max_seg_size);
326
327                 /* Queue fragment */
328                 skb_queue_tail(&self->tx_queue, frag);
329         }
330         /* Queue what is left of the original skb */
331         IRDA_DEBUG(2, "%s(), queuing last segment\n", __FUNCTION__);
332
333         frame = skb_push(skb, TTP_HEADER);
334         frame[0] = 0x00; /* Clear more bit */
335
336         /* Queue fragment */
337         skb_queue_tail(&self->tx_queue, skb);
338 }
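
/*
 * Worked example for the loop above, assuming an illustrative
 * max_seg_size of 2042 bytes (the real value depends on the negotiated
 * IrLAP frame size minus the headers): a 5000 byte SDU is queued as two
 * 2042 byte fragments with TTP_MORE set, followed by a final 916 byte
 * segment with the more bit cleared.
 */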
339
340 /*
341  * Function irttp_param_max_sdu_size (self, param)
342  *
343  *    Handle the MaxSduSize parameter in the connect frames, this function
344  *    will be called both when this parameter needs to be inserted into, and
345  *    extracted from the connect frames
346  */
347 static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
348                                     int get)
349 {
350         struct tsap_cb *self;
351
352         self = (struct tsap_cb *) instance;
353
354         IRDA_ASSERT(self != NULL, return -1;);
355         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
356
357         if (get)
358                 param->pv.i = self->tx_max_sdu_size;
359         else
360                 self->tx_max_sdu_size = param->pv.i;
361
362         IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __FUNCTION__, param->pv.i);
363
364         return 0;
365 }
366
367 /*************************** CLIENT CALLS ***************************/
368 /************************** LMP CALLBACKS **************************/
369 /* Everything is happily mixed up. Waiting for next clean up - Jean II */
370
371 /*
372  * Function irttp_open_tsap (stsap, notify)
373  *
374  *    Create TSAP connection endpoint,
375  */
376 struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
377 {
378         struct tsap_cb *self;
379         struct lsap_cb *lsap;
380         notify_t ttp_notify;
381
382         IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;);
383
384         /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
385          * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
386          * JeanII */
387         if((stsap_sel != LSAP_ANY) &&
388            ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
389                 IRDA_DEBUG(0, "%s(), invalid tsap!\n", __FUNCTION__);
390                 return NULL;
391         }
392
393         self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
394         if (self == NULL) {
395                 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__);
396                 return NULL;
397         }
398         memset(self, 0, sizeof(struct tsap_cb));
399         spin_lock_init(&self->lock);
400
401         /* Initialise todo timer */
402         init_timer(&self->todo_timer);
403         self->todo_timer.data     = (unsigned long) self;
404         self->todo_timer.function = &irttp_todo_expired;
405
406         /* Initialize callbacks for IrLMP to use */
407         irda_notify_init(&ttp_notify);
408         ttp_notify.connect_confirm = irttp_connect_confirm;
409         ttp_notify.connect_indication = irttp_connect_indication;
410         ttp_notify.disconnect_indication = irttp_disconnect_indication;
411         ttp_notify.data_indication = irttp_data_indication;
412         ttp_notify.udata_indication = irttp_udata_indication;
413         ttp_notify.flow_indication = irttp_flow_indication;
414         if(notify->status_indication != NULL)
415                 ttp_notify.status_indication = irttp_status_indication;
416         ttp_notify.instance = self;
417         strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
418
419         self->magic = TTP_TSAP_MAGIC;
420         self->connected = FALSE;
421
422         skb_queue_head_init(&self->rx_queue);
423         skb_queue_head_init(&self->tx_queue);
424         skb_queue_head_init(&self->rx_fragments);
425         /*
426          *  Create LSAP at IrLMP layer
427          */
428         lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
429         if (lsap == NULL) {
430                 IRDA_WARNING("%s: unable to allocate LSAP!!\n", __FUNCTION__);
                    kfree(self);
431                 return NULL;
432         }
433
434         /*
435          *  If user specified LSAP_ANY as source TSAP selector, then IrLMP
436          *  will replace it with whatever source selector is free, so
437          *  the stsap_sel we have might not be valid anymore
438          */
439         self->stsap_sel = lsap->slsap_sel;
440         IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __FUNCTION__, self->stsap_sel);
441
442         self->notify = *notify;
443         self->lsap = lsap;
444
445         hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);
446
447         if (credit > TTP_RX_MAX_CREDIT)
448                 self->initial_credit = TTP_RX_MAX_CREDIT;
449         else
450                 self->initial_credit = credit;
451
452         return self;
453 }
454 EXPORT_SYMBOL(irttp_open_tsap);
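
/*
 * Usage sketch (illustrative only): how a client might open a TSAP and
 * register its callbacks.  The example_* names and the initial credit
 * value are hypothetical; the types, callback signatures and constants
 * are the ones used in this file.
 */
#if 0
static int example_data_indication(void *instance, void *sap,
				   struct sk_buff *skb)
{
	/* Consume the SDU; returning 0 tells IrTTP we kept the skb */
	dev_kfree_skb(skb);
	return 0;
}

static void example_disconnect_indication(void *instance, void *sap,
					  LM_REASON reason,
					  struct sk_buff *skb)
{
	if (skb)
		dev_kfree_skb(skb);
}

static struct tsap_cb *example_open(void *priv)
{
	notify_t notify;

	irda_notify_init(&notify);
	notify.data_indication = example_data_indication;
	notify.disconnect_indication = example_disconnect_indication;
	notify.instance = priv;
	strlcpy(notify.name, "EXAMPLE", sizeof(notify.name));

	/* LSAP_ANY lets IrLMP pick a free selector; 14 is an arbitrary
	 * initial Rx credit (capped to TTP_RX_MAX_CREDIT above) */
	return irttp_open_tsap(LSAP_ANY, 14, &notify);
}
#endif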
455
456 /*
457  * Function __irttp_close_tsap (self)
458  *
459  *    Remove an instance of a TSAP. This function should only deal with the
460  *    deallocation of the TSAP and the resetting of the TSAP's values.
461  *
462  */
463 static void __irttp_close_tsap(struct tsap_cb *self)
464 {
465         /* Sanity checks */
466         IRDA_ASSERT(self != NULL, return;);
467         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
468
469         irttp_flush_queues(self);
470
471         del_timer(&self->todo_timer);
472
473         /* This one won't be cleaned up if we are disconnect_pend + close_pend
474          * and we receive a disconnect_indication */
475         if (self->disconnect_skb)
476                 dev_kfree_skb(self->disconnect_skb);
477
478         self->connected = FALSE;
479         self->magic = ~TTP_TSAP_MAGIC;
480
481         kfree(self);
482 }
483
484 /*
485  * Function irttp_close_tsap (self)
486  *
487  *    Remove TSAP from list of all TSAPs and then deallocate all resources
488  *    associated with this TSAP
489  *
490  * Note : because we *free* the tsap structure, it is the responsibility
491  * of the caller to make sure we are called only once and to deal with
492  * possible race conditions. - Jean II
493  */
494 int irttp_close_tsap(struct tsap_cb *self)
495 {
496         struct tsap_cb *tsap;
497
498         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
499
500         IRDA_ASSERT(self != NULL, return -1;);
501         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
502
503         /* Make sure tsap has been disconnected */
504         if (self->connected) {
505                 /* Check if disconnect is not pending */
506                 if (!test_bit(0, &self->disconnect_pend)) {
507                         IRDA_WARNING("%s: TSAP still connected!\n",
508                                      __FUNCTION__);
509                         irttp_disconnect_request(self, NULL, P_NORMAL);
510                 }
511                 self->close_pend = TRUE;
512                 irttp_start_todo_timer(self, HZ/10);
513
514                 return 0; /* Will be back! */
515         }
516
517         tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);
518
519         IRDA_ASSERT(tsap == self, return -1;);
520
521         /* Close corresponding LSAP */
522         if (self->lsap) {
523                 irlmp_close_lsap(self->lsap);
524                 self->lsap = NULL;
525         }
526
527         __irttp_close_tsap(self);
528
529         return 0;
530 }
531 EXPORT_SYMBOL(irttp_close_tsap);
532
533 /*
534  * Function irttp_udata_request (self, skb)
535  *
536  *    Send unreliable data on this TSAP
537  *
538  */
539 int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
540 {
541         IRDA_ASSERT(self != NULL, return -1;);
542         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
543         IRDA_ASSERT(skb != NULL, return -1;);
544
545         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
546
547         /* Check that nothing bad happens */
548         if ((skb->len == 0) || (!self->connected)) {
549                 IRDA_DEBUG(1, "%s(), No data, or not connected\n",
550                            __FUNCTION__);
551                 goto err;
552         }
553
554         if (skb->len > self->max_seg_size) {
555                 IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n",
556                            __FUNCTION__);
557                 goto err;
558         }
559
560         irlmp_udata_request(self->lsap, skb);
561         self->stats.tx_packets++;
562
563         return 0;
564
565 err:
566         dev_kfree_skb(skb);
567         return -1;
568 }
569 EXPORT_SYMBOL(irttp_udata_request);
570
571
572 /*
573  * Function irttp_data_request (handle, skb)
574  *
575  *    Queue frame for transmission. If SAR is enabled, fragment the frame
576  *    and queue the fragments for transmission
577  */
578 int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
579 {
580         __u8 *frame;
581         int ret;
582
583         IRDA_ASSERT(self != NULL, return -1;);
584         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
585         IRDA_ASSERT(skb != NULL, return -1;);
586
587         IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__,
588                    skb_queue_len(&self->tx_queue));
589
590         /* Check that nothing bad happens */
591         if ((skb->len == 0) || (!self->connected)) {
592                 IRDA_WARNING("%s: No data, or not connected\n", __FUNCTION__);
593                 ret = -ENOTCONN;
594                 goto err;
595         }
596
597         /*
598          *  Check if SAR is disabled, and the frame is larger than what fits
599          *  inside an IrLAP frame
600          */
601         if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
602                 IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
603                            __FUNCTION__);
604                 ret = -EMSGSIZE;
605                 goto err;
606         }
607
608         /*
609          *  Check if SAR is enabled, and the frame is larger than the
610          *  TxMaxSduSize
611          */
612         if ((self->tx_max_sdu_size != 0) &&
613             (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
614             (skb->len > self->tx_max_sdu_size))
615         {
616                 IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
617                            __FUNCTION__);
618                 ret = -EMSGSIZE;
619                 goto err;
620         }
621         /*
622          *  Check if transmit queue is full
623          */
624         if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
625                 /*
626                  *  Give it a chance to empty itself
627                  */
628                 irttp_run_tx_queue(self);
629
630                 /* Drop packet. This error code should trigger the caller
631                  * to resend the data in the client code - Jean II */
632                 ret = -ENOBUFS;
633                 goto err;
634         }
635
636         /* Queue frame, or queue frame segments */
637         if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
638                 /* Queue frame */
639                 IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
640                 frame = skb_push(skb, TTP_HEADER);
641                 frame[0] = 0x00; /* Clear more bit */
642
643                 skb_queue_tail(&self->tx_queue, skb);
644         } else {
645                 /*
646                  *  Fragment the frame; this function will also queue the
647                  *  fragments. We don't care that the transmit
648                  *  queue may be overfilled by all the segments for a little
649                  *  while
650                  */
651                 irttp_fragment_skb(self, skb);
652         }
653
654         /* Check if we can accept more data from client */
655         if ((!self->tx_sdu_busy) &&
656             (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
657                 /* Tx queue filling up, so stop client. */
658                 if (self->notify.flow_indication) {
659                         self->notify.flow_indication(self->notify.instance,
660                                                      self, FLOW_STOP);
661                 }
662                 /* self->tx_sdu_busy is the state of the client.
663                  * Update state after notifying client to avoid
664                  * race condition with irttp_flow_indication().
665                  * If the queue empties itself after our test but before
666                  * we set the flag, we will fix ourselves below in
667                  * irttp_run_tx_queue().
668                  * Jean II */
669                 self->tx_sdu_busy = TRUE;
670         }
671
672         /* Try to make some progress */
673         irttp_run_tx_queue(self);
674
675         return 0;
676
677 err:
678         dev_kfree_skb(skb);
679         return ret;
680 }
681 EXPORT_SYMBOL(irttp_data_request);
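
/*
 * Usage sketch (illustrative only): building an skb for
 * irttp_data_request().  max_header_size is the value the client was
 * given in its connect_confirm()/connect_indication() callback; it
 * already includes room for the TTP, LMP and LAP headers.  The same skb
 * layout works for irttp_udata_request() above, as long as the payload
 * fits in a single segment.  example_send() is hypothetical.
 */
#if 0
static int example_send(struct tsap_cb *tsap, unsigned int max_header_size,
			const void *buf, int len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(max_header_size + len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, max_header_size);
	memcpy(skb_put(skb, len), buf, len);

	/* On failure irttp_data_request() frees the skb itself */
	return irttp_data_request(tsap, skb);
}
#endif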
682
683 /*
684  * Function irttp_run_tx_queue (self)
685  *
686  *    Transmit packets queued for transmission (if possible)
687  *
688  */
689 static void irttp_run_tx_queue(struct tsap_cb *self)
690 {
691         struct sk_buff *skb;
692         unsigned long flags;
693         int n;
694
695         IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
696                    __FUNCTION__,
697                    self->send_credit, skb_queue_len(&self->tx_queue));
698
699         /* Get exclusive access to the tx queue, otherwise don't touch it */
700         if (irda_lock(&self->tx_queue_lock) == FALSE)
701                 return;
702
703         /* Try to send out frames as long as we have credits
704          * and as long as LAP is not full. If LAP is full, it will
705          * poll us through irttp_flow_indication() - Jean II */
706         while ((self->send_credit > 0) &&
707                (!irlmp_lap_tx_queue_full(self->lsap)) &&
708                (skb = skb_dequeue(&self->tx_queue)))
709         {
710                 /*
711                  *  Since we can transmit and receive frames concurrently,
712                  *  the code below is a critical region and we must assure that
713                  *  nobody messes with the credits while we update them.
714                  */
715                 spin_lock_irqsave(&self->lock, flags);
716
717                 n = self->avail_credit;
718                 self->avail_credit = 0;
719
720                 /* Only room for 127 credits in frame */
721                 if (n > 127) {
722                         self->avail_credit = n-127;
723                         n = 127;
724                 }
725                 self->remote_credit += n;
726                 self->send_credit--;
727
728                 spin_unlock_irqrestore(&self->lock, flags);
729
730                 /*
731                  *  More bit must be set by the data_request() or fragment()
732                  *  functions
733                  */
734                 skb->data[0] |= (n & 0x7f);
735
736                 /* Detach from socket.
737                  * The current skb has a reference to the socket that sent
738                  * it (skb->sk). When we pass it to IrLMP, the skb will be
739                  * stored in IrLAP (self->wx_list). When we are within
740                  * IrLAP, we lose the notion of socket, so we should not
741                  * have a reference to a socket. So, we drop it here.
742                  *
743                  * Why does it matter ?
744                  * When the skb is freed (kfree_skb), if it is associated
745                  * with a socket, it releases buffer space on the socket
746                  * (through sock_wfree() and sock_def_write_space()).
747                  * If the socket no longer exists, we may crash. Hard.
748                  * When we close a socket, we make sure that associated packets
749                  * in IrTTP are freed. However, we have no way to cancel
750                  * the packet that we have passed to IrLAP. So, if a packet
751                  * remains in IrLAP (retry on the link or else) after we
752                  * close the socket, we are dead !
753                  * Jean II */
754                 if (skb->sk != NULL) {
755                         /* IrSOCK application, IrOBEX, ... */
756                         skb_orphan(skb);
757                 }
758                         /* IrCOMM over IrTTP, IrLAN, ... */
759
760                 /* Pass the skb to IrLMP - done */
761                 irlmp_data_request(self->lsap, skb);
762                 self->stats.tx_packets++;
763         }
764
765         /* Check if we can accept more frames from client.
766          * We don't want to wait until the todo timer to do that, and we
767          * can't use tasklets (grr...), so we are obliged to give control
768          * to client. That's ok, this test will be true not too often
769          * (max once per LAP window) and we are called from places
770          * where we can spend a bit of time doing stuff. - Jean II */
771         if ((self->tx_sdu_busy) &&
772             (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
773             (!self->close_pend))
774         {
775                 if (self->notify.flow_indication)
776                         self->notify.flow_indication(self->notify.instance,
777                                                      self, FLOW_START);
778
779                 /* self->tx_sdu_busy is the state of the client.
780                  * We don't really have a race here, but it's always safer
781                  * to update our state after the client - Jean II */
782                 self->tx_sdu_busy = FALSE;
783         }
784
785         /* Reset lock */
786         self->tx_queue_lock = 0;
787 }
788
789 /*
790  * Function irttp_give_credit (self)
791  *
792  *    Send a dataless flowdata TTP-PDU and give available credit to peer
793  *    TSAP
794  */
795 static inline void irttp_give_credit(struct tsap_cb *self)
796 {
797         struct sk_buff *tx_skb = NULL;
798         unsigned long flags;
799         int n;
800
801         IRDA_ASSERT(self != NULL, return;);
802         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
803
804         IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
805                    __FUNCTION__,
806                    self->send_credit, self->avail_credit, self->remote_credit);
807
808         /* Give credit to peer */
809         tx_skb = dev_alloc_skb(64);
810         if (!tx_skb)
811                 return;
812
813         /* Reserve space for LMP, and LAP header */
814         skb_reserve(tx_skb, self->max_header_size);
815
816         /*
817          *  Since we can transmit and receive frames concurrently,
818          *  the code below is a critical region and we must assure that
819          *  nobody messes with the credits while we update them.
820          */
821         spin_lock_irqsave(&self->lock, flags);
822
823         n = self->avail_credit;
824         self->avail_credit = 0;
825
826         /* Only space for 127 credits in frame */
827         if (n > 127) {
828                 self->avail_credit = n - 127;
829                 n = 127;
830         }
831         self->remote_credit += n;
832
833         spin_unlock_irqrestore(&self->lock, flags);
834
835         skb_put(tx_skb, 1);
836         tx_skb->data[0] = (__u8) (n & 0x7f);
837
838         irlmp_data_request(self->lsap, tx_skb);
839         self->stats.tx_packets++;
840 }
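
/*
 * For reference, the single TTP byte built above and in
 * irttp_run_tx_queue(), and parsed in irttp_data_indication() below:
 *
 *        bit 7   bits 6-0
 *      +-------+-----------------+
 *      |   M   |  delta credit   |   data / flowdata PDUs (TTP_MORE)
 *      +-------+-----------------+
 *      |   P   | initial credit  |   connect PDUs (TTP_PARAMETERS)
 *      +-------+-----------------+
 */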
841
842 /*
843  * Function irttp_udata_indication (instance, sap, skb)
844  *
845  *    Received some unit-data (unreliable)
846  *
847  */
848 static int irttp_udata_indication(void *instance, void *sap,
849                                   struct sk_buff *skb)
850 {
851         struct tsap_cb *self;
852         int err;
853
854         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
855
856         self = (struct tsap_cb *) instance;
857
858         IRDA_ASSERT(self != NULL, return -1;);
859         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
860         IRDA_ASSERT(skb != NULL, return -1;);
861
862         self->stats.rx_packets++;
863
864         /* Just pass data to layer above */
865         if (self->notify.udata_indication) {
866                 err = self->notify.udata_indication(self->notify.instance,
867                                                     self,skb);
868                 /* Same comment as in irttp_do_data_indication() */
869                 if (!err) 
870                         return 0;
871         }
872         /* Either no handler, or handler returns an error */
873         dev_kfree_skb(skb);
874
875         return 0;
876 }
877
878 /*
879  * Function irttp_data_indication (instance, sap, skb)
880  *
881  *    Receive segment from IrLMP.
882  *
883  */
884 static int irttp_data_indication(void *instance, void *sap,
885                                  struct sk_buff *skb)
886 {
887         struct tsap_cb *self;
888         unsigned long flags;
889         int n;
890
891         self = (struct tsap_cb *) instance;
892
893         n = skb->data[0] & 0x7f;     /* Extract the credits */
894
895         self->stats.rx_packets++;
896
897         /*  Deal with inbound credit
898          *  Since we can transmit and receive frames concurrently,
899          *  the code below is a critical region and we must assure that
900          *  nobody messes with the credits while we update them.
901          */
902         spin_lock_irqsave(&self->lock, flags);
903         self->send_credit += n;
904         if (skb->len > 1)
905                 self->remote_credit--;
906         spin_unlock_irqrestore(&self->lock, flags);
907
908         /*
909          *  Data or dataless packet? Dataless frames contain only the
910          *  TTP_HEADER.
911          */
912         if (skb->len > 1) {
913                 /*
914                  *  We don't remove the TTP header, since we must preserve the
915                  *  more bit, so the defragment routine knows what to do
916                  */
917                 skb_queue_tail(&self->rx_queue, skb);
918         } else {
919                 /* Dataless flowdata TTP-PDU */
920                 dev_kfree_skb(skb);
921         }
922
923
924         /* Push data to the higher layer.
925          * We do it synchronously because running the todo timer for each
926          * receive packet would be too much overhead and latency.
927          * By passing control to the higher layer, we run the risk that
928          * it may take time or grab a lock. Most often, the higher layer
929          * will only put packet in a queue.
930          * Anyway, packets are only dripping through the IrDA, so we can
931          * have time before the next packet.
932          * Further, we are run from NET_BH, so the worst that can happen is
933          * us missing the optimal time to send back the PF bit in LAP.
934          * Jean II */
935         irttp_run_rx_queue(self);
936
937         /* We now give credits to peer in irttp_run_rx_queue().
938          * We need to send credit *NOW*, otherwise we are going
939          * to miss the next Tx window. The todo timer may take
940          * a while before it's run... - Jean II */
941
942         /*
943          * If the peer device has given us some credits and we didn't have
944          * any before, then we need to schedule the tx queue.
945          * We need to do that because our Tx has stopped (so we may not
946          * get any LAP flow indication) and the user may be stopped as
947          * well. - Jean II
948          */
949         if (self->send_credit == n) {
950                 /* Restart pushing stuff to LAP */
951                 irttp_run_tx_queue(self);
952                 /* Note : we don't want to schedule the todo timer
953                  * because it has horrible latency. No tasklets
954                  * because the tasklet API is broken. - Jean II */
955         }
956
957         return 0;
958 }
959
960 /*
961  * Function irttp_status_indication (self, reason)
962  *
963  *    Status_indication, just pass to the higher layer...
964  *
965  */
966 static void irttp_status_indication(void *instance,
967                                     LINK_STATUS link, LOCK_STATUS lock)
968 {
969         struct tsap_cb *self;
970
971         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
972
973         self = (struct tsap_cb *) instance;
974
975         IRDA_ASSERT(self != NULL, return;);
976         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
977
978         /* Check if client has already closed the TSAP and gone away */
979         if (self->close_pend)
980                 return;
981
982         /*
983          *  Inform service user if he has requested it
984          */
985         if (self->notify.status_indication != NULL)
986                 self->notify.status_indication(self->notify.instance,
987                                                link, lock);
988         else
989                 IRDA_DEBUG(2, "%s(), no handler\n", __FUNCTION__);
990 }
991
992 /*
993  * Function irttp_flow_indication (self, reason)
994  *
995  *    Flow_indication : IrLAP tells us to send more data.
996  *
997  */
998 static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
999 {
1000         struct tsap_cb *self;
1001
1002         self = (struct tsap_cb *) instance;
1003
1004         IRDA_ASSERT(self != NULL, return;);
1005         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1006
1007         IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self);
1008
1009         /* We are "polled" directly from LAP, and the LAP wants to fill
1010          * its Tx window. We want to do our best to send it data, so that
1011          * we maximise the window. On the other hand, we want to limit the
1012          * amount of work here so that LAP doesn't hang forever waiting
1013          * for packets. - Jean II */
1014
1015         /* Try to send some packets. Currently, LAP calls us every time
1016          * there is one free slot, so we will send only one packet.
1017          * This allows the scheduler to do its round robin - Jean II */
1018         irttp_run_tx_queue(self);
1019
1020         /* Note regarding the interaction with the higher layer.
1021          * irttp_run_tx_queue() may call the client when its queue
1022          * starts to empty, via notify.flow_indication(). Initially,
1023          * I wanted this to happen in a tasklet, to avoid the client
1024          * grabbing the CPU, but we can't use tasklets safely. And timer
1025          * is definitely too slow.
1026          * This will happen only once per LAP window, and usually at
1027          * the third packet (unless window is smaller). LAP is still
1028          * doing mtt and sending first packet so it's sort of OK
1029          * to do that. Jean II */
1030
1031         /* If we need to send a disconnect, try to do it now */
1032         if(self->disconnect_pend)
1033                 irttp_start_todo_timer(self, 0);
1034 }
1035
1036 /*
1037  * Function irttp_flow_request (self, command)
1038  *
1039  *    This function could be used by the upper layers to tell IrTTP to stop
1040  *    delivering frames if the receive queues are starting to get full, or
1041  *    to tell IrTTP to start delivering frames again.
1042  */
1043 void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
1044 {
1045         IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
1046
1047         IRDA_ASSERT(self != NULL, return;);
1048         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1049
1050         switch (flow) {
1051         case FLOW_STOP:
1052                 IRDA_DEBUG(1, "%s(), flow stop\n", __FUNCTION__);
1053                 self->rx_sdu_busy = TRUE;
1054                 break;
1055         case FLOW_START:
1056                 IRDA_DEBUG(1, "%s(), flow start\n", __FUNCTION__);
1057                 self->rx_sdu_busy = FALSE;
1058
1059                 /* Client says it can accept more data; try to free our
1060                  * queues ASAP - Jean II */
1061                 irttp_run_rx_queue(self);
1062
1063                 break;
1064         default:
1065                 IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __FUNCTION__);
1066         }
1067 }
1068 EXPORT_SYMBOL(irttp_flow_request);
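
/*
 * Usage sketch (illustrative only): a client throttling delivery while
 * its own receive buffers are full.  example_rx_throttle() is
 * hypothetical.
 */
#if 0
static void example_rx_throttle(struct tsap_cb *tsap, int buffers_full)
{
	/* FLOW_STOP parks incoming SDUs in the rx queue; FLOW_START
	 * resumes delivery and flushes that queue right away */
	irttp_flow_request(tsap, buffers_full ? FLOW_STOP : FLOW_START);
}
#endif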
1069
1070 /*
1071  * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
1072  *
1073  *    Try to connect to remote destination TSAP selector
1074  *
1075  */
1076 int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1077                           __u32 saddr, __u32 daddr,
1078                           struct qos_info *qos, __u32 max_sdu_size,
1079                           struct sk_buff *userdata)
1080 {
1081         struct sk_buff *tx_skb;
1082         __u8 *frame;
1083         __u8 n;
1084
1085         IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __FUNCTION__, max_sdu_size);
1086
1087         IRDA_ASSERT(self != NULL, return -EBADR;);
1088         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
1089
1090         if (self->connected) {
1091                 if(userdata)
1092                         dev_kfree_skb(userdata);
1093                 return -EISCONN;
1094         }
1095
1096         /* Any userdata supplied? */
1097         if (userdata == NULL) {
1098                 tx_skb = dev_alloc_skb(64);
1099                 if (!tx_skb)
1100                         return -ENOMEM;
1101
1102                 /* Reserve space for MUX_CONTROL and LAP header */
1103                 skb_reserve(tx_skb, TTP_MAX_HEADER);
1104         } else {
1105                 tx_skb = userdata;
1106                 /*
1107                  *  Check that the client has reserved enough space for
1108                  *  headers
1109                  */
1110                 IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
1111                         { dev_kfree_skb(userdata); return -1; } );
1112         }
1113
1114         /* Initialize connection parameters */
1115         self->connected = FALSE;
1116         self->avail_credit = 0;
1117         self->rx_max_sdu_size = max_sdu_size;
1118         self->rx_sdu_size = 0;
1119         self->rx_sdu_busy = FALSE;
1120         self->dtsap_sel = dtsap_sel;
1121
1122         n = self->initial_credit;
1123
1124         self->remote_credit = 0;
1125         self->send_credit = 0;
1126
1127         /*
1128          *  Give away max 127 credits for now
1129          */
1130         if (n > 127) {
1131                 self->avail_credit=n-127;
1132                 n = 127;
1133         }
1134
1135         self->remote_credit = n;
1136
1137         /* SAR enabled? */
1138         if (max_sdu_size > 0) {
1139                 IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
1140                         { dev_kfree_skb(tx_skb); return -1; } );
1141
1142                 /* Insert SAR parameters */
1143                 frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
1144
1145                 frame[0] = TTP_PARAMETERS | n;
1146                 frame[1] = 0x04; /* Length */
1147                 frame[2] = 0x01; /* MaxSduSize */
1148                 frame[3] = 0x02; /* Value length */
1149
1150                 put_unaligned(cpu_to_be16((__u16) max_sdu_size),
1151                               (__u16 *)(frame+4));
1152         } else {
1153                 /* Insert plain TTP header */
1154                 frame = skb_push(tx_skb, TTP_HEADER);
1155
1156                 /* Insert initial credit in frame */
1157                 frame[0] = n & 0x7f;
1158         }
1159
1160         /* Connect with IrLMP. No QoS parameters for now */
1161         return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
1162                                      tx_skb);
1163 }
1164 EXPORT_SYMBOL(irttp_connect_request);
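
/*
 * Usage sketch (illustrative only): a client connecting to a peer.
 * daddr and dtsap_sel would normally come from discovery and an IAS
 * query; here they are plain parameters.  example_connect() is
 * hypothetical.
 */
#if 0
static int example_connect(struct tsap_cb *tsap, __u32 saddr, __u32 daddr,
			   __u8 dtsap_sel)
{
	/* No QoS constraints, no user data.  A max_sdu_size of 0 means
	 * we do not advertise SAR for reception; a non-zero value
	 * allows reassembly of inbound SDUs up to that size
	 * (TTP_SAR_UNBOUND removes the bound). */
	return irttp_connect_request(tsap, dtsap_sel, saddr, daddr,
				     NULL, 0, NULL);
}
#endif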
1165
1166 /*
1167  * Function irttp_connect_confirm (handle, qos, skb)
1168  *
1169  *    Service user confirms TSAP connection with peer.
1170  *
1171  */
1172 static void irttp_connect_confirm(void *instance, void *sap,
1173                                   struct qos_info *qos, __u32 max_seg_size,
1174                                   __u8 max_header_size, struct sk_buff *skb)
1175 {
1176         struct tsap_cb *self;
1177         int parameters;
1178         int ret;
1179         __u8 plen;
1180         __u8 n;
1181
1182         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
1183
1184         self = (struct tsap_cb *) instance;
1185
1186         IRDA_ASSERT(self != NULL, return;);
1187         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1188         IRDA_ASSERT(skb != NULL, return;);
1189
1190         self->max_seg_size = max_seg_size - TTP_HEADER;
1191         self->max_header_size = max_header_size + TTP_HEADER;
1192
1193         /*
1194          *  Check if we have got some QoS parameters back! This should be the
1195          *  negotiated QoS for the link.
1196          */
1197         if (qos) {
1198                 IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
1199                        qos->baud_rate.bits);
1200                 IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
1201                        qos->baud_rate.value);
1202         }
1203
1204         n = skb->data[0] & 0x7f;
1205
1206         IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __FUNCTION__, n);
1207
1208         self->send_credit = n;
1209         self->tx_max_sdu_size = 0;
1210         self->connected = TRUE;
1211
1212         parameters = skb->data[0] & 0x80;
1213
1214         IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
1215         skb_pull(skb, TTP_HEADER);
1216
1217         if (parameters) {
1218                 plen = skb->data[0];
1219
1220                 ret = irda_param_extract_all(self, skb->data+1,
1221                                              IRDA_MIN(skb->len-1, plen),
1222                                              &param_info);
1223
1224                 /* Any errors in the parameter list? */
1225                 if (ret < 0) {
1226                         IRDA_WARNING("%s: error extracting parameters\n",
1227                                      __FUNCTION__);
1228                         dev_kfree_skb(skb);
1229
1230                         /* Do not accept this connection attempt */
1231                         return;
1232                 }
1233                 /* Remove parameters */
1234                 skb_pull(skb, IRDA_MIN(skb->len, plen+1));
1235         }
1236
1237         IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__,
1238               self->send_credit, self->avail_credit, self->remote_credit);
1239
1240         IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __FUNCTION__,
1241                    self->tx_max_sdu_size);
1242
1243         if (self->notify.connect_confirm) {
1244                 self->notify.connect_confirm(self->notify.instance, self, qos,
1245                                              self->tx_max_sdu_size,
1246                                              self->max_header_size, skb);
1247         } else
1248                 dev_kfree_skb(skb);
1249 }
1250
1251 /*
1252  * Function irttp_connect_indication (handle, skb)
1253  *
1254  *    Some other device is connecting to this TSAP
1255  *
1256  */
1257 void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
1258                               __u32 max_seg_size, __u8 max_header_size,
1259                               struct sk_buff *skb)
1260 {
1261         struct tsap_cb *self;
1262         struct lsap_cb *lsap;
1263         int parameters;
1264         int ret;
1265         __u8 plen;
1266         __u8 n;
1267
1268         self = (struct tsap_cb *) instance;
1269
1270         IRDA_ASSERT(self != NULL, return;);
1271         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1272         IRDA_ASSERT(skb != NULL, return;);
1273
1274         lsap = (struct lsap_cb *) sap;
1275
1276         self->max_seg_size = max_seg_size - TTP_HEADER;
1277         self->max_header_size = max_header_size+TTP_HEADER;
1278
1279         IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __FUNCTION__, self->stsap_sel);
1280
1281         /* Need to update dtsap_sel if it's equal to LSAP_ANY */
1282         self->dtsap_sel = lsap->dlsap_sel;
1283
1284         n = skb->data[0] & 0x7f;
1285
1286         self->send_credit = n;
1287         self->tx_max_sdu_size = 0;
1288
1289         parameters = skb->data[0] & 0x80;
1290
1291         IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
1292         skb_pull(skb, TTP_HEADER);
1293
1294         if (parameters) {
1295                 plen = skb->data[0];
1296
1297                 ret = irda_param_extract_all(self, skb->data+1,
1298                                              IRDA_MIN(skb->len-1, plen),
1299                                              &param_info);
1300
1301                 /* Any errors in the parameter list? */
1302                 if (ret < 0) {
1303                         IRDA_WARNING("%s: error extracting parameters\n",
1304                                      __FUNCTION__);
1305                         dev_kfree_skb(skb);
1306
1307                         /* Do not accept this connection attempt */
1308                         return;
1309                 }
1310
1311                 /* Remove parameters */
1312                 skb_pull(skb, IRDA_MIN(skb->len, plen+1));
1313         }
1314
1315         if (self->notify.connect_indication) {
1316                 self->notify.connect_indication(self->notify.instance, self,
1317                                                 qos, self->tx_max_sdu_size,
1318                                                 self->max_header_size, skb);
1319         } else
1320                 dev_kfree_skb(skb);
1321 }
1322
1323 /*
1324  * Function irttp_connect_response (handle, userdata)
1325  *
1326  *    Service user is accepting the connection, just pass it down to
1327  *    IrLMP!
1328  *
1329  */
1330 int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1331                            struct sk_buff *userdata)
1332 {
1333         struct sk_buff *tx_skb;
1334         __u8 *frame;
1335         int ret;
1336         __u8 n;
1337
1338         IRDA_ASSERT(self != NULL, return -1;);
1339         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
1340
1341         IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __FUNCTION__,
1342                    self->stsap_sel);
1343
1344         /* Any userdata supplied? */
1345         if (userdata == NULL) {
1346                 tx_skb = dev_alloc_skb(64);
1347                 if (!tx_skb)
1348                         return -ENOMEM;
1349
1350                 /* Reserve space for MUX_CONTROL and LAP header */
1351                 skb_reserve(tx_skb, TTP_MAX_HEADER);
1352         } else {
1353                 tx_skb = userdata;
1354                 /*
1355                  *  Check that the client has reserved enough space for
1356                  *  headers
1357                  */
1358                 IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
1359                         { dev_kfree_skb(userdata); return -1; } );
1360         }
1361
1362         self->avail_credit = 0;
1363         self->remote_credit = 0;
1364         self->rx_max_sdu_size = max_sdu_size;
1365         self->rx_sdu_size = 0;
1366         self->rx_sdu_busy = FALSE;
1367
1368         n = self->initial_credit;
1369
1370         /* Frame has only space for max 127 credits (7 bits) */
1371         if (n > 127) {
1372                 self->avail_credit = n - 127;
1373                 n = 127;
1374         }
1375
1376         self->remote_credit = n;
1377         self->connected = TRUE;
1378
1379         /* SAR enabled? */
1380         if (max_sdu_size > 0) {
1381                 IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
1382                         { dev_kfree_skb(tx_skb); return -1; } );
1383
1384                 /* Insert TTP header with SAR parameters */
1385                 frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
1386
1387                 frame[0] = TTP_PARAMETERS | n;
1388                 frame[1] = 0x04; /* Length */
1389
1390                 /* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1,  */
1391 /*                                TTP_SAR_HEADER, &param_info) */
1392
1393                 frame[2] = 0x01; /* MaxSduSize */
1394                 frame[3] = 0x02; /* Value length */
1395
1396                 put_unaligned(cpu_to_be16((__u16) max_sdu_size),
1397                               (__u16 *)(frame+4));
1398         } else {
1399                 /* Insert TTP header */
1400                 frame = skb_push(tx_skb, TTP_HEADER);
1401
1402                 frame[0] = n & 0x7f;
1403         }
1404
1405         ret = irlmp_connect_response(self->lsap, tx_skb);
1406
1407         return ret;
1408 }
1409 EXPORT_SYMBOL(irttp_connect_response);
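
/*
 * Usage sketch (illustrative only): a server accepting an incoming
 * connection from its connect_indication() callback.  The example_*
 * names are hypothetical; the callback signature matches the notify_t
 * hooks used in this file.
 */
#if 0
static void example_connect_indication(void *instance, void *sap,
				       struct qos_info *qos,
				       __u32 max_sdu_size,
				       __u8 max_header_size,
				       struct sk_buff *skb)
{
	struct tsap_cb *tsap = sap;

	/* A real server would record max_sdu_size and max_header_size
	 * here before sending anything.  Accept with no inbound SAR
	 * and no user data. */
	irttp_connect_response(tsap, 0, NULL);

	if (skb)
		dev_kfree_skb(skb);
}
#endif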
1410
1411 /*
1412  * Function irttp_dup (self, instance)
1413  *
1414  *    Duplicate TSAP, can be used by servers to confirm a connection on a
1415  *    new TSAP so it can keep listening on the old one.
1416  */
1417 struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1418 {
1419         struct tsap_cb *new;
1420         unsigned long flags;
1421
1422         IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
1423
1424         /* Protect our access to the old tsap instance */
1425         spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
1426
1427         /* Find the old instance */
1428         if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
1429                 IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __FUNCTION__);
1430                 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1431                 return NULL;
1432         }
1433
1434         /* Allocate a new instance */
1435         new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
1436         if (!new) {
1437                 IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__);
1438                 spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1439                 return NULL;
1440         }
1441         /* Dup */
1442         memcpy(new, orig, sizeof(struct tsap_cb));
1443
1444         /* We don't need the old instance any more */
1445         spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
1446
1447         /* Try to dup the LSAP (may fail if we were too slow) */
1448         new->lsap = irlmp_dup(orig->lsap, new);
1449         if (!new->lsap) {
1450                 IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__);
1451                 kfree(new);
1452                 return NULL;
1453         }
1454
1455         /* Not everything should be copied */
1456         new->notify.instance = instance;
1457         init_timer(&new->todo_timer);
1458
1459         skb_queue_head_init(&new->rx_queue);
1460         skb_queue_head_init(&new->tx_queue);
1461         skb_queue_head_init(&new->rx_fragments);
1462
1463         /* This is locked */
1464         hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
1465
1466         return new;
1467 }
1468 EXPORT_SYMBOL(irttp_dup);
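
/*
 * Usage sketch (illustrative only) of the pattern described above: a
 * listening server hands an incoming connection over to a
 * per-connection instance so the original TSAP can keep listening.
 * struct example_conn and example_accept() are hypothetical.
 */
#if 0
struct example_conn {
	struct tsap_cb *tsap;
};

static int example_accept(struct tsap_cb *listen_tsap,
			  struct example_conn *conn)
{
	conn->tsap = irttp_dup(listen_tsap, conn);
	if (!conn->tsap)
		return -ENOMEM;

	/* Confirm the connection on the duplicate; the original TSAP
	 * stays available for the next incoming connection */
	return irttp_connect_response(conn->tsap, 0, NULL);
}
#endif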
1469
1470 /*
1471  * Function irttp_disconnect_request (self)
1472  *
1473  *    Close this connection please! If priority is high, the queued data
1474  *    segments, if any, will be deallocated first
1475  *
1476  */
1477 int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1478                              int priority)
1479 {
1480         int ret;
1481
1482         IRDA_ASSERT(self != NULL, return -1;);
1483         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
1484
1485         /* Already disconnected? */
1486         if (!self->connected) {
1487                 IRDA_DEBUG(4, "%s(), already disconnected!\n", __FUNCTION__);
1488                 if (userdata)
1489                         dev_kfree_skb(userdata);
1490                 return -1;
1491         }
1492
1493         /* Disconnect already pending ?
1494          * We need to use an atomic operation to prevent reentry. This
1495          * function may be called from various contexts, like user or timer,
1496          * for example following a disconnect_indication() (i.e. net_bh).
1497          * Jean II */
1498         if(test_and_set_bit(0, &self->disconnect_pend)) {
1499                 IRDA_DEBUG(0, "%s(), disconnect already pending\n",
1500                            __FUNCTION__);
1501                 if (userdata)
1502                         dev_kfree_skb(userdata);
1503
1504                 /* Try to make some progress */
1505                 irttp_run_tx_queue(self);
1506                 return -1;
1507         }
1508
1509         /*
1510          *  Check if there are still data segments in the transmit queue
1511          */
1512         if (!skb_queue_empty(&self->tx_queue)) {
1513                 if (priority == P_HIGH) {
1514                         /*
1515                          *  No need to send the queued data if we are
1516                          *  disconnecting right now, since there will be
1517                          *  no usable connection to send it on
1518                          */
1519                         IRDA_DEBUG(1, "%s(): High priority!\n", __FUNCTION__);
1520                         irttp_flush_queues(self);
1521                 } else if (priority == P_NORMAL) {
1522                         /*
1523                          *  Must delay disconnect until after all data segments
1524                          *  have been sent and the tx_queue is empty
1525                          */
1526                         /* We'll reuse this one later for the disconnect */
1527                         self->disconnect_skb = userdata;  /* May be NULL */
1528
1529                         irttp_run_tx_queue(self);
1530
1531                         irttp_start_todo_timer(self, HZ/10);
1532                         return -1;
1533                 }
1534         }
1535         /* Note : we don't need to check whether self->rx_queue is full or
1536          * the state of self->rx_sdu_busy, because the disconnect response
1537          * will be sent at the LMP level (so it goes out even if the peer
1538          * has its Tx queue full of data). - Jean II */
1539
1540         IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __FUNCTION__);
1541         self->connected = FALSE;
1542
1543         if (!userdata) {
1544                 struct sk_buff *tx_skb;
1545                 tx_skb = dev_alloc_skb(64);
1546                 if (!tx_skb)
1547                         return -ENOMEM;
1548
1549                 /*
1550                  *  Reserve space for MUX and LAP header
1551                  */
1552                 skb_reserve(tx_skb, TTP_MAX_HEADER);
1553
1554                 userdata = tx_skb;
1555         }
1556         ret = irlmp_disconnect_request(self->lsap, userdata);
1557
1558         /* The disconnect is no longer pending */
1559         clear_bit(0, &self->disconnect_pend);   /* FALSE */
1560
1561         return ret;
1562 }
1563 EXPORT_SYMBOL(irttp_disconnect_request);
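
     /*
      * Usage sketch (illustrative only): the priority argument selects the
      * behaviour implemented above. Here 'tsap' stands for a handle
      * previously obtained from irttp_open_tsap().
      *
      *    Graceful close, letting queued data drain first:
      *            irttp_disconnect_request(tsap, NULL, P_NORMAL);
      *
      *    Immediate close, dropping anything still queued:
      *            irttp_disconnect_request(tsap, NULL, P_HIGH);
      */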
1564
1565 /*
1566  * Function irttp_disconnect_indication (instance, sap, reason, skb)
1567  *
1568  *    Disconnect indication: the TSAP has been disconnected by the peer.
1569  *
1570  */
1571 void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason,
1572                                  struct sk_buff *skb)
1573 {
1574         struct tsap_cb *self;
1575
1576         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
1577
1578         self = (struct tsap_cb *) instance;
1579
1580         IRDA_ASSERT(self != NULL, return;);
1581         IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
1582
1583         /* Prevent the higher layer from sending more data */
1584         self->connected = FALSE;
1585
1586         /* Check if client has already tried to close the TSAP */
1587         if (self->close_pend) {
1588                 /* In this case, the higher layer is probably gone. Don't
1589                  * bother it; just clean up what remains - Jean II */
1590                 if (skb)
1591                         dev_kfree_skb(skb);
1592                 irttp_close_tsap(self);
1593                 return;
1594         }
1595
1596         /* If we are here, we assume that the higher layer is still
1597          * waiting for the disconnect notification and is able to process
1598          * it, even if it initiated the disconnect itself. Otherwise, it
1599          * would have already attempted to close the tsap and
1600          * self->close_pend would be TRUE. Jean II */
1601
1602         /* Notify the client, if it registered a disconnect callback */
1603         if (self->notify.disconnect_indication)
1604                 self->notify.disconnect_indication(self->notify.instance, self,
1605                                                    reason, skb);
1606         else
1607                 if (skb)
1608                         dev_kfree_skb(skb);
1609 }
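
     /*
      * For reference (illustrative only, hypothetical names): the
      * disconnect_indication callback invoked above has this shape. The
      * instance pointer is whatever the client stored in its notify_t, and
      * the callback typically records the loss of the connection and frees
      * the skb if one was passed along:
      *
      *    static void demo_disconnect_indication(void *instance, void *sap,
      *                                           LM_REASON reason,
      *                                           struct sk_buff *skb)
      *    {
      *            struct demo_client *client = instance;
      *
      *            client->connected = FALSE;
      *            if (skb)
      *                    dev_kfree_skb(skb);
      *    }
      */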
1610
1611 /*
1612  * Function irttp_do_data_indication (self, skb)
1613  *
1614  *    Try to deliver the reassembled skb to the layer above, and requeue it
1615  *    if that should fail for some reason. We mark the rx SDU as busy to
1616  *    apply back pressure if necessary.
1617  */
1618 static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
1619 {
1620         int err;
1621
1622         /* Check if client has already closed the TSAP and gone away */
1623         if (self->close_pend) {
1624                 dev_kfree_skb(skb);
1625                 return;
1626         }
1627
1628         err = self->notify.data_indication(self->notify.instance, self, skb);
1629
1630         /* Usually the layer above will signal that its input queue is
1631          * starting to fill up by using a flow request, but this may be
1632          * difficult, so it can instead simply refuse to accept the skb
1633          * and return an error
1634          */
1635         if (err) {
1636                 IRDA_DEBUG(0, "%s() requeueing skb!\n", __FUNCTION__);
1637
1638                 /* Make sure we take a break */
1639                 self->rx_sdu_busy = TRUE;
1640
1641                 /* Need to push the header in again */
1642                 skb_push(skb, TTP_HEADER);
1643                 skb->data[0] = 0x00; /* Make sure MORE bit is cleared */
1644
1645                 /* Put skb back on queue */
1646                 skb_queue_head(&self->rx_queue, skb);
1647         }
1648 }
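
     /*
      * For reference (illustrative only, hypothetical helpers): a client
      * data_indication callback that exercises the back-pressure path
      * above. Returning non-zero makes irttp_do_data_indication() set
      * rx_sdu_busy and requeue the skb; returning 0 means the callback has
      * taken ownership of the skb:
      *
      *    static int demo_data_indication(void *instance, void *sap,
      *                                    struct sk_buff *skb)
      *    {
      *            struct demo_client *client = instance;
      *
      *            if (demo_rx_buffer_full(client))
      *                    return -EBUSY;
      *
      *            demo_rx_enqueue(client, skb);
      *            return 0;
      *    }
      */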
1649
1650 /*
1651  * Function irttp_run_rx_queue (self)
1652  *
1653  *     Process any frames waiting in the receive queue, and check if we
1654  *     have any available credit to give away.
1655  */
1656 void irttp_run_rx_queue(struct tsap_cb *self)
1657 {
1658         struct sk_buff *skb;
1659         int more = 0;
1660
1661         IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__,
1662                    self->send_credit, self->avail_credit, self->remote_credit);
1663
1664         /* Get exclusive access to the rx queue, otherwise don't touch it */
1665         if (irda_lock(&self->rx_queue_lock) == FALSE)
1666                 return;
1667
1668         /*
1669          *  Reassemble all frames in receive queue and deliver them
1670          */
1671         while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
1672                 /* This bit will tell us if it's the last fragment or not */
1673                 more = skb->data[0] & 0x80;
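                     /* For reference: this first byte is the TTP Data PDU
                      * header. Bit 7 is the 'more' flag extracted here; the
                      * low 7 bits carry the delta credit, which was already
                      * accounted for before the skb was queued on rx_queue. */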
1674
1675                 /* Remove TTP header */
1676                 skb_pull(skb, TTP_HEADER);
1677
1678                 /* Add the length of the remaining data */
1679                 self->rx_sdu_size += skb->len;
1680
1681                 /*
1682                  * If SAR is disabled, or the user has requested no
1683                  * reassembly of received fragments, then we just deliver
1684                  * them immediately. This can be requested by clients that
1685                  * implement byte streams without any message boundaries
1686                  */
1687                 if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
1688                         irttp_do_data_indication(self, skb);
1689                         self->rx_sdu_size = 0;
1690
1691                         continue;
1692                 }
1693
1694                 /* Check if this is a fragment, and not the last fragment */
1695                 if (more) {
1696                         /*
1697                          *  Queue the fragment if we are still within the
1698                          *  maximum size of the rx_sdu
1699                          */
1700                         if (self->rx_sdu_size <= self->rx_max_sdu_size) {
1701                                 IRDA_DEBUG(4, "%s(), queueing frag\n",
1702                                            __FUNCTION__);
1703                                 skb_queue_tail(&self->rx_fragments, skb);
1704                         } else {
1705                                 /* Free the part of the SDU that is too big */
1706                                 dev_kfree_skb(skb);
1707                         }
1708                         continue;
1709                 }
1710                 /*
1711                  *  This is the last fragment, so time to reassemble!
1712                  */
1713                 if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
1714                     (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
1715                 {
1716                         /*
1717                          * A small optimization: only queue the fragment if
1718                          * there are other fragments. If this is the last
1719                          * and only fragment, there is no need to
1720                          * reassemble :-)
1721                          */
1722                         if (!skb_queue_empty(&self->rx_fragments)) {
1723                                 skb_queue_tail(&self->rx_fragments,
1724                                                skb);
1725
1726                                 skb = irttp_reassemble_skb(self);
1727                         }
1728
1729                         /* Now we can deliver the reassembled skb */
1730                         irttp_do_data_indication(self, skb);
1731                 } else {
1732                         IRDA_DEBUG(1, "%s(), Truncated frame\n", __FUNCTION__);
1733
1734                         /* Free the part of the SDU that is too big */
1735                         dev_kfree_skb(skb);
1736
1737                         /* Deliver only the valid but truncated part of the SDU */
1738                         skb = irttp_reassemble_skb(self);
1739
1740                         irttp_do_data_indication(self, skb);
1741                 }
1742                 self->rx_sdu_size = 0;
1743         }
1744
1745         /*
1746          * It's not trivial to keep track of how many credits are available
1747          * by incrementing at each packet, because delivery may fail
1748          * (irttp_do_data_indication() may requeue the frame) and because
1749          * we need to take care of fragmentation.
1750          * We want the other side to send up to initial_credit packets.
1751          * We have some frames in our queues, and we have already allowed it
1752          * to send remote_credit.
1753          * No need to spinlock; the write is atomic and self-correcting...
1754          * Jean II
1755          */
1756         self->avail_credit = (self->initial_credit -
1757                               (self->remote_credit +
1758                                skb_queue_len(&self->rx_queue) +
1759                                skb_queue_len(&self->rx_fragments)));
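
             /* Worked example (illustrative numbers): with an initial_credit
              * of 14, remote_credit of 3, two skbs still on rx_queue and one
              * on rx_fragments, this gives avail_credit = 14 - (3 + 2 + 1),
              * i.e. 8 credits that may still be advertised to the peer. */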
1760
1761         /* Is the peer running low on credit while we have some to give away? */
1762         if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
1763             (self->avail_credit > 0)) {
1764                 /* Send explicit credit frame */
1765                 irttp_give_credit(self);
1766                 /* Note : do *NOT* check if tx_queue is non-empty; that
1767                  * will produce deadlocks. I repeat : send a credit frame
1768                  * even if we have something to send in our Tx queue.
1769                  * If we have credits, it means that our Tx queue is blocked.
1770                  *
1771                  * Suppose the peer can't keep up with our Tx. It will
1772                  * flow-control us by not sending us any credits, and we
1773                  * will stop Tx and start accumulating credits here, up
1774                  * to the point where the peer stops its own Tx queue
1775                  * for lack of credits.
1776                  * Now assume the peer application is single threaded:
1777                  * it will block on Tx and never consume any Rx buffer.
1778                  * Deadlock. Guaranteed. - Jean II
1779                  */
1780         }
1781
1782         /* Reset lock */
1783         self->rx_queue_lock = 0;
1784 }
1785
1786 #ifdef CONFIG_PROC_FS
1787 struct irttp_iter_state {
1788         int id;
1789 };
1790
1791 static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
1792 {
1793         struct irttp_iter_state *iter = seq->private;
1794         struct tsap_cb *self;
1795
1796         /* Protect our access to the tsap list */
1797         spin_lock_irq(&irttp->tsaps->hb_spinlock);
1798         iter->id = 0;
1799
1800         for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps); 
1801              self != NULL;
1802              self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
1803                 if (iter->id == *pos)
1804                         break;
1805                 ++iter->id;
1806         }
1807                 
1808         return self;
1809 }
1810
1811 static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1812 {
1813         struct irttp_iter_state *iter = seq->private;
1814
1815         ++*pos;
1816         ++iter->id;
1817         return (void *) hashbin_get_next(irttp->tsaps);
1818 }
1819
1820 static void irttp_seq_stop(struct seq_file *seq, void *v)
1821 {
1822         spin_unlock_irq(&irttp->tsaps->hb_spinlock);
1823 }
1824
1825 static int irttp_seq_show(struct seq_file *seq, void *v)
1826 {
1827         const struct irttp_iter_state *iter = seq->private;
1828         const struct tsap_cb *self = v;
1829
1830         seq_printf(seq, "TSAP %d, ", iter->id);
1831         seq_printf(seq, "stsap_sel: %02x, ",
1832                    self->stsap_sel);
1833         seq_printf(seq, "dtsap_sel: %02x\n",
1834                    self->dtsap_sel);
1835         seq_printf(seq, "  connected: %s, ",
1836                    self->connected? "TRUE":"FALSE");
1837         seq_printf(seq, "avail credit: %d, ",
1838                    self->avail_credit);
1839         seq_printf(seq, "remote credit: %d, ",
1840                    self->remote_credit);
1841         seq_printf(seq, "send credit: %d\n",
1842                    self->send_credit);
1843         seq_printf(seq, "  tx packets: %ld, ",
1844                    self->stats.tx_packets);
1845         seq_printf(seq, "rx packets: %ld, ",
1846                    self->stats.rx_packets);
1847         seq_printf(seq, "tx_queue len: %d ",
1848                    skb_queue_len(&self->tx_queue));
1849         seq_printf(seq, "rx_queue len: %d\n",
1850                    skb_queue_len(&self->rx_queue));
1851         seq_printf(seq, "  tx_sdu_busy: %s, ",
1852                    self->tx_sdu_busy? "TRUE":"FALSE");
1853         seq_printf(seq, "rx_sdu_busy: %s\n",
1854                    self->rx_sdu_busy? "TRUE":"FALSE");
1855         seq_printf(seq, "  max_seg_size: %d, ",
1856                    self->max_seg_size);
1857         seq_printf(seq, "tx_max_sdu_size: %d, ",
1858                    self->tx_max_sdu_size);
1859         seq_printf(seq, "rx_max_sdu_size: %d\n",
1860                    self->rx_max_sdu_size);
1861
1862         seq_printf(seq, "  Used by (%s)\n\n",
1863                    self->notify.name);
1864         return 0;
1865 }
1866
1867 static struct seq_operations irttp_seq_ops = {
1868         .start  = irttp_seq_start,
1869         .next   = irttp_seq_next,
1870         .stop   = irttp_seq_stop,
1871         .show   = irttp_seq_show,
1872 };
1873
1874 static int irttp_seq_open(struct inode *inode, struct file *file)
1875 {
1876         struct seq_file *seq;
1877         int rc = -ENOMEM;
1878         struct irttp_iter_state *s;
1879
1880         s = kmalloc(sizeof(*s), GFP_KERNEL);
1881         if (!s)
1882                 goto out;
1883
1884         rc = seq_open(file, &irttp_seq_ops);
1885         if (rc)
1886                 goto out_kfree;
1887
1888         seq          = file->private_data;
1889         seq->private = s;
1890         memset(s, 0, sizeof(*s));
1891 out:
1892         return rc;
1893 out_kfree:
1894         kfree(s);
1895         goto out;
1896 }
1897
1898 struct file_operations irttp_seq_fops = {
1899         .owner          = THIS_MODULE,
1900         .open           = irttp_seq_open,
1901         .read           = seq_read,
1902         .llseek         = seq_lseek,
1903         .release        = seq_release_private,
1904 };
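
     /*
      * Note: irttp_seq_fops is not registered here; the IrDA /proc setup
      * code (irproc.c) references it when creating the /proc/net/irda/irttp
      * entry.
      */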
1905
1906 #endif /* CONFIG_PROC_FS */