1 /*********************************************************************
5 * Description: IrLAP implementation for Linux
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Mon Aug 4 20:40:53 1997
9 * Modified at: Tue Dec 14 09:26:44 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
30 ********************************************************************/
32 #include <linux/slab.h>
33 #include <linux/string.h>
34 #include <linux/skbuff.h>
35 #include <linux/delay.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/random.h>
39 #include <linux/module.h>
40 #include <linux/seq_file.h>
42 #include <net/irda/irda.h>
43 #include <net/irda/irda_device.h>
44 #include <net/irda/irqueue.h>
45 #include <net/irda/irlmp.h>
46 #include <net/irda/irlmp_frame.h>
47 #include <net/irda/irlap_frame.h>
48 #include <net/irda/irlap.h>
49 #include <net/irda/timer.h>
50 #include <net/irda/qos.h>
52 static hashbin_t *irlap = NULL;
53 int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
55 /* This is the delay of missed pf period before generating an event
56 * to the application. The spec mandate 3 seconds, but in some cases
57 * it's way too long. - Jean II */
58 int sysctl_warn_noreply_time = 3;
60 extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
61 static void __irlap_close(struct irlap_cb *self);
62 static void irlap_init_qos_capabilities(struct irlap_cb *self,
63 struct qos_info *qos_user);
#ifdef CONFIG_IRDA_DEBUG
/* Human-readable names for LAP_REASON, indexed by the enum value.
 * Used only by debug output in irlap_disconnect_indication(). The
 * first and last slots pad unused enum values so indexing stays safe. */
static char *lap_reasons[] = {
        "ERROR, NOT USED",
        "LAP_DISC_INDICATION",
        "LAP_NO_RESPONSE",
        "LAP_RESET_INDICATION",
        "LAP_FOUND_NONE",
        "LAP_MEDIA_BUSY",
        "LAP_PRIMARY_CONFLICT",
        "ERROR, NOT USED",
};
#endif  /* CONFIG_IRDA_DEBUG */
78 int __init irlap_init(void)
80 /* Check if the compiler did its job properly.
81 * May happen on some ARM configuration, check with Russell King. */
82 IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;);
83 IRDA_ASSERT(sizeof(struct test_frame) == 10, ;);
84 IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;);
85 IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;);
87 /* Allocate master array */
88 irlap = hashbin_new(HB_LOCK);
90 IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
98 void __exit irlap_cleanup(void)
100 IRDA_ASSERT(irlap != NULL, return;);
102 hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
106 * Function irlap_open (driver)
108 * Initialize IrLAP layer
111 struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
114 struct irlap_cb *self;
116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
118 /* Initialize the irlap structure. */
119 self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL);
123 memset(self, 0, sizeof(struct irlap_cb));
124 self->magic = LAP_MAGIC;
126 /* Make a binding between the layers */
129 /* Copy hardware name */
130 if(hw_name != NULL) {
131 strlcpy(self->hw_name, hw_name, sizeof(self->hw_name));
133 self->hw_name[0] = '\0';
136 /* FIXME: should we get our own field? */
137 dev->atalk_ptr = self;
139 self->state = LAP_OFFLINE;
141 /* Initialize transmit queue */
142 skb_queue_head_init(&self->txq);
143 skb_queue_head_init(&self->txq_ultra);
144 skb_queue_head_init(&self->wx_list);
146 /* My unique IrLAP device address! */
147 /* We don't want the broadcast address, neither the NULL address
148 * (most often used to signify "invalid"), and we don't want an
149 * address already in use (otherwise connect won't be able
150 * to select the proper link). - Jean II */
152 get_random_bytes(&self->saddr, sizeof(self->saddr));
153 } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
154 (hashbin_lock_find(irlap, self->saddr, NULL)) );
155 /* Copy to the driver */
156 memcpy(dev->dev_addr, &self->saddr, 4);
158 init_timer(&self->slot_timer);
159 init_timer(&self->query_timer);
160 init_timer(&self->discovery_timer);
161 init_timer(&self->final_timer);
162 init_timer(&self->poll_timer);
163 init_timer(&self->wd_timer);
164 init_timer(&self->backoff_timer);
165 init_timer(&self->media_busy_timer);
167 irlap_apply_default_connection_parameters(self);
169 self->N3 = 3; /* # connections attemts to try before giving up */
171 self->state = LAP_NDM;
173 hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL);
175 irlmp_register_link(self, self->saddr, &self->notify);
179 EXPORT_SYMBOL(irlap_open);
182 * Function __irlap_close (self)
184 * Remove IrLAP and all allocated memory. Stop any pending timers.
187 static void __irlap_close(struct irlap_cb *self)
189 IRDA_ASSERT(self != NULL, return;);
190 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
193 del_timer(&self->slot_timer);
194 del_timer(&self->query_timer);
195 del_timer(&self->discovery_timer);
196 del_timer(&self->final_timer);
197 del_timer(&self->poll_timer);
198 del_timer(&self->wd_timer);
199 del_timer(&self->backoff_timer);
200 del_timer(&self->media_busy_timer);
202 irlap_flush_all_queues(self);
210 * Function irlap_close (self)
212 * Remove IrLAP instance
215 void irlap_close(struct irlap_cb *self)
217 struct irlap_cb *lap;
219 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
221 IRDA_ASSERT(self != NULL, return;);
222 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
224 /* We used to send a LAP_DISC_INDICATION here, but this was
225 * racy. This has been move within irlmp_unregister_link()
228 /* Kill the LAP and all LSAPs on top of it */
229 irlmp_unregister_link(self->saddr);
230 self->notify.instance = NULL;
232 /* Be sure that we manage to remove ourself from the hash */
233 lap = hashbin_remove(irlap, self->saddr, NULL);
235 IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __FUNCTION__);
240 EXPORT_SYMBOL(irlap_close);
243 * Function irlap_connect_indication (self, skb)
245 * Another device is attempting to make a connection
248 void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
250 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
252 IRDA_ASSERT(self != NULL, return;);
253 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
255 irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
257 irlmp_link_connect_indication(self->notify.instance, self->saddr,
258 self->daddr, &self->qos_tx, skb);
262 * Function irlap_connect_response (self, skb)
264 * Service user has accepted incoming connection
267 void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
269 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
271 irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
275 * Function irlap_connect_request (self, daddr, qos_user, sniff)
277 * Request connection with another device, sniffing is not implemented
281 void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
282 struct qos_info *qos_user, int sniff)
284 IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __FUNCTION__, daddr);
286 IRDA_ASSERT(self != NULL, return;);
287 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
292 * If the service user specifies QoS values for this connection,
295 irlap_init_qos_capabilities(self, qos_user);
297 if ((self->state == LAP_NDM) && !self->media_busy)
298 irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
300 self->connect_pending = TRUE;
304 * Function irlap_connect_confirm (self, skb)
306 * Connection request has been accepted
309 void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
311 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
313 IRDA_ASSERT(self != NULL, return;);
314 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
316 irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
320 * Function irlap_data_indication (self, skb)
322 * Received data frames from IR-port, so we just pass them up to
323 * IrLMP for further processing
326 void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb,
329 /* Hide LAP header from IrLMP layer */
330 skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
332 irlmp_link_data_indication(self->notify.instance, skb, unreliable);
337 * Function irlap_data_request (self, skb)
339 * Queue data for transmission, must wait until XMIT state
342 void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
345 IRDA_ASSERT(self != NULL, return;);
346 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
348 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
350 IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
352 skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
355 * Must set frame format now so that the rest of the code knows
356 * if its dealing with an I or an UI frame
359 skb->data[1] = UI_FRAME;
361 skb->data[1] = I_FRAME;
363 /* Don't forget to refcount it - see irlmp_connect_request(). */
366 /* Add at the end of the queue (keep ordering) - Jean II */
367 skb_queue_tail(&self->txq, skb);
370 * Send event if this frame only if we are in the right state
371 * FIXME: udata should be sent first! (skb_queue_head?)
373 if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
374 /* If we are not already processing the Tx queue, trigger
375 * transmission immediately - Jean II */
376 if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy))
377 irlap_do_event(self, DATA_REQUEST, skb, NULL);
378 /* Otherwise, the packets will be sent normally at the
379 * next pf-poll - Jean II */
/*
 * Function irlap_unitdata_request (self, skb)
 *
 *    Send Ultra data. This is data that must be sent outside any connection,
 *    always as a broadcast UI frame.
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
{
        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

        IRDA_DEBUG(3, "%s()\n", __FUNCTION__);

        IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
                    return;);
        skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

        skb->data[0] = CBROADCAST;
        skb->data[1] = UI_FRAME;

        /* Don't need to refcount, see irlmp_connless_data_request() */

        skb_queue_tail(&self->txq_ultra, skb);

        irlap_do_event(self, SEND_UI_FRAME, NULL, NULL);
}
#endif /*CONFIG_IRDA_ULTRA */
/*
 * Function irlap_unitdata_indication (self, skb)
 *
 *    Receive Ultra data. This is data that is received outside any
 *    connection; strip the LAP header and hand it up to IrLMP.
 */
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
{
        IRDA_DEBUG(1, "%s()\n", __FUNCTION__);

        IRDA_ASSERT(self != NULL, return;);
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
        IRDA_ASSERT(skb != NULL, return;);

        /* Hide LAP header from IrLMP layer */
        skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);

        irlmp_link_unitdata_indication(self->notify.instance, skb);
}
#endif /* CONFIG_IRDA_ULTRA */
435 * Function irlap_disconnect_request (void)
437 * Request to disconnect connection by service user
439 void irlap_disconnect_request(struct irlap_cb *self)
441 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
443 IRDA_ASSERT(self != NULL, return;);
444 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
446 /* Don't disconnect until all data frames are successfully sent */
447 if (!skb_queue_empty(&self->txq)) {
448 self->disconnect_pending = TRUE;
452 /* Check if we are in the right state for disconnecting */
453 switch (self->state) {
454 case LAP_XMIT_P: /* FALLTROUGH */
455 case LAP_XMIT_S: /* FALLTROUGH */
456 case LAP_CONN: /* FALLTROUGH */
457 case LAP_RESET_WAIT: /* FALLTROUGH */
458 case LAP_RESET_CHECK:
459 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
462 IRDA_DEBUG(2, "%s(), disconnect pending!\n", __FUNCTION__);
463 self->disconnect_pending = TRUE;
469 * Function irlap_disconnect_indication (void)
471 * Disconnect request from other device
474 void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
476 IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, lap_reasons[reason]);
478 IRDA_ASSERT(self != NULL, return;);
479 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
482 irlap_flush_all_queues(self);
485 case LAP_RESET_INDICATION:
486 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __FUNCTION__);
487 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
489 case LAP_NO_RESPONSE: /* FALLTROUGH */
490 case LAP_DISC_INDICATION: /* FALLTROUGH */
491 case LAP_FOUND_NONE: /* FALLTROUGH */
493 irlmp_link_disconnect_indication(self->notify.instance, self,
497 IRDA_ERROR("%s: Unknown reason %d\n", __FUNCTION__, reason);
502 * Function irlap_discovery_request (gen_addr_bit)
504 * Start one single discovery operation.
507 void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
509 struct irlap_info info;
511 IRDA_ASSERT(self != NULL, return;);
512 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
513 IRDA_ASSERT(discovery != NULL, return;);
515 IRDA_DEBUG(4, "%s(), nslots = %d\n", __FUNCTION__, discovery->nslots);
517 IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
518 (discovery->nslots == 8) || (discovery->nslots == 16),
521 /* Discovery is only possible in NDM mode */
522 if (self->state != LAP_NDM) {
523 IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
525 irlap_discovery_confirm(self, NULL);
526 /* Note : in theory, if we are not in NDM, we could postpone
527 * the discovery like we do for connection request.
528 * In practice, it's not worth it. If the media was busy,
529 * it's likely next time around it won't be busy. If we are
530 * in REPLY state, we will get passive discovery info & event.
535 /* Check if last discovery request finished in time, or if
536 * it was aborted due to the media busy flag. */
537 if (self->discovery_log != NULL) {
538 hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
539 self->discovery_log = NULL;
542 /* All operations will occur at predictable time, no need to lock */
543 self->discovery_log = hashbin_new(HB_NOLOCK);
545 if (self->discovery_log == NULL) {
546 IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
551 info.S = discovery->nslots; /* Number of slots */
552 info.s = 0; /* Current slot */
554 self->discovery_cmd = discovery;
555 info.discovery = discovery;
557 /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
558 self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
560 irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
564 * Function irlap_discovery_confirm (log)
566 * A device has been discovered in front of this station, we
567 * report directly to LMP.
569 void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
571 IRDA_ASSERT(self != NULL, return;);
572 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
574 IRDA_ASSERT(self->notify.instance != NULL, return;);
577 * Check for successful discovery, since we are then allowed to clear
578 * the media busy condition (IrLAP 6.13.4 - p.94). This should allow
579 * us to make connection attempts much faster and easier (i.e. no
581 * Setting media busy to false will also generate an event allowing
582 * to process pending events in NDM state machine.
583 * Note : the spec doesn't define what's a successful discovery is.
584 * If we want Ultra to work, it's successful even if there is
585 * nobody discovered - Jean II
588 irda_device_set_media_busy(self->netdev, FALSE);
591 irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
595 * Function irlap_discovery_indication (log)
597 * Somebody is trying to discover us!
600 void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
602 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
604 IRDA_ASSERT(self != NULL, return;);
605 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
606 IRDA_ASSERT(discovery != NULL, return;);
608 IRDA_ASSERT(self->notify.instance != NULL, return;);
610 /* A device is very likely to connect immediately after it performs
611 * a successful discovery. This means that in our case, we are much
612 * more likely to receive a connection request over the medium.
613 * So, we backoff to avoid collisions.
614 * IrLAP spec 6.13.4 suggest 100ms...
615 * Note : this little trick actually make a *BIG* difference. If I set
616 * my Linux box with discovery enabled and one Ultra frame sent every
617 * second, my Palm has no trouble connecting to it every time !
619 irda_device_set_media_busy(self->netdev, SMALL);
621 irlmp_link_discovery_indication(self->notify.instance, discovery);
625 * Function irlap_status_indication (quality_of_link)
627 void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
629 switch (quality_of_link) {
630 case STATUS_NO_ACTIVITY:
631 IRDA_MESSAGE("IrLAP, no activity on link!\n");
634 IRDA_MESSAGE("IrLAP, noisy link!\n");
639 irlmp_status_indication(self->notify.instance,
640 quality_of_link, LOCK_NO_CHANGE);
644 * Function irlap_reset_indication (void)
646 void irlap_reset_indication(struct irlap_cb *self)
648 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
650 IRDA_ASSERT(self != NULL, return;);
651 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
653 if (self->state == LAP_RESET_WAIT)
654 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
656 irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
/*
 * Function irlap_reset_confirm (void)
 *
 *    Reset completed; nothing to do beyond tracing.
 */
void irlap_reset_confirm(void)
{
        IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
}
668 * Function irlap_generate_rand_time_slot (S, s)
670 * Generate a random time slot between s and S-1 where
671 * S = Number of slots (0 -> S-1)
674 int irlap_generate_rand_time_slot(int S, int s)
679 IRDA_ASSERT((S - s) > 0, return 0;);
682 rand ^= (rand << 12);
683 rand ^= (rand >> 20);
685 slot = s + rand % (S-s);
687 IRDA_ASSERT((slot >= s) || (slot < S), return 0;);
693 * Function irlap_update_nr_received (nr)
695 * Remove all acknowledged frames in current window queue. This code is
696 * not intuitive and you should not try to change it. If you think it
697 * contains bugs, please mail a patch to the author instead.
699 void irlap_update_nr_received(struct irlap_cb *self, int nr)
701 struct sk_buff *skb = NULL;
705 * Remove all the ack-ed frames from the window queue.
709 * Optimize for the common case. It is most likely that the receiver
710 * will acknowledge all the frames we have sent! So in that case we
711 * delete all frames stored in window.
713 if (nr == self->vs) {
714 while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
717 /* The last acked frame is the next to send minus one */
720 /* Remove all acknowledged frames in current window */
721 while ((skb_peek(&self->wx_list) != NULL) &&
722 (((self->va+1) % 8) != nr))
724 skb = skb_dequeue(&self->wx_list);
727 self->va = (self->va + 1) % 8;
733 self->window = self->window_size - skb_queue_len(&self->wx_list);
737 * Function irlap_validate_ns_received (ns)
739 * Validate the next to send (ns) field from received frame.
741 int irlap_validate_ns_received(struct irlap_cb *self, int ns)
743 /* ns as expected? */
747 * Stations are allowed to treat invalid NS as unexpected NS
748 * IrLAP, Recv ... with-invalid-Ns. p. 84
750 return NS_UNEXPECTED;
752 /* return NR_INVALID; */
755 * Function irlap_validate_nr_received (nr)
757 * Validate the next to receive (nr) field from received frame.
760 int irlap_validate_nr_received(struct irlap_cb *self, int nr)
762 /* nr as expected? */
763 if (nr == self->vs) {
764 IRDA_DEBUG(4, "%s(), expected!\n", __FUNCTION__);
769 * unexpected nr? (but within current window), first we check if the
770 * ns numbers of the frames in the current window wrap.
772 if (self->va < self->vs) {
773 if ((nr >= self->va) && (nr <= self->vs))
774 return NR_UNEXPECTED;
776 if ((nr >= self->va) || (nr <= self->vs))
777 return NR_UNEXPECTED;
785 * Function irlap_initiate_connection_state ()
787 * Initialize the connection state parameters
790 void irlap_initiate_connection_state(struct irlap_cb *self)
792 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
794 IRDA_ASSERT(self != NULL, return;);
795 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
797 /* Next to send and next to receive */
798 self->vs = self->vr = 0;
800 /* Last frame which got acked (0 - 1) % 8 */
805 self->remote_busy = FALSE;
806 self->retry_count = 0;
810 * Function irlap_wait_min_turn_around (self, qos)
812 * Wait negotiated minimum turn around time, this function actually sets
813 * the number of BOS's that must be sent before the next transmitted
814 * frame in order to delay for the specified amount of time. This is
815 * done to avoid using timers, and the forbidden udelay!
817 void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
822 /* Get QoS values. */
823 speed = qos->baud_rate.value;
824 min_turn_time = qos->min_turn_time.value;
826 /* No need to calculate XBOFs for speeds over 115200 bps */
827 if (speed > 115200) {
828 self->mtt_required = min_turn_time;
833 * Send additional BOF's for the next frame for the requested
834 * min turn time, so now we must calculate how many chars (XBOF's) we
835 * must send for the requested time period (min turn time)
837 self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time);
841 * Function irlap_flush_all_queues (void)
846 void irlap_flush_all_queues(struct irlap_cb *self)
850 IRDA_ASSERT(self != NULL, return;);
851 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
853 /* Free transmission queue */
854 while ((skb = skb_dequeue(&self->txq)) != NULL)
857 while ((skb = skb_dequeue(&self->txq_ultra)) != NULL)
860 /* Free sliding window buffered packets */
861 while ((skb = skb_dequeue(&self->wx_list)) != NULL)
866 * Function irlap_setspeed (self, speed)
868 * Change the speed of the IrDA port
871 static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
875 IRDA_DEBUG(0, "%s(), setting speed to %d\n", __FUNCTION__, speed);
877 IRDA_ASSERT(self != NULL, return;);
878 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
882 /* Change speed now, or just piggyback speed on frames */
884 /* Send down empty frame to trigger speed change */
885 skb = dev_alloc_skb(0);
887 irlap_queue_xmit(self, skb);
892 * Function irlap_init_qos_capabilities (self, qos)
894 * Initialize QoS for this IrLAP session, What we do is to compute the
895 * intersection of the QoS capabilities for the user, driver and for
896 * IrLAP itself. Normally, IrLAP will not specify any values, but it can
897 * be used to restrict certain values.
899 static void irlap_init_qos_capabilities(struct irlap_cb *self,
900 struct qos_info *qos_user)
902 IRDA_ASSERT(self != NULL, return;);
903 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
904 IRDA_ASSERT(self->netdev != NULL, return;);
906 /* Start out with the maximum QoS support possible */
907 irda_init_max_qos_capabilies(&self->qos_rx);
909 /* Apply drivers QoS capabilities */
910 irda_qos_compute_intersection(&self->qos_rx, self->qos_dev);
913 * Check for user supplied QoS parameters. The service user is only
914 * allowed to supply these values. We check each parameter since the
915 * user may not have set all of them.
918 IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __FUNCTION__);
920 if (qos_user->baud_rate.bits)
921 self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
923 if (qos_user->max_turn_time.bits)
924 self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
925 if (qos_user->data_size.bits)
926 self->qos_rx.data_size.bits &= qos_user->data_size.bits;
928 if (qos_user->link_disc_time.bits)
929 self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
932 /* Use 500ms in IrLAP for now */
933 self->qos_rx.max_turn_time.bits &= 0x01;
936 /*self->qos_rx.data_size.bits &= 0x03;*/
938 irda_qos_bits_to_value(&self->qos_rx);
942 * Function irlap_apply_default_connection_parameters (void, now)
944 * Use the default connection and transmission parameters
946 void irlap_apply_default_connection_parameters(struct irlap_cb *self)
948 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
950 IRDA_ASSERT(self != NULL, return;);
951 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
953 /* xbofs : Default value in NDM */
954 self->next_bofs = 12;
955 self->bofs_count = 12;
957 /* NDM Speed is 9600 */
958 irlap_change_speed(self, 9600, TRUE);
960 /* Set mbusy when going to NDM state */
961 irda_device_set_media_busy(self->netdev, TRUE);
964 * Generate random connection address for this session, which must
965 * be 7 bits wide and different from 0x00 and 0xfe
967 while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
968 get_random_bytes(&self->caddr, sizeof(self->caddr));
972 /* Use default values until connection has been negitiated */
973 self->slot_timeout = sysctl_slot_timeout;
974 self->final_timeout = FINAL_TIMEOUT;
975 self->poll_timeout = POLL_TIMEOUT;
976 self->wd_timeout = WD_TIMEOUT;
978 /* Set some default values */
979 self->qos_tx.baud_rate.value = 9600;
980 self->qos_rx.baud_rate.value = 9600;
981 self->qos_tx.max_turn_time.value = 0;
982 self->qos_rx.max_turn_time.value = 0;
983 self->qos_tx.min_turn_time.value = 0;
984 self->qos_rx.min_turn_time.value = 0;
985 self->qos_tx.data_size.value = 64;
986 self->qos_rx.data_size.value = 64;
987 self->qos_tx.window_size.value = 1;
988 self->qos_rx.window_size.value = 1;
989 self->qos_tx.additional_bofs.value = 12;
990 self->qos_rx.additional_bofs.value = 12;
991 self->qos_tx.link_disc_time.value = 0;
992 self->qos_rx.link_disc_time.value = 0;
994 irlap_flush_all_queues(self);
996 self->disconnect_pending = FALSE;
997 self->connect_pending = FALSE;
1001 * Function irlap_apply_connection_parameters (qos, now)
1003 * Initialize IrLAP with the negotiated QoS values
1005 * If 'now' is false, the speed and xbofs will be changed after the next
1007 * If 'now' is true, the speed and xbofs is changed immediately
1009 void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
1011 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
1013 IRDA_ASSERT(self != NULL, return;);
1014 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
1016 /* Set the negotiated xbofs value */
1017 self->next_bofs = self->qos_tx.additional_bofs.value;
1019 self->bofs_count = self->next_bofs;
1021 /* Set the negotiated link speed (may need the new xbofs value) */
1022 irlap_change_speed(self, self->qos_tx.baud_rate.value, now);
1024 self->window_size = self->qos_tx.window_size.value;
1025 self->window = self->qos_tx.window_size.value;
1027 #ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1029 * Calculate how many bytes it is possible to transmit before the
1030 * link must be turned around
1032 self->line_capacity =
1033 irlap_max_line_capacity(self->qos_tx.baud_rate.value,
1034 self->qos_tx.max_turn_time.value);
1035 self->bytes_left = self->line_capacity;
1036 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1040 * Initialize timeout values, some of the rules are listed on
1043 IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
1044 IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
1045 /* The poll timeout applies only to the primary station.
1046 * It defines the maximum time the primary stay in XMIT mode
1047 * before timeout and turning the link around (sending a RR).
1048 * Or, this is how much we can keep the pf bit in primary mode.
1049 * Therefore, it must be lower or equal than our *OWN* max turn around.
1051 self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
1052 /* The Final timeout applies only to the primary station.
1053 * It defines the maximum time the primary wait (mostly in RECV mode)
1054 * for an answer from the secondary station before polling it again.
1055 * Therefore, it must be greater or equal than our *PARTNER*
1056 * max turn around time - Jean II */
1057 self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
1058 /* The Watchdog Bit timeout applies only to the secondary station.
1059 * It defines the maximum time the secondary wait (mostly in RECV mode)
1060 * for poll from the primary station before getting annoyed.
1061 * Therefore, it must be greater or equal than our *PARTNER*
1062 * max turn around time - Jean II */
1063 self->wd_timeout = self->final_timeout * 2;
1066 * N1 and N2 are maximum retry count for *both* the final timer
1067 * and the wd timer (with a factor 2) as defined above.
1068 * After N1 retry of a timer, we give a warning to the user.
1069 * After N2 retry, we consider the link dead and disconnect it.
1074 * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
1075 * 3 seconds otherwise. See page 71 in IrLAP for more details.
1076 * Actually, it's not always 3 seconds, as we allow to set
1077 * it via sysctl... Max maxtt is 500ms, and N1 need to be multiple
1078 * of 2, so 1 second is minimum we can allow. - Jean II
1080 if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
1082 * If we set N1 to 0, it will trigger immediately, which is
1083 * not what we want. What we really want is to disable it,
1086 self->N1 = -2; /* Disable - Need to be multiple of 2*/
1088 self->N1 = sysctl_warn_noreply_time * 1000 /
1089 self->qos_rx.max_turn_time.value;
1091 IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
1093 /* Set N2 to match our own disconnect time */
1094 self->N2 = self->qos_tx.link_disc_time.value * 1000 /
1095 self->qos_rx.max_turn_time.value;
1096 IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
1099 #ifdef CONFIG_PROC_FS
/* Iterator state for the /proc seq_file interface: position (id) of the
 * current IrLAP instance within the hashbin traversal. */
struct irlap_iter_state {
        int id;
};
1104 static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1106 struct irlap_iter_state *iter = seq->private;
1107 struct irlap_cb *self;
1109 /* Protect our access to the tsap list */
1110 spin_lock_irq(&irlap->hb_spinlock);
1113 for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1114 self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1115 if (iter->id == *pos)
1123 static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1125 struct irlap_iter_state *iter = seq->private;
1129 return (void *) hashbin_get_next(irlap);
1132 static void irlap_seq_stop(struct seq_file *seq, void *v)
1134 spin_unlock_irq(&irlap->hb_spinlock);
1137 static int irlap_seq_show(struct seq_file *seq, void *v)
1139 const struct irlap_iter_state *iter = seq->private;
1140 const struct irlap_cb *self = v;
1142 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1144 seq_printf(seq, "irlap%d ", iter->id);
1145 seq_printf(seq, "state: %s\n",
1146 irlap_state[self->state]);
1148 seq_printf(seq, " device name: %s, ",
1149 (self->netdev) ? self->netdev->name : "bug");
1150 seq_printf(seq, "hardware name: %s\n", self->hw_name);
1152 seq_printf(seq, " caddr: %#02x, ", self->caddr);
1153 seq_printf(seq, "saddr: %#08x, ", self->saddr);
1154 seq_printf(seq, "daddr: %#08x\n", self->daddr);
1156 seq_printf(seq, " win size: %d, ",
1158 seq_printf(seq, "win: %d, ", self->window);
1159 #ifdef CONFIG_IRDA_DYNAMIC_WINDOW
1160 seq_printf(seq, "line capacity: %d, ",
1161 self->line_capacity);
1162 seq_printf(seq, "bytes left: %d\n", self->bytes_left);
1163 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
1164 seq_printf(seq, " tx queue len: %d ",
1165 skb_queue_len(&self->txq));
1166 seq_printf(seq, "win queue len: %d ",
1167 skb_queue_len(&self->wx_list));
1168 seq_printf(seq, "rbusy: %s", self->remote_busy ?
1170 seq_printf(seq, " mbusy: %s\n", self->media_busy ?
1173 seq_printf(seq, " retrans: %d ", self->retry_count);
1174 seq_printf(seq, "vs: %d ", self->vs);
1175 seq_printf(seq, "vr: %d ", self->vr);
1176 seq_printf(seq, "va: %d\n", self->va);
1178 seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
1180 seq_printf(seq, " tx\t%d\t",
1181 self->qos_tx.baud_rate.value);
1182 seq_printf(seq, "%d\t",
1183 self->qos_tx.max_turn_time.value);
1184 seq_printf(seq, "%d\t",
1185 self->qos_tx.data_size.value);
1186 seq_printf(seq, "%d\t",
1187 self->qos_tx.window_size.value);
1188 seq_printf(seq, "%d\t",
1189 self->qos_tx.additional_bofs.value);
1190 seq_printf(seq, "%d\t",
1191 self->qos_tx.min_turn_time.value);
1192 seq_printf(seq, "%d\t",
1193 self->qos_tx.link_disc_time.value);
1194 seq_printf(seq, "\n");
1196 seq_printf(seq, " rx\t%d\t",
1197 self->qos_rx.baud_rate.value);
1198 seq_printf(seq, "%d\t",
1199 self->qos_rx.max_turn_time.value);
1200 seq_printf(seq, "%d\t",
1201 self->qos_rx.data_size.value);
1202 seq_printf(seq, "%d\t",
1203 self->qos_rx.window_size.value);
1204 seq_printf(seq, "%d\t",
1205 self->qos_rx.additional_bofs.value);
1206 seq_printf(seq, "%d\t",
1207 self->qos_rx.min_turn_time.value);
1208 seq_printf(seq, "%d\n",
1209 self->qos_rx.link_disc_time.value);
1214 static struct seq_operations irlap_seq_ops = {
1215 .start = irlap_seq_start,
1216 .next = irlap_seq_next,
1217 .stop = irlap_seq_stop,
1218 .show = irlap_seq_show,
1221 static int irlap_seq_open(struct inode *inode, struct file *file)
1223 struct seq_file *seq;
1225 struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1230 if (irlap == NULL) {
1235 rc = seq_open(file, &irlap_seq_ops);
1239 seq = file->private_data;
1241 memset(s, 0, sizeof(*s));
1249 struct file_operations irlap_seq_fops = {
1250 .owner = THIS_MODULE,
1251 .open = irlap_seq_open,
1253 .llseek = seq_lseek,
1254 .release = seq_release_private,
1257 #endif /* CONFIG_PROC_FS */