/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

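/*
 * Example (sketch, not built): watching device state changes through the
 * notifier chain above. The handler and block names (my_hci_event,
 * my_hci_nb) are hypothetical; the HCI_DEV_* event codes come from
 * <net/bluetooth/hci.h>.
 */
#if 0
static int my_hci_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_UP)
                BT_DBG("%s came up", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block my_hci_nb = {
        .notifier_call = my_hci_event,
};

/* hci_register_notifier(&my_hci_nb); ...; hci_unregister_notifier(&my_hci_nb); */
#endif
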
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

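/*
 * Example (sketch, not built): how the request machinery is meant to be
 * used. The "req" callback only queues commands and must not sleep;
 * hci_request() then blocks until hci_req_complete() fires from the
 * event path or the timeout expires. hci_my_voice_req() is hypothetical,
 * and HCI_OP_WRITE_VOICE_SETTING is assumed to be defined in
 * <net/bluetooth/hci.h>.
 */
#if 0
static void hci_my_voice_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 param = cpu_to_le16((__u16) opt);

        hci_send_cmd(hdev, HCI_OP_WRITE_VOICE_SETTING, 2, &param);
}

/* err = hci_request(hdev, hci_my_voice_req, 0x0060,
 *                       msecs_to_jiffies(HCI_INIT_TIMEOUT)); */
#endif
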
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

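/*
 * Example (sketch, not built): every successful hci_dev_get() holds a
 * reference and must be balanced by hci_dev_put(). The wrapper function
 * name is hypothetical.
 */
#if 0
static void my_dev_get_example(void)
{
        struct hci_dev *hdev = hci_dev_get(0);  /* look up hci0 */

        if (!hdev)
                return;

        BT_DBG("found %s", hdev->name);

        hci_dev_put(hdev);                      /* drop the reference */
}
#endif
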
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

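/*
 * Example (sketch, not built): how user space drives hci_inquiry() above.
 * This is userland code in the style of hcitool, shown only to document
 * the ABI: a struct hci_inquiry_req directly followed by room for num_rsp
 * inquiry_info entries, handed to the HCIINQUIRY ioctl on a raw HCI socket.
 */
#if 0
struct {
        struct hci_inquiry_req ir;
        struct inquiry_info    info[8];
} buf;

int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

memset(&buf, 0, sizeof(buf));
buf.ir.dev_id  = 0;             /* hci0 */
buf.ir.length  = 8;             /* inquiry length, in 1.28s units */
buf.ir.num_rsp = 8;             /* at most 8 responses */
buf.ir.lap[0]  = 0x33;          /* GIAC 0x9e8b33, little endian */
buf.ir.lap[1]  = 0x8b;
buf.ir.lap[2]  = 0x9e;

if (ioctl(dd, HCIINQUIRY, (unsigned long) &buf) >= 0)
        /* buf.ir.num_rsp entries of buf.info are now valid */;
close(dd);
#endif
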
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

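/*
 * Example (sketch, not built): user-space enumeration matching
 * hci_get_dev_list() above, again in the style of hcitool. The buffer
 * starts with the requested dev_num and comes back holding the actual
 * count plus one hci_dev_req (dev_id, flags) per registered device.
 */
#if 0
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
int i;

dl = malloc(HCI_MAX_DEV * sizeof(*dr) + sizeof(*dl));
dl->dev_num = HCI_MAX_DEV;
dr = dl->dev_req;

if (ioctl(dd, HCIGETDEVLIST, (void *) dl) == 0)
        for (i = 0; i < dl->dev_num; i++)
                printf("hci%d flags 0x%x\n", dr[i].dev_id, dr[i].dev_opt);

free(dl);
close(dd);
#endif
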
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);

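/*
 * Example (sketch, not built): the minimal shape of a transport driver
 * sitting on top of this interface. All my_* names are hypothetical;
 * real drivers (hci_usb, hci_uart, ...) follow the same pattern: allocate,
 * fill in the callbacks that hci_register_dev() insists on (open, close,
 * destruct) plus send, register, and on teardown call
 * hci_unregister_dev() followed by hci_free_dev().
 */
#if 0
static int my_open(struct hci_dev *hdev)
{
        return 0;       /* bring the transport up */
}

static int my_close(struct hci_dev *hdev)
{
        return 0;       /* shut the transport down */
}

static int my_send(struct sk_buff *skb)
{
        /* hand the frame (type in bt_cb(skb)->pkt_type) to the hardware */
        kfree_skb(skb);
        return 0;
}

static void my_destruct(struct hci_dev *hdev)
{
        /* release driver private data here */
}

static int my_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->type     = HCI_VIRTUAL;
        hdev->owner    = THIS_MODULE;
        hdev->open     = my_open;
        hdev->close    = my_close;
        hdev->send     = my_send;
        hdev->destruct = my_destruct;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -EBUSY;
        }

        return 0;
}
#endif
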
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_unregister_sysfs(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
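/*
 * Example (sketch, not built): a byte-stream transport (e.g. a UART line
 * discipline) feeding reassembly. Chunks of a known packet type may
 * arrive in arbitrary sizes; hci_recv_fragment() glues them back into
 * full frames. Note the __reassembly() mapping above: ACL (0x02) -> slot
 * 0, SCO (0x03) -> slot 1, EVENT (0x04) -> slot 2. my_recv_chunk() is
 * hypothetical.
 */
#if 0
static void my_recv_chunk(struct hci_dev *hdev, int type, void *data, int count)
{
        /* type must be HCI_ACLDATA_PKT, HCI_SCODATA_PKT or HCI_EVENT_PKT */
        if (hci_recv_fragment(hdev, type, data, count) < 0)
                BT_ERR("%s corrupted packet stream", hdev->name);
}
#endif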

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);
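/*
 * Example (sketch, not built): how an upper protocol such as L2CAP hooks
 * itself in. Only the op actually dispatched by this file's RX path is
 * shown (recv_acldata, used in hci_acldata_packet() below); the variable
 * and handler names are illustrative.
 */
#if 0
static struct hci_proto my_l2cap_proto = {
        .name         = "L2CAP",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = my_recv_acldata,        /* hypothetical handler */
};

/* err = hci_register_proto(&my_l2cap_proto); */
#endif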

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}

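/*
 * Example (sketch, not built): queueing a single command with
 * hci_send_cmd(). The call only enqueues the packet and kicks the cmd
 * task; the result arrives asynchronously as a Command Complete/Status
 * event. Parameters reuse struct hci_cp_inquiry from
 * <net/bluetooth/hci.h>, as in hci_inq_req() above.
 */
#if 0
{
        struct hci_cp_inquiry cp;

        memset(&cp, 0, sizeof(cp));
        cp.lap[0]  = 0x33;      /* GIAC 0x9e8b33, little endian */
        cp.lap[1]  = 0x8b;
        cp.lap[2]  = 0x9e;
        cp.length  = 8;         /* 8 * 1.28s */
        cp.num_rsp = 0;         /* unlimited responses */

        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
#endif
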
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

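/* Worked example: with hdev->acl_cnt == 9 free controller buffers and
 * three BT_CONNECTED ACL links holding queued data, hci_low_sent()
 * returns the link with the smallest c->sent (fewest packets in flight)
 * and grants it a quote of 9 / 3 = 3 packets; a quotient of 0 is rounded
 * up to 1 so a busy link is never starved completely. */
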
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}