net/bluetooth/hci_core.c (linux-2.6)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
55
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
59 static void hci_notify(struct hci_dev *hdev, int event);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO   2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78 /* ---- HCI notifications ---- */
79
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82         return atomic_notifier_chain_register(&hci_notifier, nb);
83 }
84
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87         return atomic_notifier_chain_unregister(&hci_notifier, nb);
88 }
89
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92         atomic_notifier_call_chain(&hci_notifier, event, hdev);
93 }
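
/*
 * Example: a consumer can watch device events through the notifier chain
 * above. This is an illustrative sketch only (the callback and its
 * message are made up), kept under #if 0 in the style of the disabled
 * block in hci_init_req() so it is never built.
 */
#if 0
static int example_hci_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_UP)
                BT_DBG("%s came up", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
        .notifier_call = example_hci_event,
};

/* registration: hci_register_notifier(&example_hci_nb); */
#endif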

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
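
/*
 * Example: a sketch of how these helpers are used. The caller passes a
 * request callback (such as hci_scan_req below) that issues the HCI
 * commands; hci_request() then sleeps until hci_req_complete() is called
 * from the event path or the timeout expires. Disabled sketch only; the
 * 10 second timeout is an arbitrary illustration.
 */
#if 0
static int example_enable_scans(struct hci_dev *hdev)
{
        /* Enable both page and inquiry scan. */
        return hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
                                msecs_to_jiffies(10000));
}
#endif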

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, opt);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
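
/*
 * Example: hci_dev_get() returns the device with its reference count
 * already raised, so every successful lookup must be paired with a
 * hci_dev_put(). A minimal sketch of the pattern used by the ioctl
 * helpers below (disabled, illustration only):
 */
#if 0
static int example_touch_hci0(void)
{
        struct hci_dev *hdev = hci_dev_get(0);  /* look up hci0 */

        if (!hdev)
                return -ENODEV;

        /* ... use hdev under its own locking rules ... */

        hci_dev_put(hdev);
        return 0;
}
#endif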

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer of 255 entries. */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so dump into a temporary
         * buffer first and then copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
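
/*
 * Example: the usual driver-side flow around the interface above. A
 * disabled sketch with hypothetical my_open/my_close/my_send/my_destruct
 * callbacks; real transport drivers (hci_usb, hci_uart, ...) follow the
 * same pattern.
 */
#if 0
static int example_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->type     = HCI_VIRTUAL;
        hdev->open     = my_open;       /* hypothetical callbacks */
        hdev->close    = my_close;
        hdev->send     = my_send;
        hdev->destruct = my_destruct;
        hdev->owner    = THIS_MODULE;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -EBUSY;
        }

        return 0;
}
#endif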

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        hci_unregister_sysfs(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])
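
/*
 * The "- 2" maps the three reassembled packet types onto the
 * hdev->reassembly[3] slots: HCI_ACLDATA_PKT (0x02) -> [0],
 * HCI_SCODATA_PKT (0x03) -> [1], HCI_EVENT_PKT (0x04) -> [2],
 * matching the range check at the top of hci_recv_fragment().
 */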

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
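
/*
 * Example: how a byte-stream transport driver might feed data into the
 * reassembler above. A disabled sketch with made-up names; it assumes
 * the driver has already consumed the packet-type indicator byte and
 * passes it as 'type'.
 */
#if 0
static void example_rx_bytes(struct hci_dev *hdev, int type,
                                void *buf, int len)
{
        if (hci_recv_fragment(hdev, type, buf, len) < 0)
                BT_ERR("%s frame reassembly failed", hdev->name);
}
#endif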

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}
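
/*
 * Example: callers fill in a parameter struct from hci.h and hand it to
 * hci_send_cmd(), which prepends the command header and queues the skb
 * for hci_cmd_task(). Disabled sketch mirroring hci_inq_req() above:
 */
#if 0
static void example_start_inquiry(struct hci_dev *hdev, __u8 *lap)
{
        struct hci_cp_inquiry cp;

        memcpy(&cp.lap, lap, 3);        /* 3-byte LAP, e.g. GIAC */
        cp.length  = 8;                 /* inquiry duration, x 1.28s */
        cp.num_rsp = 0;                 /* unlimited responses */
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
#endif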

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);
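
/*
 * Example: an upper protocol hands off one PDU per call. Continuation
 * buffers, if any, are chained on skb_shinfo(skb)->frag_list before the
 * call and tagged ACL_CONT internally. Disabled sketch; in this era's
 * L2CAP the flags argument is simply 0.
 */
#if 0
static void example_send_pdu(struct hci_conn *hcon, struct sk_buff *skb)
{
        hci_send_acl(hcon, skb, 0);
}
#endif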

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
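
/*
 * Worked example: with hdev->acl_cnt = 8 controller buffers free and
 * three ACL connections holding queued data, hci_low_sent() picks the
 * connection with the fewest packets in flight and grants it a quote of
 * 8 / 3 = 2 sends (never less than 1), so busy links cannot starve
 * quieter ones.
 */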

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}