[linux-2.6] net/bluetooth/hci_core.c
1 /* 
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h>
41 #include <linux/notifier.h>
42 #include <net/sock.h>
43
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
46 #include <asm/unaligned.h>
47
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
50
51 #ifndef CONFIG_BT_HCI_CORE_DEBUG
52 #undef  BT_DBG
53 #define BT_DBG(D...)
54 #endif
55
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
59 static void hci_notify(struct hci_dev *hdev, int event);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO   2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static struct notifier_block *hci_notifier;
77
78 /* ---- HCI notifications ---- */
79
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82         return notifier_chain_register(&hci_notifier, nb);
83 }
84
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87         return notifier_chain_unregister(&hci_notifier, nb);
88 }
89
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92         notifier_call_chain(&hci_notifier, event, hdev);
93 }
94
95 /* ---- HCI requests ---- */
96
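/* Completion and cancellation handlers for synchronous requests: they record
 * the result (or error) and wake up the waiter sleeping in __hci_request(). */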
97 void hci_req_complete(struct hci_dev *hdev, int result)
98 {
99         BT_DBG("%s result 0x%2.2x", hdev->name, result);
100
101         if (hdev->req_status == HCI_REQ_PEND) {
102                 hdev->req_result = result;
103                 hdev->req_status = HCI_REQ_DONE;
104                 wake_up_interruptible(&hdev->req_wait_q);
105         }
106 }
107
108 static void hci_req_cancel(struct hci_dev *hdev, int err)
109 {
110         BT_DBG("%s err 0x%2.2x", hdev->name, err);
111
112         if (hdev->req_status == HCI_REQ_PEND) {
113                 hdev->req_result = err;
114                 hdev->req_status = HCI_REQ_CANCELED;
115                 wake_up_interruptible(&hdev->req_wait_q);
116         }
117 }
118
119 /* Execute request and wait for completion. */
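/* The req() callback queues one or more HCI commands; this function then
 * sleeps on req_wait_q until hci_req_complete() or hci_req_cancel() wakes it
 * up, or until the timeout expires. */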
120 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 
121                                 unsigned long opt, __u32 timeout)
122 {
123         DECLARE_WAITQUEUE(wait, current);
124         int err = 0;
125
126         BT_DBG("%s start", hdev->name);
127
128         hdev->req_status = HCI_REQ_PEND;
129
130         add_wait_queue(&hdev->req_wait_q, &wait);
131         set_current_state(TASK_INTERRUPTIBLE);
132
133         req(hdev, opt);
134         schedule_timeout(timeout);
135
136         remove_wait_queue(&hdev->req_wait_q, &wait);
137
138         if (signal_pending(current))
139                 return -EINTR;
140
141         switch (hdev->req_status) {
142         case HCI_REQ_DONE:
143                 err = -bt_err(hdev->req_result);
144                 break;
145
146         case HCI_REQ_CANCELED:
147                 err = -hdev->req_result;
148                 break;
149
150         default:
151                 err = -ETIMEDOUT;
152                 break;
153         }
154
155         hdev->req_status = hdev->req_result = 0;
156
157         BT_DBG("%s end: err %d", hdev->name, err);
158
159         return err;
160 }
161
162 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
163                                 unsigned long opt, __u32 timeout)
164 {
165         int ret;
166
167         /* Serialize all requests */
168         hci_req_lock(hdev);
169         ret = __hci_request(hdev, req, opt, timeout);
170         hci_req_unlock(hdev);
171
172         return ret;
173 }
174
175 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176 {
177         BT_DBG("%s %ld", hdev->name, opt);
178
179         /* Reset device */
180         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
181 }
182
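/* Standard controller bring-up: send any driver-specific init commands,
 * optionally reset the controller, read its features, buffer sizes, BD
 * address and voice setting, then clear the event filter and set the page
 * and connection accept timeouts. */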
183 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
184 {
185         struct sk_buff *skb;
186         __le16 param;
187
188         BT_DBG("%s %ld", hdev->name, opt);
189
190         /* Driver initialization */
191
192         /* Special commands */
193         while ((skb = skb_dequeue(&hdev->driver_init))) {
194                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195                 skb->dev = (void *) hdev;
196                 skb_queue_tail(&hdev->cmd_q, skb);
197                 hci_sched_cmd(hdev);
198         }
199         skb_queue_purge(&hdev->driver_init);
200
201         /* Mandatory initialization */
202
203         /* Reset */
204         if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
205                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
206
207         /* Read Local Supported Features */
208         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
209
210         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
211         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
212
213 #if 0
214         /* Host buffer size */
215         {
216                 struct hci_cp_host_buffer_size cp;
217                 cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
218                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
219                 cp.acl_max_pkt = __cpu_to_le16(0xffff);
220                 cp.sco_max_pkt = __cpu_to_le16(0xffff);
221                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
222         }
223 #endif
224
225         /* Read BD Address */
226         hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
227
228         /* Read Voice Setting */
229         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
230
231         /* Optional initialization */
232
233         /* Clear Event Filters */
234         {
235                 struct hci_cp_set_event_flt cp;
236                 cp.flt_type  = HCI_FLT_CLEAR_ALL;
237                 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
238         }
239
240         /* Page timeout ~20 secs */
241         param = __cpu_to_le16(0x8000);
242         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
243
244         /* Connection accept timeout ~20 secs */
245         param = __cpu_to_le16(0x7d00);
246         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
247 }
248
249 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
250 {
251         __u8 scan = opt;
252
253         BT_DBG("%s %x", hdev->name, scan);
254
255         /* Inquiry and Page scans */
256         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
257 }
258
259 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
260 {
261         __u8 auth = opt;
262
263         BT_DBG("%s %x", hdev->name, auth);
264
265         /* Authentication */
266         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
267 }
268
269 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
270 {
271         __u8 encrypt = opt;
272
273         BT_DBG("%s %x", hdev->name, encrypt);
274
275         /* Encryption */
276         hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
277 }
278
279 /* Get HCI device by index. 
280  * Device is held on return. */
281 struct hci_dev *hci_dev_get(int index)
282 {
283         struct hci_dev *hdev = NULL;
284         struct list_head *p;
285
286         BT_DBG("%d", index);
287
288         if (index < 0)
289                 return NULL;
290
291         read_lock(&hci_dev_list_lock);
292         list_for_each(p, &hci_dev_list) {
293                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
294                 if (d->id == index) {
295                         hdev = hci_dev_hold(d);
296                         break;
297                 }
298         }
299         read_unlock(&hci_dev_list_lock);
300         return hdev;
301 }
302
303 /* ---- Inquiry support ---- */
304 static void inquiry_cache_flush(struct hci_dev *hdev)
305 {
306         struct inquiry_cache *cache = &hdev->inq_cache;
307         struct inquiry_entry *next  = cache->list, *e;
308
309         BT_DBG("cache %p", cache);
310
311         cache->list = NULL;
312         while ((e = next)) {
313                 next = e->next;
314                 kfree(e);
315         }
316 }
317
318 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
319 {
320         struct inquiry_cache *cache = &hdev->inq_cache;
321         struct inquiry_entry *e;
322
323         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
324
325         for (e = cache->list; e; e = e->next)
326                 if (!bacmp(&e->data.bdaddr, bdaddr))
327                         break;
328         return e;
329 }
330
331 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
332 {
333         struct inquiry_cache *cache = &hdev->inq_cache;
334         struct inquiry_entry *e;
335
336         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
337
338         if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
339                 /* Entry not in the cache. Add new one. */
340                 if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
341                         return;
342                 memset(e, 0, sizeof(struct inquiry_entry));
343                 e->next     = cache->list;
344                 cache->list = e;
345         }
346
347         memcpy(&e->data, data, sizeof(*data));
348         e->timestamp = jiffies;
349         cache->timestamp = jiffies;
350 }
351
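/* Copy at most num cached entries into buf as struct inquiry_info records
 * and return the number copied. The caller holds the device lock. */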
352 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
353 {
354         struct inquiry_cache *cache = &hdev->inq_cache;
355         struct inquiry_info *info = (struct inquiry_info *) buf;
356         struct inquiry_entry *e;
357         int copied = 0;
358
359         for (e = cache->list; e && copied < num; e = e->next, copied++) {
360                 struct inquiry_data *data = &e->data;
361                 bacpy(&info->bdaddr, &data->bdaddr);
362                 info->pscan_rep_mode    = data->pscan_rep_mode;
363                 info->pscan_period_mode = data->pscan_period_mode;
364                 info->pscan_mode        = data->pscan_mode;
365                 memcpy(info->dev_class, data->dev_class, 3);
366                 info->clock_offset      = data->clock_offset;
367                 info++;
368         }
369
370         BT_DBG("cache %p, copied %d", cache, copied);
371         return copied;
372 }
373
374 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
375 {
376         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
377         struct hci_cp_inquiry cp;
378
379         BT_DBG("%s", hdev->name);
380
381         if (test_bit(HCI_INQUIRY, &hdev->flags))
382                 return;
383
384         /* Start Inquiry */
385         memcpy(&cp.lap, &ir->lap, 3);
386         cp.length  = ir->length;
387         cp.num_rsp = ir->num_rsp;
388         hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
389 }
390
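/* User-space inquiry helper (HCIINQUIRY ioctl): flush a stale cache, run a
 * new inquiry if needed, then copy the cached results back to user space. */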
391 int hci_inquiry(void __user *arg)
392 {
393         __u8 __user *ptr = arg;
394         struct hci_inquiry_req ir;
395         struct hci_dev *hdev;
396         int err = 0, do_inquiry = 0, max_rsp;
397         long timeo;
398         __u8 *buf;
399
400         if (copy_from_user(&ir, ptr, sizeof(ir)))
401                 return -EFAULT;
402
403         if (!(hdev = hci_dev_get(ir.dev_id)))
404                 return -ENODEV;
405
406         hci_dev_lock_bh(hdev);
407         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 
408                                         inquiry_cache_empty(hdev) ||
409                                         ir.flags & IREQ_CACHE_FLUSH) {
410                 inquiry_cache_flush(hdev);
411                 do_inquiry = 1;
412         }
413         hci_dev_unlock_bh(hdev);
414
415         timeo = ir.length * 2 * HZ;
416         if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
417                 goto done;
418
419         /* For an unlimited number of responses we use a buffer with 255 entries */
420         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
421
422         /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer and
423          * then copy it to user space.
424          */
425         if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
426                 err = -ENOMEM;
427                 goto done;
428         }
429
430         hci_dev_lock_bh(hdev);
431         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
432         hci_dev_unlock_bh(hdev);
433
434         BT_DBG("num_rsp %d", ir.num_rsp);
435
436         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
437                 ptr += sizeof(ir);
438                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
439                                         ir.num_rsp))
440                         err = -EFAULT;
441         } else 
442                 err = -EFAULT;
443
444         kfree(buf);
445
446 done:
447         hci_dev_put(hdev);
448         return err;
449 }
450
451 /* ---- HCI ioctl helpers ---- */
452
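/* Bring the device up: call the driver's open(), run the HCI init request
 * (unless the device is marked raw) and, on success, set HCI_UP and emit the
 * HCI_DEV_UP notification. On failure the device is torn down again. */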
453 int hci_dev_open(__u16 dev)
454 {
455         struct hci_dev *hdev;
456         int ret = 0;
457
458         if (!(hdev = hci_dev_get(dev)))
459                 return -ENODEV;
460
461         BT_DBG("%s %p", hdev->name, hdev);
462
463         hci_req_lock(hdev);
464
465         if (test_bit(HCI_UP, &hdev->flags)) {
466                 ret = -EALREADY;
467                 goto done;
468         }
469
470         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
471                 set_bit(HCI_RAW, &hdev->flags);
472
473         if (hdev->open(hdev)) {
474                 ret = -EIO;
475                 goto done;
476         }
477
478         if (!test_bit(HCI_RAW, &hdev->flags)) {
479                 atomic_set(&hdev->cmd_cnt, 1);
480                 set_bit(HCI_INIT, &hdev->flags);
481
482                 //__hci_request(hdev, hci_reset_req, 0, HZ);
483                 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
484
485                 clear_bit(HCI_INIT, &hdev->flags);
486         }
487
488         if (!ret) {
489                 hci_dev_hold(hdev);
490                 set_bit(HCI_UP, &hdev->flags);
491                 hci_notify(hdev, HCI_DEV_UP);
492         } else {        
493                 /* Init failed, cleanup */
494                 tasklet_kill(&hdev->rx_task);
495                 tasklet_kill(&hdev->tx_task);
496                 tasklet_kill(&hdev->cmd_task);
497
498                 skb_queue_purge(&hdev->cmd_q);
499                 skb_queue_purge(&hdev->rx_q);
500
501                 if (hdev->flush)
502                         hdev->flush(hdev);
503
504                 if (hdev->sent_cmd) {
505                         kfree_skb(hdev->sent_cmd);
506                         hdev->sent_cmd = NULL;
507                 }
508
509                 hdev->close(hdev);
510                 hdev->flags = 0;
511         }
512
513 done:
514         hci_req_unlock(hdev);
515         hci_dev_put(hdev);
516         return ret;
517 }
518
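/* Common close path: cancel any pending request, kill the RX/TX/cmd
 * tasklets, flush caches, queues and connections, optionally reset the
 * controller and finally call the driver's close(). */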
519 static int hci_dev_do_close(struct hci_dev *hdev)
520 {
521         BT_DBG("%s %p", hdev->name, hdev);
522
523         hci_req_cancel(hdev, ENODEV);
524         hci_req_lock(hdev);
525
526         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
527                 hci_req_unlock(hdev);
528                 return 0;
529         }
530
531         /* Kill RX and TX tasks */
532         tasklet_kill(&hdev->rx_task);
533         tasklet_kill(&hdev->tx_task);
534
535         hci_dev_lock_bh(hdev);
536         inquiry_cache_flush(hdev);
537         hci_conn_hash_flush(hdev);
538         hci_dev_unlock_bh(hdev);
539
540         hci_notify(hdev, HCI_DEV_DOWN);
541
542         if (hdev->flush)
543                 hdev->flush(hdev);
544
545         /* Reset device */
546         skb_queue_purge(&hdev->cmd_q);
547         atomic_set(&hdev->cmd_cnt, 1);
548         if (!test_bit(HCI_RAW, &hdev->flags)) {
549                 set_bit(HCI_INIT, &hdev->flags);
550                 __hci_request(hdev, hci_reset_req, 0, HZ/4);
551                 clear_bit(HCI_INIT, &hdev->flags);
552         }
553
554         /* Kill cmd task */
555         tasklet_kill(&hdev->cmd_task);
556
557         /* Drop queues */
558         skb_queue_purge(&hdev->rx_q);
559         skb_queue_purge(&hdev->cmd_q);
560         skb_queue_purge(&hdev->raw_q);
561
562         /* Drop last sent command */
563         if (hdev->sent_cmd) {
564                 kfree_skb(hdev->sent_cmd);
565                 hdev->sent_cmd = NULL;
566         }
567
568         /* After this point our queues are empty
569          * and no tasks are scheduled. */
570         hdev->close(hdev);
571
572         /* Clear flags */
573         hdev->flags = 0;
574
575         hci_req_unlock(hdev);
576
577         hci_dev_put(hdev);
578         return 0;
579 }
580
581 int hci_dev_close(__u16 dev)
582 {
583         struct hci_dev *hdev;
584         int err;
585
586         if (!(hdev = hci_dev_get(dev)))
587                 return -ENODEV;
588         err = hci_dev_do_close(hdev);
589         hci_dev_put(hdev);
590         return err;
591 }
592
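/* HCIDEVRESET ioctl helper: drop the queues, flush caches and connections
 * and, for non-raw devices, send HCI_Reset to the controller. */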
593 int hci_dev_reset(__u16 dev)
594 {
595         struct hci_dev *hdev;
596         int ret = 0;
597
598         if (!(hdev = hci_dev_get(dev)))
599                 return -ENODEV;
600
601         hci_req_lock(hdev);
602         tasklet_disable(&hdev->tx_task);
603
604         if (!test_bit(HCI_UP, &hdev->flags))
605                 goto done;
606
607         /* Drop queues */
608         skb_queue_purge(&hdev->rx_q);
609         skb_queue_purge(&hdev->cmd_q);
610
611         hci_dev_lock_bh(hdev);
612         inquiry_cache_flush(hdev);
613         hci_conn_hash_flush(hdev);
614         hci_dev_unlock_bh(hdev);
615
616         if (hdev->flush)
617                 hdev->flush(hdev);
618
619         atomic_set(&hdev->cmd_cnt, 1); 
620         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
621
622         if (!test_bit(HCI_RAW, &hdev->flags))
623                 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
624
625 done:
626         tasklet_enable(&hdev->tx_task);
627         hci_req_unlock(hdev);
628         hci_dev_put(hdev);
629         return ret;
630 }
631
632 int hci_dev_reset_stat(__u16 dev)
633 {
634         struct hci_dev *hdev;
635         int ret = 0;
636
637         if (!(hdev = hci_dev_get(dev)))
638                 return -ENODEV;
639
640         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
641
642         hci_dev_put(hdev);
643
644         return ret;
645 }
646
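/* Handle the simple device control ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETPTYPE, ...); most of them either issue the matching HCI
 * request or just update fields in struct hci_dev. */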
647 int hci_dev_cmd(unsigned int cmd, void __user *arg)
648 {
649         struct hci_dev *hdev;
650         struct hci_dev_req dr;
651         int err = 0;
652
653         if (copy_from_user(&dr, arg, sizeof(dr)))
654                 return -EFAULT;
655
656         if (!(hdev = hci_dev_get(dr.dev_id)))
657                 return -ENODEV;
658
659         switch (cmd) {
660         case HCISETAUTH:
661                 err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
662                 break;
663
664         case HCISETENCRYPT:
665                 if (!lmp_encrypt_capable(hdev)) {
666                         err = -EOPNOTSUPP;
667                         break;
668                 }
669
670                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
671                         /* Auth must be enabled first */
672                         err = hci_request(hdev, hci_auth_req,
673                                         dr.dev_opt, HCI_INIT_TIMEOUT);
674                         if (err)
675                                 break;
676                 }
677
678                 err = hci_request(hdev, hci_encrypt_req,
679                                         dr.dev_opt, HCI_INIT_TIMEOUT);
680                 break;
681
682         case HCISETSCAN:
683                 err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
684                 break;
685
686         case HCISETPTYPE:
687                 hdev->pkt_type = (__u16) dr.dev_opt;
688                 break;
689
690         case HCISETLINKPOL:
691                 hdev->link_policy = (__u16) dr.dev_opt;
692                 break;
693
694         case HCISETLINKMODE:
695                 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
696                 break;
697
698         case HCISETACLMTU:
699                 hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
700                 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
701                 break;
702
703         case HCISETSCOMTU:
704                 hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
705                 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
706                 break;
707
708         default:
709                 err = -EINVAL;
710                 break;
711         }
712         hci_dev_put(hdev);
713         return err;
714 }
715
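/* HCIGETDEVLIST ioctl helper: return the id and flags of up to dev_num
 * registered devices. */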
716 int hci_get_dev_list(void __user *arg)
717 {
718         struct hci_dev_list_req *dl;
719         struct hci_dev_req *dr;
720         struct list_head *p;
721         int n = 0, size, err;
722         __u16 dev_num;
723
724         if (get_user(dev_num, (__u16 __user *) arg))
725                 return -EFAULT;
726
727         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
728                 return -EINVAL;
729
730         size = sizeof(*dl) + dev_num * sizeof(*dr);
731
732         if (!(dl = kmalloc(size, GFP_KERNEL)))
733                 return -ENOMEM;
734
735         dr = dl->dev_req;
736
737         read_lock_bh(&hci_dev_list_lock);
738         list_for_each(p, &hci_dev_list) {
739                 struct hci_dev *hdev;
740                 hdev = list_entry(p, struct hci_dev, list);
741                 (dr + n)->dev_id  = hdev->id;
742                 (dr + n)->dev_opt = hdev->flags;
743                 if (++n >= dev_num)
744                         break;
745         }
746         read_unlock_bh(&hci_dev_list_lock);
747
748         dl->dev_num = n;
749         size = sizeof(*dl) + n * sizeof(*dr);
750
751         err = copy_to_user(arg, dl, size);
752         kfree(dl);
753
754         return err ? -EFAULT : 0;
755 }
756
757 int hci_get_dev_info(void __user *arg)
758 {
759         struct hci_dev *hdev;
760         struct hci_dev_info di;
761         int err = 0;
762
763         if (copy_from_user(&di, arg, sizeof(di)))
764                 return -EFAULT;
765
766         if (!(hdev = hci_dev_get(di.dev_id)))
767                 return -ENODEV;
768
769         strcpy(di.name, hdev->name);
770         di.bdaddr   = hdev->bdaddr;
771         di.type     = hdev->type;
772         di.flags    = hdev->flags;
773         di.pkt_type = hdev->pkt_type;
774         di.acl_mtu  = hdev->acl_mtu;
775         di.acl_pkts = hdev->acl_pkts;
776         di.sco_mtu  = hdev->sco_mtu;
777         di.sco_pkts = hdev->sco_pkts;
778         di.link_policy = hdev->link_policy;
779         di.link_mode   = hdev->link_mode;
780
781         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
782         memcpy(&di.features, &hdev->features, sizeof(di.features));
783
784         if (copy_to_user(arg, &di, sizeof(di)))
785                 err = -EFAULT;
786
787         hci_dev_put(hdev);
788
789         return err;
790 }
791
792 /* ---- Interface to HCI drivers ---- */
793
794 /* Alloc HCI device */
795 struct hci_dev *hci_alloc_dev(void)
796 {
797         struct hci_dev *hdev;
798
799         hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
800         if (!hdev)
801                 return NULL;
802
803         memset(hdev, 0, sizeof(struct hci_dev));
804
805         skb_queue_head_init(&hdev->driver_init);
806
807         return hdev;
808 }
809 EXPORT_SYMBOL(hci_alloc_dev);
810
811 /* Free HCI device */
812 void hci_free_dev(struct hci_dev *hdev)
813 {
814         skb_queue_purge(&hdev->driver_init);
815
816         /* will free via class release */
817         class_device_put(&hdev->class_dev);
818 }
819 EXPORT_SYMBOL(hci_free_dev);
820
821 /* Register HCI device */
822 int hci_register_dev(struct hci_dev *hdev)
823 {
824         struct list_head *head = &hci_dev_list, *p;
825         int id = 0;
826
827         BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
828
829         if (!hdev->open || !hdev->close || !hdev->destruct)
830                 return -EINVAL;
831
832         write_lock_bh(&hci_dev_list_lock);
833
834         /* Find first available device id */
835         list_for_each(p, &hci_dev_list) {
836                 if (list_entry(p, struct hci_dev, list)->id != id)
837                         break;
838                 head = p; id++;
839         }
840         
841         sprintf(hdev->name, "hci%d", id);
842         hdev->id = id;
843         list_add(&hdev->list, head);
844
845         atomic_set(&hdev->refcnt, 1);
846         spin_lock_init(&hdev->lock);
847
848         hdev->flags = 0;
849         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
850         hdev->link_mode = (HCI_LM_ACCEPT);
851
852         tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
853         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
854         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
855
856         skb_queue_head_init(&hdev->rx_q);
857         skb_queue_head_init(&hdev->cmd_q);
858         skb_queue_head_init(&hdev->raw_q);
859
860         init_waitqueue_head(&hdev->req_wait_q);
861         init_MUTEX(&hdev->req_lock);
862
863         inquiry_cache_init(hdev);
864
865         hci_conn_hash_init(hdev);
866
867         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
868
869         atomic_set(&hdev->promisc, 0);
870
871         write_unlock_bh(&hci_dev_list_lock);
872
873         hci_register_sysfs(hdev);
874
875         hci_notify(hdev, HCI_DEV_REG);
876
877         return id;
878 }
879 EXPORT_SYMBOL(hci_register_dev);
880
881 /* Unregister HCI device */
882 int hci_unregister_dev(struct hci_dev *hdev)
883 {
884         BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
885
886         hci_unregister_sysfs(hdev);
887
888         write_lock_bh(&hci_dev_list_lock);
889         list_del(&hdev->list);
890         write_unlock_bh(&hci_dev_list_lock);
891
892         hci_dev_do_close(hdev);
893
894         hci_notify(hdev, HCI_DEV_UNREG);
895
896         __hci_dev_put(hdev);
897         return 0;
898 }
899 EXPORT_SYMBOL(hci_unregister_dev);
900
901 /* Suspend HCI device */
902 int hci_suspend_dev(struct hci_dev *hdev)
903 {
904         hci_notify(hdev, HCI_DEV_SUSPEND);
905         return 0;
906 }
907 EXPORT_SYMBOL(hci_suspend_dev);
908
909 /* Resume HCI device */
910 int hci_resume_dev(struct hci_dev *hdev)
911 {
912         hci_notify(hdev, HCI_DEV_RESUME);
913         return 0;
914 }
915 EXPORT_SYMBOL(hci_resume_dev);
916
917 /* ---- Interface to upper protocols ---- */
918
919 /* Register/Unregister protocols.
920  * hci_task_lock is used to ensure that no tasks are running. */
921 int hci_register_proto(struct hci_proto *hp)
922 {
923         int err = 0;
924
925         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
926
927         if (hp->id >= HCI_MAX_PROTO)
928                 return -EINVAL;
929
930         write_lock_bh(&hci_task_lock);
931
932         if (!hci_proto[hp->id])
933                 hci_proto[hp->id] = hp;
934         else
935                 err = -EEXIST;
936
937         write_unlock_bh(&hci_task_lock);
938
939         return err;
940 }
941 EXPORT_SYMBOL(hci_register_proto);
942
943 int hci_unregister_proto(struct hci_proto *hp)
944 {
945         int err = 0;
946
947         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
948
949         if (hp->id >= HCI_MAX_PROTO)
950                 return -EINVAL;
951
952         write_lock_bh(&hci_task_lock);
953
954         if (hci_proto[hp->id])
955                 hci_proto[hp->id] = NULL;
956         else
957                 err = -ENOENT;
958
959         write_unlock_bh(&hci_task_lock);
960
961         return err;
962 }
963 EXPORT_SYMBOL(hci_unregister_proto);
964
965 int hci_register_cb(struct hci_cb *cb)
966 {
967         BT_DBG("%p name %s", cb, cb->name);
968
969         write_lock_bh(&hci_cb_list_lock);
970         list_add(&cb->list, &hci_cb_list);
971         write_unlock_bh(&hci_cb_list_lock);
972
973         return 0;
974 }
975 EXPORT_SYMBOL(hci_register_cb);
976
977 int hci_unregister_cb(struct hci_cb *cb)
978 {
979         BT_DBG("%p name %s", cb, cb->name);
980
981         write_lock_bh(&hci_cb_list_lock);
982         list_del(&cb->list);
983         write_unlock_bh(&hci_cb_list_lock);
984
985         return 0;
986 }
987 EXPORT_SYMBOL(hci_unregister_cb);
988
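/* Hand a single frame to the driver. If the device is in promiscuous mode a
 * timestamped copy is delivered to the raw HCI sockets first. */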
989 static int hci_send_frame(struct sk_buff *skb)
990 {
991         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
992
993         if (!hdev) {
994                 kfree_skb(skb);
995                 return -ENODEV;
996         }
997
998         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
999
1000         if (atomic_read(&hdev->promisc)) {
1001                 /* Time stamp */
1002                 __net_timestamp(skb);
1003
1004                 hci_send_to_sock(hdev, skb);
1005         }
1006
1007         /* Get rid of skb owner, prior to sending to the driver. */
1008         skb_orphan(skb);
1009
1010         return hdev->send(skb);
1011 }
1012
1013 /* Send HCI command */
1014 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
1015 {
1016         int len = HCI_COMMAND_HDR_SIZE + plen;
1017         struct hci_command_hdr *hdr;
1018         struct sk_buff *skb;
1019
1020         BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
1021
1022         skb = bt_skb_alloc(len, GFP_ATOMIC);
1023         if (!skb) {
1024                 BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
1025                 return -ENOMEM;
1026         }
1027
1028         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1029         hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
1030         hdr->plen   = plen;
1031
1032         if (plen)
1033                 memcpy(skb_put(skb, plen), param, plen);
1034
1035         BT_DBG("skb len %d", skb->len);
1036
1037         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1038         skb->dev = (void *) hdev;
1039         skb_queue_tail(&hdev->cmd_q, skb);
1040         hci_sched_cmd(hdev);
1041
1042         return 0;
1043 }
1044
1045 /* Get data from the previously sent command */
1046 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1047 {
1048         struct hci_command_hdr *hdr;
1049
1050         if (!hdev->sent_cmd)
1051                 return NULL;
1052
1053         hdr = (void *) hdev->sent_cmd->data;
1054
1055         if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
1056                 return NULL;
1057
1058         BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1059
1060         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1061 }
1062
1063 /* Send ACL data */
1064 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1065 {
1066         struct hci_acl_hdr *hdr;
1067         int len = skb->len;
1068
1069         hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
1070         hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
1071         hdr->dlen   = __cpu_to_le16(len);
1072
1073         skb->h.raw = (void *) hdr;
1074 }
1075
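/* Queue an ACL frame for transmission. The first buffer is tagged ACL_START;
 * any fragments on its frag_list get ACL_CONT headers and are queued
 * atomically so they stay together on the wire. */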
1076 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1077 {
1078         struct hci_dev *hdev = conn->hdev;
1079         struct sk_buff *list;
1080
1081         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1082
1083         skb->dev = (void *) hdev;
1084         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1085         hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1086
1087         if (!(list = skb_shinfo(skb)->frag_list)) {
1088                 /* Non fragmented */
1089                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1090
1091                 skb_queue_tail(&conn->data_q, skb);
1092         } else {
1093                 /* Fragmented */
1094                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1095
1096                 skb_shinfo(skb)->frag_list = NULL;
1097
1098                 /* Queue all fragments atomically */
1099                 spin_lock_bh(&conn->data_q.lock);
1100
1101                 __skb_queue_tail(&conn->data_q, skb);
1102                 do {
1103                         skb = list; list = list->next;
1104                         
1105                         skb->dev = (void *) hdev;
1106                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1107                         hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1108
1109                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1110
1111                         __skb_queue_tail(&conn->data_q, skb);
1112                 } while (list);
1113
1114                 spin_unlock_bh(&conn->data_q.lock);
1115         }
1116
1117         hci_sched_tx(hdev);
1118         return 0;
1119 }
1120 EXPORT_SYMBOL(hci_send_acl);
1121
1122 /* Send SCO data */
1123 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1124 {
1125         struct hci_dev *hdev = conn->hdev;
1126         struct hci_sco_hdr hdr;
1127
1128         BT_DBG("%s len %d", hdev->name, skb->len);
1129
1130         if (skb->len > hdev->sco_mtu) {
1131                 kfree_skb(skb);
1132                 return -EINVAL;
1133         }
1134
1135         hdr.handle = __cpu_to_le16(conn->handle);
1136         hdr.dlen   = skb->len;
1137
1138         skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
1139         memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
1140
1141         skb->dev = (void *) hdev;
1142         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1143         skb_queue_tail(&conn->data_q, skb);
1144         hci_sched_tx(hdev);
1145         return 0;
1146 }
1147 EXPORT_SYMBOL(hci_send_sco);
1148
1149 /* ---- HCI TX task (outgoing data) ---- */
1150
1151 /* HCI Connection scheduler */
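/* Pick the connection of the given type with the fewest packets in flight
 * and give it a fair share of the remaining controller buffer credits. */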
1152 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1153 {
1154         struct hci_conn_hash *h = &hdev->conn_hash;
1155         struct hci_conn  *conn = NULL;
1156         int num = 0, min = ~0;
1157         struct list_head *p;
1158
1159         /* We don't have to lock device here. Connections are always 
1160          * added and removed with TX task disabled. */
1161         list_for_each(p, &h->list) {
1162                 struct hci_conn *c;
1163                 c = list_entry(p, struct hci_conn, list);
1164
1165                 if (c->type != type || c->state != BT_CONNECTED
1166                                 || skb_queue_empty(&c->data_q))
1167                         continue;
1168                 num++;
1169
1170                 if (c->sent < min) {
1171                         min  = c->sent;
1172                         conn = c;
1173                 }
1174         }
1175
1176         if (conn) {
1177                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1178                 int q = cnt / num;
1179                 *quote = q ? q : 1;
1180         } else
1181                 *quote = 0;
1182
1183         BT_DBG("conn %p quote %d", conn, *quote);
1184         return conn;
1185 }
1186
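/* ACL transmit timeout: the controller stopped returning buffer credits, so
 * disconnect every ACL link that still has unacknowledged packets. */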
1187 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1188 {
1189         struct hci_conn_hash *h = &hdev->conn_hash;
1190         struct list_head *p;
1191         struct hci_conn  *c;
1192
1193         BT_ERR("%s ACL tx timeout", hdev->name);
1194
1195         /* Kill stalled connections */
1196         list_for_each(p, &h->list) {
1197                 c = list_entry(p, struct hci_conn, list);
1198                 if (c->type == ACL_LINK && c->sent) {
1199                         BT_ERR("%s killing stalled ACL connection %s",
1200                                 hdev->name, batostr(&c->dst));
1201                         hci_acl_disconn(c, 0x13);
1202                 }
1203         }
1204 }
1205
1206 static inline void hci_sched_acl(struct hci_dev *hdev)
1207 {
1208         struct hci_conn *conn;
1209         struct sk_buff *skb;
1210         int quote;
1211
1212         BT_DBG("%s", hdev->name);
1213
1214         if (!test_bit(HCI_RAW, &hdev->flags)) {
1215                 /* ACL tx timeout must be longer than maximum
1216                  * link supervision timeout (40.9 seconds) */
1217                 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1218                         hci_acl_tx_to(hdev);
1219         }
1220
1221         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1222                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1223                         BT_DBG("skb %p len %d", skb, skb->len);
1224                         hci_send_frame(skb);
1225                         hdev->acl_last_tx = jiffies;
1226
1227                         hdev->acl_cnt--;
1228                         conn->sent++;
1229                 }
1230         }
1231 }
1232
1233 /* Schedule SCO */
1234 static inline void hci_sched_sco(struct hci_dev *hdev)
1235 {
1236         struct hci_conn *conn;
1237         struct sk_buff *skb;
1238         int quote;
1239
1240         BT_DBG("%s", hdev->name);
1241
1242         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1243                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1244                         BT_DBG("skb %p len %d", skb, skb->len);
1245                         hci_send_frame(skb);
1246
1247                         conn->sent++;
1248                         if (conn->sent == ~0)
1249                                 conn->sent = 0;
1250                 }
1251         }
1252 }
1253
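/* TX tasklet: schedule pending ACL and SCO traffic, then flush the raw
 * (unknown type) packet queue. */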
1254 static void hci_tx_task(unsigned long arg)
1255 {
1256         struct hci_dev *hdev = (struct hci_dev *) arg;
1257         struct sk_buff *skb;
1258
1259         read_lock(&hci_task_lock);
1260
1261         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1262
1263         /* Schedule queues and send stuff to HCI driver */
1264
1265         hci_sched_acl(hdev);
1266
1267         hci_sched_sco(hdev);
1268
1269         /* Send next queued raw (unknown type) packet */
1270         while ((skb = skb_dequeue(&hdev->raw_q)))
1271                 hci_send_frame(skb);
1272
1273         read_unlock(&hci_task_lock);
1274 }
1275
1276 /* ----- HCI RX task (incoming data processing) ----- */
1277
1278 /* ACL data packet */
1279 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1280 {
1281         struct hci_acl_hdr *hdr = (void *) skb->data;
1282         struct hci_conn *conn;
1283         __u16 handle, flags;
1284
1285         skb_pull(skb, HCI_ACL_HDR_SIZE);
1286
1287         handle = __le16_to_cpu(hdr->handle);
1288         flags  = hci_flags(handle);
1289         handle = hci_handle(handle);
1290
1291         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1292
1293         hdev->stat.acl_rx++;
1294
1295         hci_dev_lock(hdev);
1296         conn = hci_conn_hash_lookup_handle(hdev, handle);
1297         hci_dev_unlock(hdev);
1298         
1299         if (conn) {
1300                 register struct hci_proto *hp;
1301
1302                 /* Send to upper protocol */
1303                 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1304                         hp->recv_acldata(conn, skb, flags);
1305                         return;
1306                 }
1307         } else {
1308                 BT_ERR("%s ACL packet for unknown connection handle %d", 
1309                         hdev->name, handle);
1310         }
1311
1312         kfree_skb(skb);
1313 }
1314
1315 /* SCO data packet */
1316 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1317 {
1318         struct hci_sco_hdr *hdr = (void *) skb->data;
1319         struct hci_conn *conn;
1320         __u16 handle;
1321
1322         skb_pull(skb, HCI_SCO_HDR_SIZE);
1323
1324         handle = __le16_to_cpu(hdr->handle);
1325
1326         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1327
1328         hdev->stat.sco_rx++;
1329
1330         hci_dev_lock(hdev);
1331         conn = hci_conn_hash_lookup_handle(hdev, handle);
1332         hci_dev_unlock(hdev);
1333
1334         if (conn) {
1335                 register struct hci_proto *hp;
1336
1337                 /* Send to upper protocol */
1338                 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1339                         hp->recv_scodata(conn, skb);
1340                         return;
1341                 }
1342         } else {
1343                 BT_ERR("%s SCO packet for unknown connection handle %d", 
1344                         hdev->name, handle);
1345         }
1346
1347         kfree_skb(skb);
1348 }
1349
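/* RX tasklet: drain rx_q, give promiscuous sockets a copy of each frame,
 * drop everything while the device is in raw mode (and data packets during
 * init), and dispatch events, ACL and SCO data to their handlers. */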
1350 static void hci_rx_task(unsigned long arg)
1351 {
1352         struct hci_dev *hdev = (struct hci_dev *) arg;
1353         struct sk_buff *skb;
1354
1355         BT_DBG("%s", hdev->name);
1356
1357         read_lock(&hci_task_lock);
1358
1359         while ((skb = skb_dequeue(&hdev->rx_q))) {
1360                 if (atomic_read(&hdev->promisc)) {
1361                         /* Send copy to the sockets */
1362                         hci_send_to_sock(hdev, skb);
1363                 }
1364
1365                 if (test_bit(HCI_RAW, &hdev->flags)) {
1366                         kfree_skb(skb);
1367                         continue;
1368                 }
1369
1370                 if (test_bit(HCI_INIT, &hdev->flags)) {
1371                         /* Don't process data packets in this state. */
1372                         switch (bt_cb(skb)->pkt_type) {
1373                         case HCI_ACLDATA_PKT:
1374                         case HCI_SCODATA_PKT:
1375                                 kfree_skb(skb);
1376                                 continue;
1377                         }
1378                 }
1379
1380                 /* Process frame */
1381                 switch (bt_cb(skb)->pkt_type) {
1382                 case HCI_EVENT_PKT:
1383                         hci_event_packet(hdev, skb);
1384                         break;
1385
1386                 case HCI_ACLDATA_PKT:
1387                         BT_DBG("%s ACL data packet", hdev->name);
1388                         hci_acldata_packet(hdev, skb);
1389                         break;
1390
1391                 case HCI_SCODATA_PKT:
1392                         BT_DBG("%s SCO data packet", hdev->name);
1393                         hci_scodata_packet(hdev, skb);
1394                         break;
1395
1396                 default:
1397                         kfree_skb(skb);
1398                         break;
1399                 }
1400         }
1401
1402         read_unlock(&hci_task_lock);
1403 }
1404
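/* Command tasklet: if the controller has command credit (cmd_cnt), send the
 * next queued command and keep a clone in sent_cmd for hci_sent_cmd_data().
 * A stuck controller is recovered by restoring the credit after a one
 * second command timeout. */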
1405 static void hci_cmd_task(unsigned long arg)
1406 {
1407         struct hci_dev *hdev = (struct hci_dev *) arg;
1408         struct sk_buff *skb;
1409
1410         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1411
1412         if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1413                 BT_ERR("%s command tx timeout", hdev->name);
1414                 atomic_set(&hdev->cmd_cnt, 1);
1415         }
1416
1417         /* Send queued commands */
1418         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1419                 if (hdev->sent_cmd)
1420                         kfree_skb(hdev->sent_cmd);
1421
1422                 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1423                         atomic_dec(&hdev->cmd_cnt);
1424                         hci_send_frame(skb);
1425                         hdev->cmd_last_tx = jiffies;
1426                 } else {
1427                         skb_queue_head(&hdev->cmd_q, skb);
1428                         hci_sched_cmd(hdev);
1429                 }
1430         }
1431 }