/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
        notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
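
/*
 * Illustrative sketch (not part of the original file): callers drive the
 * request machinery by pairing a small "request" callback, which queues
 * one or more commands, with hci_request(), which sleeps until
 * hci_req_complete() is called from the event handler or the timeout
 * expires. hci_scan_req() below is used exactly this way, e.g.:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				HCI_INIT_TIMEOUT);
 */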

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __u16 param;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                skb->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = __cpu_to_le16(0xffff);
                cp.sco_max_pkt = __cpu_to_le16(0xffff);
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        {
                struct hci_cp_set_event_flt cp;
                cp.flt_type  = HCI_FLT_CLEAR_ALL;
                hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
        }

        /* Page timeout ~20 secs */
        param = __cpu_to_le16(0x8000);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = __cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
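
/*
 * Note on the timeout values above: HCI timing parameters are expressed
 * in baseband slots of 0.625 ms each, sent little-endian. So 0x8000
 * slots is 32768 * 0.625 ms = 20.48 s for the page timeout, and 0x7d00
 * slots is 32000 * 0.625 ms = 20.0 s for the connection accept timeout,
 * hence the "~20 secs" comments.
 */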

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_dev_get);
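
/*
 * Usage note (illustrative): every successful hci_dev_get() must be
 * balanced by hci_dev_put() once the caller is done with the device, as
 * the ioctl helpers below do:
 *
 *	if (!(hdev = hci_dev_get(dev_id)))
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */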

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                memset(e, 0, sizeof(struct inquiry_entry));
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}
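
/*
 * Design note: the inquiry cache is a simple singly linked list keyed
 * by bdaddr, with new entries pushed at the head. GFP_ATOMIC is used
 * because updates arrive from the inquiry result event path, where
 * sleeping is not allowed; readers and writers are serialized by the
 * device lock taken by the callers.
 */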

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * 2 * HZ;
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries. */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so dump into a temporary buffer first
         * and then copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
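
/*
 * Userspace sketch (illustrative, assuming the usual BlueZ HCIINQUIRY
 * ioctl layout on a raw HCI socket: a struct hci_inquiry_req header
 * immediately followed by room for the inquiry_info results):
 *
 *	struct { struct hci_inquiry_req ir; struct inquiry_info info[255]; } buf;
 *	memset(&buf, 0, sizeof(buf));
 *	buf.ir.dev_id  = 0;		// hci0
 *	buf.ir.length  = 8;		// 8 * 1.28 s inquiry
 *	buf.ir.num_rsp = 255;
 *	buf.ir.lap[0] = 0x33; buf.ir.lap[1] = 0x8b; buf.ir.lap[2] = 0x9e; // GIAC
 *	if (ioctl(sock, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */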

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
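
/*
 * Bring-up note (illustrative): hci_dev_open() is normally reached via
 * the HCIDEVUP ioctl on an HCI socket. Unless the device is marked raw,
 * it runs hci_init_req() under the HCI_INIT flag, so the basic setup
 * commands (reset, read features, buffer size, bdaddr, ...) complete
 * before HCI_UP is announced to the notifier chain.
 */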

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0, HZ/4);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req,
                                        dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETLINKPOL:
                hdev->link_policy = (__u16) dr.dev_opt;
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }
        hci_dev_put(hdev);
        return err;
}
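
/*
 * Note on HCISETACLMTU/HCISETSCOMTU (illustrative, little-endian hosts):
 * dev_opt carries two 16-bit values packed into one __u32, the MTU in
 * the high half and the packet count in the low half. Userspace would
 * pack it as, e.g.:
 *
 *	dr.dev_opt = ((__u32) mtu << 16) | pkts;
 *
 * which the pointer arithmetic above unpacks: (__u16 *)&dr.dev_opt + 1
 * addresses the high half (MTU) and + 0 the low half (packet count).
 */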

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        memset(hdev, 0, sizeof(struct hci_dev));

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via class release */
        class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
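
/*
 * Driver-side sketch (illustrative; the mydrv_* names are hypothetical):
 * a minimal transport driver allocates a device, fills in its callbacks
 * and registers it. Only open, close and destruct are mandatory
 * (checked above); send is needed to actually move frames.
 *
 *	static int mydrv_open(struct hci_dev *hdev)  { return 0; }
 *	static int mydrv_close(struct hci_dev *hdev) { return 0; }
 *	static void mydrv_destruct(struct hci_dev *hdev) { }
 *	static int mydrv_send(struct sk_buff *skb)
 *	{
 *		struct hci_dev *hdev = (struct hci_dev *) skb->dev;
 *
 *		// hand skb->pkt_type + skb->data to the hardware here
 *		hdev->stat.byte_tx += skb->len;
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->type     = HCI_USB;	// bus type of the transport
 *	hdev->open     = mydrv_open;
 *	hdev->close    = mydrv_close;
 *	hdev->send     = mydrv_send;
 *	hdev->destruct = mydrv_destruct;
 *	hdev->owner    = THIS_MODULE;
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 */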

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        hci_unregister_sysfs(hdev);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        hci_notify(hdev, HCI_DEV_UNREG);

        __hci_dev_put(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                do_gettimeofday(&skb->stamp);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        skb->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
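
/*
 * Usage sketch (illustrative, mirroring hci_scan_req() above): command
 * parameters are passed as a little-endian blob of plen bytes, e.g. to
 * enable page and inquiry scan:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_task() below sends it when
 * the controller has a free command slot.
 */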

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
                return NULL;

        BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
        hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = __cpu_to_le16(len);

        skb->h.raw = (void *) hdr;
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        skb->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);
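
/*
 * Fragmentation note: upper layers may hand in an skb whose frag_list
 * chains the continuation fragments. The head fragment goes out with
 * ACL_START in its packet boundary flags, each continuation with
 * ACL_CONT, and the whole chain is queued under the data_q lock so the
 * TX scheduler can never interleave another packet between the
 * fragments of one ACL PDU.
 */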

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = __cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
        memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        skb->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || c->state != BT_CONNECTED
                                || skb_queue_empty(&c->data_q))
                        continue;
                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
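
/*
 * Worked example: the scheduler picks the ready connection with the
 * fewest packets in flight and gives it an even share of the free
 * controller buffers. With acl_cnt = 8 free ACL buffers and num = 3
 * connections that have queued data, quote = 8 / 3 = 2 packets for this
 * round; a share that rounds down to 0 is bumped to 1 so a busy link
 * can never be starved completely.
 */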

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (skb->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (skb->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                if (hdev->sent_cmd)
                        kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}
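
/*
 * Flow-control note: cmd_cnt tracks how many commands the controller is
 * willing to accept. It is decremented each time a command goes out and
 * replenished when Command Complete/Command Status events report free
 * command slots (handled in hci_event.c). If no credit comes back
 * within a second of the last transmit, the count is forced back to 1
 * above, so a lost event cannot wedge the command queue forever.
 */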