2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/module.h>
28 #include <linux/kmod.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/skbuff.h>
39 #include <linux/interrupt.h>
40 #include <linux/notifier.h>
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45 #include <asm/unaligned.h>
47 #include <net/bluetooth/bluetooth.h>
48 #include <net/bluetooth/hci_core.h>
50 #ifndef CONFIG_BT_HCI_CORE_DEBUG
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
60 static DEFINE_RWLOCK(hci_task_lock);
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
71 #define HCI_MAX_PROTO 2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77 /* ---- HCI notifications ---- */
/* ---- HCI notifier chain helpers ----
 * NOTE(review): this extract is garbled — lines carry stray leading numbers
 * and some lines (braces, returns) are missing; comments cover visible code only. */
/* Register a callback on the global atomic HCI notifier chain. */
79 int hci_register_notifier(struct notifier_block *nb)
81 return atomic_notifier_chain_register(&hci_notifier, nb);
/* Remove a previously registered notifier callback. */
84 int hci_unregister_notifier(struct notifier_block *nb)
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
/* Broadcast a device event (HCI_DEV_UP, HCI_DEV_DOWN, ...) to all notifiers. */
89 static void hci_notify(struct hci_dev *hdev, int event)
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
94 /* ---- HCI requests ---- */
/* Complete a pending synchronous HCI request: record the result, mark the
 * request done and wake the sleeper in __hci_request(). */
96 void hci_req_complete(struct hci_dev *hdev, int result)
98 BT_DBG("%s result 0x%2.2x", hdev->name, result);
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = result;
102 hdev->req_status = HCI_REQ_DONE;
103 wake_up_interruptible(&hdev->req_wait_q);
/* Cancel a pending synchronous HCI request with error 'err' (caller passes a
 * positive errno; see HCI_REQ_CANCELED handling in __hci_request). */
107 static void hci_req_cancel(struct hci_dev *hdev, int err)
109 BT_DBG("%s err 0x%2.2x", hdev->name, err);
111 if (hdev->req_status == HCI_REQ_PEND) {
112 hdev->req_result = err;
113 hdev->req_status = HCI_REQ_CANCELED;
114 wake_up_interruptible(&hdev->req_wait_q);
118 /* Execute request and wait for completion. */
/* Marks the request pending, then sleeps interruptibly for up to 'timeout'
 * jiffies on req_wait_q until hci_req_complete()/hci_req_cancel() changes
 * req_status; maps req_status/req_result to a negative errno.
 * NOTE(review): several lines (err declaration, the req() invocation, the
 * signal-pending return, default switch case, final return) are missing
 * from this extract. Caller must hold the request lock (see hci_request). */
119 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
120 unsigned long opt, __u32 timeout)
122 DECLARE_WAITQUEUE(wait, current);
125 BT_DBG("%s start", hdev->name);
127 hdev->req_status = HCI_REQ_PEND;
129 add_wait_queue(&hdev->req_wait_q, &wait);
130 set_current_state(TASK_INTERRUPTIBLE);
133 schedule_timeout(timeout);
135 remove_wait_queue(&hdev->req_wait_q, &wait);
137 if (signal_pending(current))
140 switch (hdev->req_status) {
/* HCI status code from the controller, translated to a negative errno. */
142 err = -bt_err(hdev->req_result);
145 case HCI_REQ_CANCELED:
/* req_result holds a positive errno here (set by hci_req_cancel). */
146 err = -hdev->req_result;
/* Reset request bookkeeping for the next caller. */
154 hdev->req_status = hdev->req_result = 0;
156 BT_DBG("%s end: err %d", hdev->name, err);
/* Serialized wrapper around __hci_request(): takes the per-device request
 * lock so only one request runs at a time.
 * NOTE(review): the hci_req_lock() call and return are missing from this
 * extract — only the unlock is visible. */
161 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
162 unsigned long opt, __u32 timeout)
166 /* Serialize all requests */
168 ret = __hci_request(hdev, req, opt, timeout);
169 hci_req_unlock(hdev);
/* Request callback: queue a single HCI_Reset command ('opt' unused). */
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176 BT_DBG("%s %ld", hdev->name, opt);
179 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
188 BT_DBG("%s %ld", hdev->name, opt);
190 /* Driver initialization */
192 /* Special commands */
193 while ((skb = skb_dequeue(&hdev->driver_init))) {
194 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195 skb->dev = (void *) hdev;
196 skb_queue_tail(&hdev->cmd_q, skb);
199 skb_queue_purge(&hdev->driver_init);
201 /* Mandatory initialization */
204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
205 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
207 /* Read Local Supported Features */
208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
210 /* Read Local Version */
211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
214 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
217 /* Host buffer size */
219 struct hci_cp_host_buffer_size cp;
220 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
221 cp.sco_mtu = HCI_MAX_SCO_SIZE;
222 cp.acl_max_pkt = cpu_to_le16(0xffff);
223 cp.sco_max_pkt = cpu_to_le16(0xffff);
224 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
228 /* Read BD Address */
229 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
231 /* Read Class of Device */
232 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
234 /* Read Local Name */
235 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
237 /* Read Voice Setting */
238 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
240 /* Optional initialization */
242 /* Clear Event Filters */
243 flt_type = HCI_FLT_CLEAR_ALL;
244 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
246 /* Page timeout ~20 secs */
247 param = cpu_to_le16(0x8000);
248 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m);
250 /* Connection accept timeout ~20 secs */
251 param = cpu_to_le16(0x7d00);
252 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
/* Request callback: write the scan-enable mode ('opt' carries the scan byte;
 * the local 'scan' declaration/assignment is missing from this extract). */
255 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
259 BT_DBG("%s %x", hdev->name, scan);
261 /* Inquiry and Page scans */
262 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: write the authentication-enable setting from 'opt'. */
265 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
269 BT_DBG("%s %x", hdev->name, auth);
272 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: write the encryption mode from 'opt'. */
275 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
279 BT_DBG("%s %x", hdev->name, encrypt);
282 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
285 /* Get HCI device by index.
286 * Device is held on return. */
/* Walks hci_dev_list under the read lock; takes a reference on a match via
 * hci_dev_hold(). NOTE(review): the list cursor declaration, loop break and
 * final 'return hdev' are missing from this extract. */
287 struct hci_dev *hci_dev_get(int index)
289 struct hci_dev *hdev = NULL;
297 read_lock(&hci_dev_list_lock);
298 list_for_each(p, &hci_dev_list) {
299 struct hci_dev *d = list_entry(p, struct hci_dev, list);
300 if (d->id == index) {
301 hdev = hci_dev_hold(d);
305 read_unlock(&hci_dev_list_lock);
309 /* ---- Inquiry support ---- */
/* Drop every cached inquiry entry. NOTE(review): the loop that walks and
 * frees the 'next' chain is missing from this extract. */
310 static void inquiry_cache_flush(struct hci_dev *hdev)
312 struct inquiry_cache *cache = &hdev->inq_cache;
313 struct inquiry_entry *next = cache->list, *e;
315 BT_DBG("cache %p", cache);
/* Find a cached inquiry entry by Bluetooth address; linear scan of the
 * singly linked cache list (return statements missing from this extract). */
324 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
326 struct inquiry_cache *cache = &hdev->inq_cache;
327 struct inquiry_entry *e;
329 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
331 for (e = cache->list; e; e = e->next)
332 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Insert or refresh an inquiry result: reuse an existing entry for the same
 * bdaddr, otherwise prepend a new GFP_ATOMIC allocation; stamp both entry
 * and cache with jiffies. */
337 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
339 struct inquiry_cache *cache = &hdev->inq_cache;
340 struct inquiry_entry *e;
342 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
344 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
345 /* Entry not in the cache. Add new one. */
346 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
348 e->next = cache->list;
352 memcpy(&e->data, data, sizeof(*data));
353 e->timestamp = jiffies;
354 cache->timestamp = jiffies;
/* Serialize up to 'num' cached entries into 'buf' as struct inquiry_info
 * records; returns the number copied ('copied' declaration and the info++
 * advance are missing from this extract). */
357 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_info *info = (struct inquiry_info *) buf;
361 struct inquiry_entry *e;
364 for (e = cache->list; e && copied < num; e = e->next, copied++) {
365 struct inquiry_data *data = &e->data;
366 bacpy(&info->bdaddr, &data->bdaddr);
367 info->pscan_rep_mode = data->pscan_rep_mode;
368 info->pscan_period_mode = data->pscan_period_mode;
369 info->pscan_mode = data->pscan_mode;
370 memcpy(info->dev_class, data->dev_class, 3);
371 info->clock_offset = data->clock_offset;
375 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an HCI Inquiry using the parameters passed via
 * 'opt' (a struct hci_inquiry_req pointer); skipped if already inquiring. */
379 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
381 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
382 struct hci_cp_inquiry cp;
384 BT_DBG("%s", hdev->name);
386 if (test_bit(HCI_INQUIRY, &hdev->flags))
390 memcpy(&cp.lap, &ir->lap, 3);
391 cp.length = ir->length;
392 cp.num_rsp = ir->num_rsp;
393 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* ioctl backend: run (or reuse a fresh cache for) an inquiry and copy the
 * results to userspace. NOTE(review): 'buf'/'timeo' declarations, the
 * do_inquiry assignment, error-path gotos and kfree/hci_dev_put cleanup
 * are missing from this extract. */
396 int hci_inquiry(void __user *arg)
398 __u8 __user *ptr = arg;
399 struct hci_inquiry_req ir;
400 struct hci_dev *hdev;
401 int err = 0, do_inquiry = 0, max_rsp;
405 if (copy_from_user(&ir, ptr, sizeof(ir)))
408 if (!(hdev = hci_dev_get(ir.dev_id)))
/* Flush the cache (and re-inquire) when it is stale, empty, or the caller
 * asked for a flush. */
411 hci_dev_lock_bh(hdev);
412 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
413 inquiry_cache_empty(hdev) ||
414 ir.flags & IREQ_CACHE_FLUSH) {
415 inquiry_cache_flush(hdev);
418 hci_dev_unlock_bh(hdev);
420 timeo = ir.length * msecs_to_jiffies(2000);
421 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
424 /* for unlimited number of responses we will use buffer with 255 entries */
425 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
427 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
428 * copy it to the user space.
430 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
435 hci_dev_lock_bh(hdev);
436 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
437 hci_dev_unlock_bh(hdev);
439 BT_DBG("num_rsp %d", ir.num_rsp);
/* Write back the updated request header, then the entry array. */
441 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
443 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
456 /* ---- HCI ioctl helpers ---- */
/* ioctl backend: bring an HCI device up — call the driver open(), run the
 * init request (unless HCI_RAW), and either mark the device HCI_UP or tear
 * everything back down on failure.
 * NOTE(review): hci_req_lock(), several gotos/labels, the driver close()
 * call on the failure path and the final return are missing from this
 * extract. */
458 int hci_dev_open(__u16 dev)
460 struct hci_dev *hdev;
463 if (!(hdev = hci_dev_get(dev)))
466 BT_DBG("%s %p", hdev->name, hdev);
470 if (test_bit(HCI_UP, &hdev->flags)) {
475 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
476 set_bit(HCI_RAW, &hdev->flags);
478 if (hdev->open(hdev)) {
/* Non-raw devices get the full controller initialization sequence. */
483 if (!test_bit(HCI_RAW, &hdev->flags)) {
484 atomic_set(&hdev->cmd_cnt, 1);
485 set_bit(HCI_INIT, &hdev->flags);
487 //__hci_request(hdev, hci_reset_req, 0, HZ);
488 ret = __hci_request(hdev, hci_init_req, 0,
489 msecs_to_jiffies(HCI_INIT_TIMEOUT));
491 clear_bit(HCI_INIT, &hdev->flags);
496 set_bit(HCI_UP, &hdev->flags);
497 hci_notify(hdev, HCI_DEV_UP);
499 /* Init failed, cleanup */
500 tasklet_kill(&hdev->rx_task);
501 tasklet_kill(&hdev->tx_task);
502 tasklet_kill(&hdev->cmd_task);
504 skb_queue_purge(&hdev->cmd_q);
505 skb_queue_purge(&hdev->rx_q);
/* Drop the clone of the last command we sent, if any. */
510 if (hdev->sent_cmd) {
511 kfree_skb(hdev->sent_cmd);
512 hdev->sent_cmd = NULL;
520 hci_req_unlock(hdev);
/* Shared shutdown path: cancel any pending request, stop the tasklets,
 * flush caches/queues, reset the controller (unless HCI_RAW) and drop the
 * last sent command. NOTE(review): hci_req_lock(), the driver flush()/
 * close() calls and the final return are missing from this extract. */
525 static int hci_dev_do_close(struct hci_dev *hdev)
527 BT_DBG("%s %p", hdev->name, hdev);
/* Positive errno here; __hci_request negates it (see hci_req_cancel). */
529 hci_req_cancel(hdev, ENODEV);
532 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
533 hci_req_unlock(hdev);
537 /* Kill RX and TX tasks */
538 tasklet_kill(&hdev->rx_task);
539 tasklet_kill(&hdev->tx_task);
541 hci_dev_lock_bh(hdev);
542 inquiry_cache_flush(hdev);
543 hci_conn_hash_flush(hdev);
544 hci_dev_unlock_bh(hdev);
546 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the controller with a short (250 ms) timeout before closing. */
552 skb_queue_purge(&hdev->cmd_q);
553 atomic_set(&hdev->cmd_cnt, 1);
554 if (!test_bit(HCI_RAW, &hdev->flags)) {
555 set_bit(HCI_INIT, &hdev->flags);
556 __hci_request(hdev, hci_reset_req, 0,
557 msecs_to_jiffies(250));
558 clear_bit(HCI_INIT, &hdev->flags);
562 tasklet_kill(&hdev->cmd_task);
565 skb_queue_purge(&hdev->rx_q);
566 skb_queue_purge(&hdev->cmd_q);
567 skb_queue_purge(&hdev->raw_q);
569 /* Drop last sent command */
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
575 /* After this point our queues are empty
576 * and no tasks are scheduled. */
582 hci_req_unlock(hdev);
/* ioctl backend: look up the device by index and close it
 * (hci_dev_put and the return are missing from this extract). */
588 int hci_dev_close(__u16 dev)
590 struct hci_dev *hdev;
593 if (!(hdev = hci_dev_get(dev)))
595 err = hci_dev_do_close(hdev);
/* ioctl backend: soft-reset a running device — drop queued traffic, flush
 * caches, reset counters and issue HCI_Reset (unless HCI_RAW).
 * NOTE(review): hci_req_lock(), the 'ret' declaration, the done label and
 * hci_dev_put are missing from this extract. */
600 int hci_dev_reset(__u16 dev)
602 struct hci_dev *hdev;
605 if (!(hdev = hci_dev_get(dev)))
/* Hold off the TX tasklet while queues are being purged. */
609 tasklet_disable(&hdev->tx_task);
611 if (!test_bit(HCI_UP, &hdev->flags))
615 skb_queue_purge(&hdev->rx_q);
616 skb_queue_purge(&hdev->cmd_q);
618 hci_dev_lock_bh(hdev);
619 inquiry_cache_flush(hdev);
620 hci_conn_hash_flush(hdev);
621 hci_dev_unlock_bh(hdev);
626 atomic_set(&hdev->cmd_cnt, 1);
627 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
629 if (!test_bit(HCI_RAW, &hdev->flags))
630 ret = __hci_request(hdev, hci_reset_req, 0,
631 msecs_to_jiffies(HCI_INIT_TIMEOUT));
634 tasklet_enable(&hdev->tx_task);
635 hci_req_unlock(hdev);
/* ioctl backend: zero the device statistics counters. */
640 int hci_dev_reset_stat(__u16 dev)
642 struct hci_dev *hdev;
645 if (!(hdev = hci_dev_get(dev)))
648 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* ioctl dispatcher for per-device settings (auth, encrypt, scan, packet
 * type, link policy/mode, ACL/SCO MTU). NOTE(review): the switch statement,
 * its case labels, breaks and the cleanup/return are missing from this
 * extract — only the per-case bodies are visible. */
655 int hci_dev_cmd(unsigned int cmd, void __user *arg)
657 struct hci_dev *hdev;
658 struct hci_dev_req dr;
661 if (copy_from_user(&dr, arg, sizeof(dr)))
664 if (!(hdev = hci_dev_get(dr.dev_id)))
/* HCISETAUTH (presumably — case label not visible) */
669 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
670 msecs_to_jiffies(HCI_INIT_TIMEOUT));
674 if (!lmp_encrypt_capable(hdev)) {
679 if (!test_bit(HCI_AUTH, &hdev->flags)) {
680 /* Auth must be enabled first */
681 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
682 msecs_to_jiffies(HCI_INIT_TIMEOUT));
687 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
688 msecs_to_jiffies(HCI_INIT_TIMEOUT));
692 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
697 hdev->pkt_type = (__u16) dr.dev_opt;
701 hdev->link_policy = (__u16) dr.dev_opt;
705 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* dev_opt packs two 16-bit values: [0] = packet count, [1] = MTU. */
709 hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
710 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
714 hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
715 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
/* ioctl backend: copy the list of registered devices (id + flags) to
 * userspace, bounded by the caller-supplied dev_num.
 * NOTE(review): 'dev_num'/'p' declarations, 'dr = dl->dev_req', the n++ /
 * loop-bound check, dl->dev_num = n and kfree(dl) are missing from this
 * extract. */
726 int hci_get_dev_list(void __user *arg)
728 struct hci_dev_list_req *dl;
729 struct hci_dev_req *dr;
731 int n = 0, size, err;
734 if (get_user(dev_num, (__u16 __user *) arg))
/* Sanity-bound the requested count to two pages' worth of entries. */
737 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
740 size = sizeof(*dl) + dev_num * sizeof(*dr);
742 if (!(dl = kmalloc(size, GFP_KERNEL)))
747 read_lock_bh(&hci_dev_list_lock);
748 list_for_each(p, &hci_dev_list) {
749 struct hci_dev *hdev;
750 hdev = list_entry(p, struct hci_dev, list);
751 (dr + n)->dev_id = hdev->id;
752 (dr + n)->dev_opt = hdev->flags;
756 read_unlock_bh(&hci_dev_list_lock);
/* Shrink the copy size to the number of entries actually filled. */
759 size = sizeof(*dl) + n * sizeof(*dr);
761 err = copy_to_user(arg, dl, size);
764 return err ? -EFAULT : 0;
/* ioctl backend: fill a struct hci_dev_info snapshot for one device and
 * copy it to userspace (hci_dev_put and return are missing from this
 * extract). */
767 int hci_get_dev_info(void __user *arg)
769 struct hci_dev *hdev;
770 struct hci_dev_info di;
773 if (copy_from_user(&di, arg, sizeof(di)))
776 if (!(hdev = hci_dev_get(di.dev_id)))
779 strcpy(di.name, hdev->name);
780 di.bdaddr = hdev->bdaddr;
781 di.type = hdev->type;
782 di.flags = hdev->flags;
783 di.pkt_type = hdev->pkt_type;
784 di.acl_mtu = hdev->acl_mtu;
785 di.acl_pkts = hdev->acl_pkts;
786 di.sco_mtu = hdev->sco_mtu;
787 di.sco_pkts = hdev->sco_pkts;
788 di.link_policy = hdev->link_policy;
789 di.link_mode = hdev->link_mode;
791 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
792 memcpy(&di.features, &hdev->features, sizeof(di.features));
794 if (copy_to_user(arg, &di, sizeof(di)))
802 /* ---- Interface to HCI drivers ---- */
804 /* Alloc HCI device */
804 /* Alloc HCI device */
/* Allocate a zeroed struct hci_dev and initialize its driver_init queue
 * (NULL check and return are missing from this extract). Ownership passes
 * to the caller, who frees via hci_free_dev(). */
805 struct hci_dev *hci_alloc_dev(void)
807 struct hci_dev *hdev;
809 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
813 skb_queue_head_init(&hdev->driver_init);
817 EXPORT_SYMBOL(hci_alloc_dev);
819 /* Free HCI device */
/* Release an hci_dev: purge pending driver-init skbs; the struct itself is
 * freed by the embedded device's release callback via put_device(). */
820 void hci_free_dev(struct hci_dev *hdev)
822 skb_queue_purge(&hdev->driver_init);
824 /* will free via device release */
825 put_device(&hdev->dev);
827 EXPORT_SYMBOL(hci_free_dev);
829 /* Register HCI device */
829 /* Register HCI device */
/* Add a driver-allocated hci_dev to the global list: assign the first free
 * id ("hciN"), initialize locks, tasklets, queues and defaults, then create
 * sysfs entries and broadcast HCI_DEV_REG.
 * NOTE(review): 'id'/'i' declarations, the id++/head advance inside the
 * id-search loop and the final return are missing from this extract. */
830 int hci_register_dev(struct hci_dev *hdev)
832 struct list_head *head = &hci_dev_list, *p;
835 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
/* Drivers must supply these three callbacks. */
837 if (!hdev->open || !hdev->close || !hdev->destruct)
840 write_lock_bh(&hci_dev_list_lock);
842 /* Find first available device id */
843 list_for_each(p, &hci_dev_list) {
844 if (list_entry(p, struct hci_dev, list)->id != id)
849 sprintf(hdev->name, "hci%d", id);
851 list_add(&hdev->list, head);
853 atomic_set(&hdev->refcnt, 1);
854 spin_lock_init(&hdev->lock);
/* Default packet types, link mode and sniff parameters. */
857 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
858 hdev->esco_type = (ESCO_HV1);
859 hdev->link_mode = (HCI_LM_ACCEPT);
861 hdev->idle_timeout = 0;
862 hdev->sniff_max_interval = 800;
863 hdev->sniff_min_interval = 80;
865 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
866 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
867 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
869 skb_queue_head_init(&hdev->rx_q);
870 skb_queue_head_init(&hdev->cmd_q);
871 skb_queue_head_init(&hdev->raw_q);
/* One reassembly slot per data packet type (ACL/SCO/event). */
873 for (i = 0; i < 3; i++)
874 hdev->reassembly[i] = NULL;
876 init_waitqueue_head(&hdev->req_wait_q);
877 init_MUTEX(&hdev->req_lock);
879 inquiry_cache_init(hdev);
881 hci_conn_hash_init(hdev);
883 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
885 atomic_set(&hdev->promisc, 0);
887 write_unlock_bh(&hci_dev_list_lock);
889 hci_register_sysfs(hdev);
891 hci_notify(hdev, HCI_DEV_REG);
895 EXPORT_SYMBOL(hci_register_dev);
897 /* Unregister HCI device */
897 /* Unregister HCI device */
/* Reverse of hci_register_dev: remove sysfs entries, unlink from the global
 * list, close the device, free reassembly buffers and broadcast
 * HCI_DEV_UNREG. NOTE(review): the 'i' declaration, the final hci_dev_put
 * and return are missing from this extract. */
898 int hci_unregister_dev(struct hci_dev *hdev)
902 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
904 hci_unregister_sysfs(hdev);
906 write_lock_bh(&hci_dev_list_lock);
907 list_del(&hdev->list);
908 write_unlock_bh(&hci_dev_list_lock);
910 hci_dev_do_close(hdev);
912 for (i = 0; i < 3; i++)
913 kfree_skb(hdev->reassembly[i]);
915 hci_notify(hdev, HCI_DEV_UNREG);
921 EXPORT_SYMBOL(hci_unregister_dev);
923 /* Suspend HCI device */
/* Notify listeners of suspend; return is missing from this extract. */
924 int hci_suspend_dev(struct hci_dev *hdev)
926 hci_notify(hdev, HCI_DEV_SUSPEND);
929 EXPORT_SYMBOL(hci_suspend_dev);
931 /* Resume HCI device */
/* Notify listeners of resume; return is missing from this extract. */
932 int hci_resume_dev(struct hci_dev *hdev)
934 hci_notify(hdev, HCI_DEV_RESUME);
937 EXPORT_SYMBOL(hci_resume_dev);
939 /* Receive packet type fragment */
939 /* Receive packet type fragment */
/* Per-type reassembly slot; packet types start at 2, hence the "- 2". */
940 #define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])
/* Reassemble a possibly fragmented HCI packet arriving from the driver:
 * on the first fragment, parse the per-type header to learn the full frame
 * length and allocate an skb; append data across calls until the expected
 * byte count is reached, then hand the frame to hci_recv_frame().
 * NOTE(review): the outer while(count) loop, 'len' declaration, scb->expect
 * bookkeeping and returns are missing from this extract. */
942 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
944 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
948 struct sk_buff *skb = __reassembly(hdev, type);
949 struct { int expect; } *scb;
953 /* Start of the frame */
/* Event packets carry an 8-bit payload length. */
957 if (count >= HCI_EVENT_HDR_SIZE) {
958 struct hci_event_hdr *h = data;
959 len = HCI_EVENT_HDR_SIZE + h->plen;
964 case HCI_ACLDATA_PKT:
/* ACL packets carry a little-endian 16-bit payload length. */
965 if (count >= HCI_ACL_HDR_SIZE) {
966 struct hci_acl_hdr *h = data;
967 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
972 case HCI_SCODATA_PKT:
973 if (count >= HCI_SCO_HDR_SIZE) {
974 struct hci_sco_hdr *h = data;
975 len = HCI_SCO_HDR_SIZE + h->dlen;
981 skb = bt_skb_alloc(len, GFP_ATOMIC);
983 BT_ERR("%s no memory for packet", hdev->name);
987 skb->dev = (void *) hdev;
988 bt_cb(skb)->pkt_type = type;
990 __reassembly(hdev, type) = skb;
992 scb = (void *) skb->cb;
997 scb = (void *) skb->cb;
1001 len = min(len, count);
1003 memcpy(skb_put(skb, len), data, len);
1007 if (scb->expect == 0) {
1008 /* Complete frame */
1010 __reassembly(hdev, type) = NULL;
1012 bt_cb(skb)->pkt_type = type;
1013 hci_recv_frame(skb);
1016 count -= len; data += len;
1021 EXPORT_SYMBOL(hci_recv_fragment);
1023 /* ---- Interface to upper protocols ---- */
1025 /* Register/Unregister protocols.
1026 * hci_task_lock is used to ensure that no tasks are running. */
/* Register an upper-layer protocol (L2CAP/SCO) in its hci_proto slot;
 * hci_task_lock excludes the RX/TX tasklets during the update
 * ('err' declaration and returns are missing from this extract). */
1027 int hci_register_proto(struct hci_proto *hp)
1031 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1033 if (hp->id >= HCI_MAX_PROTO)
1036 write_lock_bh(&hci_task_lock);
1038 if (!hci_proto[hp->id])
1039 hci_proto[hp->id] = hp;
1043 write_unlock_bh(&hci_task_lock);
1047 EXPORT_SYMBOL(hci_register_proto);
/* Remove a protocol from its slot under the same lock discipline. */
1049 int hci_unregister_proto(struct hci_proto *hp)
1053 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1055 if (hp->id >= HCI_MAX_PROTO)
1058 write_lock_bh(&hci_task_lock);
1060 if (hci_proto[hp->id])
1061 hci_proto[hp->id] = NULL;
1065 write_unlock_bh(&hci_task_lock);
1069 EXPORT_SYMBOL(hci_unregister_proto);
/* Add a callback block to the global hci_cb_list. */
1071 int hci_register_cb(struct hci_cb *cb)
1073 BT_DBG("%p name %s", cb, cb->name);
1075 write_lock_bh(&hci_cb_list_lock);
1076 list_add(&cb->list, &hci_cb_list);
1077 write_unlock_bh(&hci_cb_list_lock);
1081 EXPORT_SYMBOL(hci_register_cb);
/* Remove a callback block from the global hci_cb_list. */
1083 int hci_unregister_cb(struct hci_cb *cb)
1085 BT_DBG("%p name %s", cb, cb->name);
1087 write_lock_bh(&hci_cb_list_lock);
1088 list_del(&cb->list);
1089 write_unlock_bh(&hci_cb_list_lock);
1093 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one skb to the driver's send() callback; in promiscuous mode a
 * timestamped copy goes to monitoring sockets first.
 * NOTE(review): the NULL-hdev check, skb_clone for the sock copy and
 * skb_orphan call are missing from this extract. */
1095 static int hci_send_frame(struct sk_buff *skb)
1097 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1104 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1106 if (atomic_read(&hdev->promisc)) {
1108 __net_timestamp(skb);
1110 hci_send_to_sock(hdev, skb);
1113 /* Get rid of skb owner, prior to sending to the driver. */
1116 return hdev->send(skb);
1119 /* Send HCI command */
/* Build an HCI command skb (header + optional parameters), queue it on
 * cmd_q and kick the command tasklet. Returns are missing from this
 * extract; hdr->plen assignment also not visible. */
1120 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1122 int len = HCI_COMMAND_HDR_SIZE + plen;
1123 struct hci_command_hdr *hdr;
1124 struct sk_buff *skb;
1126 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1128 skb = bt_skb_alloc(len, GFP_ATOMIC);
1130 BT_ERR("%s no memory for command", hdev->name);
1134 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1135 hdr->opcode = cpu_to_le16(opcode);
1139 memcpy(skb_put(skb, plen), param, plen);
1141 BT_DBG("skb len %d", skb->len);
1143 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1144 skb->dev = (void *) hdev;
1145 skb_queue_tail(&hdev->cmd_q, skb);
1146 hci_sched_cmd(hdev);
1151 /* Get data from the previously sent command */
1151 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command iff its
 * opcode matches; NULL returns on mismatch/no command are missing from this
 * extract. */
1152 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1154 struct hci_command_hdr *hdr;
1156 if (!hdev->sent_cmd)
1159 hdr = (void *) hdev->sent_cmd->data;
1161 if (hdr->opcode != cpu_to_le16(opcode))
1164 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1166 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL header (packed handle+flags, little-endian length) to an
 * outgoing skb; the 'len' capture before skb_push is missing from this
 * extract. */
1170 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1172 struct hci_acl_hdr *hdr;
1175 skb_push(skb, HCI_ACL_HDR_SIZE);
1176 skb_reset_transport_header(skb);
1177 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1178 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1179 hdr->dlen = cpu_to_le16(len);
/* Queue ACL data for a connection: first fragment gets ACL_START, each
 * fragment from frag_list gets ACL_CONT; the fragment list is spliced into
 * conn->data_q atomically under the queue lock.
 * NOTE(review): the inner do/while over 'list', hci_sched_tx kick and
 * return are missing from this extract. */
1182 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1184 struct hci_dev *hdev = conn->hdev;
1185 struct sk_buff *list;
1187 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1189 skb->dev = (void *) hdev;
1190 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1191 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1193 if (!(list = skb_shinfo(skb)->frag_list)) {
1194 /* Non fragmented */
1195 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1197 skb_queue_tail(&conn->data_q, skb);
1200 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain so each piece is queued individually. */
1202 skb_shinfo(skb)->frag_list = NULL;
1204 /* Queue all fragments atomically */
1205 spin_lock_bh(&conn->data_q.lock);
1207 __skb_queue_tail(&conn->data_q, skb);
1209 skb = list; list = list->next;
1211 skb->dev = (void *) hdev;
1212 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1213 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1215 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1217 __skb_queue_tail(&conn->data_q, skb);
1220 spin_unlock_bh(&conn->data_q.lock);
1226 EXPORT_SYMBOL(hci_send_acl);
/* Queue one SCO frame: reject oversize payloads against sco_mtu, prepend
 * the SCO header and append to conn->data_q (error return and scheduler
 * kick are missing from this extract). */
1229 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1231 struct hci_dev *hdev = conn->hdev;
1232 struct hci_sco_hdr hdr;
1234 BT_DBG("%s len %d", hdev->name, skb->len);
1236 if (skb->len > hdev->sco_mtu) {
1241 hdr.handle = cpu_to_le16(conn->handle);
1242 hdr.dlen = skb->len;
1244 skb_push(skb, HCI_SCO_HDR_SIZE);
1245 skb_reset_transport_header(skb);
1246 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1248 skb->dev = (void *) hdev;
1249 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1250 skb_queue_tail(&conn->data_q, skb);
1254 EXPORT_SYMBOL(hci_send_sco);
1256 /* ---- HCI TX task (outgoing data) ---- */
1258 /* HCI Connection scheduler */
1258 /* HCI Connection scheduler */
/* Pick the connection of the given type with queued data and the fewest
 * in-flight packets, and compute its fair-share quote from the free
 * controller buffer count. NOTE(review): the 'c' declaration, min/conn
 * updates, the quote computation tail and return are missing from this
 * extract. */
1259 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1261 struct hci_conn_hash *h = &hdev->conn_hash;
1262 struct hci_conn *conn = NULL;
1263 int num = 0, min = ~0;
1264 struct list_head *p;
1266 /* We don't have to lock device here. Connections are always
1267 * added and removed with TX task disabled. */
1268 list_for_each(p, &h->list) {
1270 c = list_entry(p, struct hci_conn, list);
1272 if (c->type != type || c->state != BT_CONNECTED
1273 || skb_queue_empty(&c->data_q))
1277 if (c->sent < min) {
1284 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1290 BT_DBG("conn %p quote %d", conn, *quote);
/* ACL transmit timeout handler: disconnect every ACL link that still has
 * unacknowledged packets (0x13 = Remote User Terminated Connection). */
1294 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1296 struct hci_conn_hash *h = &hdev->conn_hash;
1297 struct list_head *p;
1300 BT_ERR("%s ACL tx timeout", hdev->name);
1302 /* Kill stalled connections */
1303 list_for_each(p, &h->list) {
1304 c = list_entry(p, struct hci_conn, list);
1305 if (c->type == ACL_LINK && c->sent) {
1306 BT_ERR("%s killing stalled ACL connection %s",
1307 hdev->name, batostr(&c->dst));
1308 hci_acl_disconn(c, 0x13);
/* Drain queued ACL data while controller buffer credits (acl_cnt) and the
 * per-connection quote allow; detects stalled links via a 45 s timeout.
 * NOTE(review): the 'quote' declaration and the acl_cnt--/conn->sent++
 * accounting are missing from this extract. The raw jiffies comparison
 * predates time_after() usage here. */
1313 static inline void hci_sched_acl(struct hci_dev *hdev)
1315 struct hci_conn *conn;
1316 struct sk_buff *skb;
1319 BT_DBG("%s", hdev->name);
1321 if (!test_bit(HCI_RAW, &hdev->flags)) {
1322 /* ACL tx timeout must be longer than maximum
1323 * link supervision timeout (40.9 seconds) */
1324 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1325 hci_acl_tx_to(hdev);
1328 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1329 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1330 BT_DBG("skb %p len %d", skb, skb->len);
1332 hci_conn_enter_active_mode(conn);
1334 hci_send_frame(skb);
1335 hdev->acl_last_tx = jiffies;
/* SCO scheduler: no flow-control credits beyond sco_cnt; 'quote'
 * declaration and accounting are missing from this extract. */
1344 static inline void hci_sched_sco(struct hci_dev *hdev)
1346 struct hci_conn *conn;
1347 struct sk_buff *skb;
1350 BT_DBG("%s", hdev->name);
1352 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1353 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1354 BT_DBG("skb %p len %d", skb, skb->len);
1355 hci_send_frame(skb);
1358 if (conn->sent == ~0)
/* eSCO scheduler: mirrors hci_sched_sco but for ESCO_LINK connections. */
1364 static inline void hci_sched_esco(struct hci_dev *hdev)
1366 struct hci_conn *conn;
1367 struct sk_buff *skb;
1370 BT_DBG("%s", hdev->name);
1372 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1374 BT_DBG("skb %p len %d", skb, skb->len);
1375 hci_send_frame(skb);
1378 if (conn->sent == ~0)
/* TX tasklet body: under the task read-lock, run the ACL/SCO/eSCO
 * schedulers and then flush any raw (unknown-type) packets straight to the
 * driver. */
1384 static void hci_tx_task(unsigned long arg)
1386 struct hci_dev *hdev = (struct hci_dev *) arg;
1387 struct sk_buff *skb;
1389 read_lock(&hci_task_lock);
1391 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1393 /* Schedule queues and send stuff to HCI driver */
1395 hci_sched_acl(hdev);
1397 hci_sched_sco(hdev);
1399 hci_sched_esco(hdev);
1401 /* Send next queued raw (unknown type) packet */
1402 while ((skb = skb_dequeue(&hdev->raw_q)))
1403 hci_send_frame(skb);
1405 read_unlock(&hci_task_lock);
1408 /* ----- HCI RX task (incoming data proccessing) ----- */
1410 /* ACL data packet */
1410 /* ACL data packet */
/* Deliver an incoming ACL packet: unpack handle/flags, look up the
 * connection and pass the payload to the L2CAP protocol handler; unknown
 * handles are logged (and the skb presumably freed — kfree_skb not visible
 * in this extract). */
1411 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1413 struct hci_acl_hdr *hdr = (void *) skb->data;
1414 struct hci_conn *conn;
1415 __u16 handle, flags;
1417 skb_pull(skb, HCI_ACL_HDR_SIZE);
1419 handle = __le16_to_cpu(hdr->handle);
1420 flags = hci_flags(handle);
1421 handle = hci_handle(handle);
1423 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1425 hdev->stat.acl_rx++;
1428 conn = hci_conn_hash_lookup_handle(hdev, handle);
1429 hci_dev_unlock(hdev);
1432 register struct hci_proto *hp;
1434 hci_conn_enter_active_mode(conn);
1436 /* Send to upper protocol */
1437 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1438 hp->recv_acldata(conn, skb, flags);
1442 BT_ERR("%s ACL packet for unknown connection handle %d",
1443 hdev->name, handle);
1449 /* SCO data packet */
/* Deliver an incoming SCO packet to the SCO protocol handler, analogous to
 * hci_acldata_packet (handle declaration and kfree_skb not visible in this
 * extract). */
1450 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1452 struct hci_sco_hdr *hdr = (void *) skb->data;
1453 struct hci_conn *conn;
1456 skb_pull(skb, HCI_SCO_HDR_SIZE);
1458 handle = __le16_to_cpu(hdr->handle);
1460 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1462 hdev->stat.sco_rx++;
1465 conn = hci_conn_hash_lookup_handle(hdev, handle);
1466 hci_dev_unlock(hdev);
1469 register struct hci_proto *hp;
1471 /* Send to upper protocol */
1472 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1473 hp->recv_scodata(conn, skb);
1477 BT_ERR("%s SCO packet for unknown connection handle %d",
1478 hdev->name, handle);
/* RX tasklet body: drain rx_q under the task read-lock. Promiscuous
 * listeners get a copy; HCI_RAW devices and data packets during HCI_INIT
 * are dropped (kfree_skb/continue lines missing from this extract);
 * otherwise dispatch by packet type to the event/ACL/SCO handlers. */
1484 static void hci_rx_task(unsigned long arg)
1486 struct hci_dev *hdev = (struct hci_dev *) arg;
1487 struct sk_buff *skb;
1489 BT_DBG("%s", hdev->name);
1491 read_lock(&hci_task_lock);
1493 while ((skb = skb_dequeue(&hdev->rx_q))) {
1494 if (atomic_read(&hdev->promisc)) {
1495 /* Send copy to the sockets */
1496 hci_send_to_sock(hdev, skb);
1499 if (test_bit(HCI_RAW, &hdev->flags)) {
1504 if (test_bit(HCI_INIT, &hdev->flags)) {
1505 /* Don't process data packets in this states. */
1506 switch (bt_cb(skb)->pkt_type) {
1507 case HCI_ACLDATA_PKT:
1508 case HCI_SCODATA_PKT:
1515 switch (bt_cb(skb)->pkt_type) {
1517 hci_event_packet(hdev, skb);
1520 case HCI_ACLDATA_PKT:
1521 BT_DBG("%s ACL data packet", hdev->name);
1522 hci_acldata_packet(hdev, skb);
1525 case HCI_SCODATA_PKT:
1526 BT_DBG("%s SCO data packet", hdev->name);
1527 hci_scodata_packet(hdev, skb);
1536 read_unlock(&hci_task_lock);
1539 static void hci_cmd_task(unsigned long arg)
1541 struct hci_dev *hdev = (struct hci_dev *) arg;
1542 struct sk_buff *skb;
1544 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1546 if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1547 BT_ERR("%s command tx timeout", hdev->name);
1548 atomic_set(&hdev->cmd_cnt, 1);
1551 /* Send queued commands */
1552 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1554 kfree_skb(hdev->sent_cmd);
1556 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1557 atomic_dec(&hdev->cmd_cnt);
1558 hci_send_frame(skb);
1559 hdev->cmd_last_tx = jiffies;
1561 skb_queue_head(&hdev->cmd_q, skb);
1562 hci_sched_cmd(hdev);