2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h>
41 #include <linux/notifier.h>
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
46 #include <asm/unaligned.h>
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
51 #ifndef CONFIG_BT_HCI_CORE_DEBUG
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
59 static void hci_notify(struct hci_dev *hdev, int event);
61 static DEFINE_RWLOCK(hci_task_lock);
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
72 #define HCI_MAX_PROTO 2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
78 /* ---- HCI notifications ---- */
80 int hci_register_notifier(struct notifier_block *nb)
82 return atomic_notifier_chain_register(&hci_notifier, nb);
85 int hci_unregister_notifier(struct notifier_block *nb)
87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
90 static void hci_notify(struct hci_dev *hdev, int event)
92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
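/* The notifier chain above is a plain atomic notifier: other kernel code can
 * watch for HCI_DEV_REG/UNREG and HCI_DEV_UP/DOWN events.  A minimal sketch,
 * with my_hci_event/my_nb as illustrative names only:
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			BT_DBG("%s came up", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_hci_event };
 *	...
 *	hci_register_notifier(&my_nb);
 */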
95 /* ---- HCI requests ---- */
97 void hci_req_complete(struct hci_dev *hdev, int result)
99 BT_DBG("%s result 0x%2.2x", hdev->name, result);
101 if (hdev->req_status == HCI_REQ_PEND) {
102 hdev->req_result = result;
103 hdev->req_status = HCI_REQ_DONE;
104 wake_up_interruptible(&hdev->req_wait_q);
108 static void hci_req_cancel(struct hci_dev *hdev, int err)
110 BT_DBG("%s err 0x%2.2x", hdev->name, err);
112 if (hdev->req_status == HCI_REQ_PEND) {
113 hdev->req_result = err;
114 hdev->req_status = HCI_REQ_CANCELED;
115 wake_up_interruptible(&hdev->req_wait_q);
119 /* Execute request and wait for completion. */
120 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
121 unsigned long opt, __u32 timeout)
123 DECLARE_WAITQUEUE(wait, current);
126 BT_DBG("%s start", hdev->name);
128 hdev->req_status = HCI_REQ_PEND;
130 add_wait_queue(&hdev->req_wait_q, &wait);
131 set_current_state(TASK_INTERRUPTIBLE);
134 schedule_timeout(timeout);
136 remove_wait_queue(&hdev->req_wait_q, &wait);
138 if (signal_pending(current))
141 switch (hdev->req_status) {
143 err = -bt_err(hdev->req_result);
146 case HCI_REQ_CANCELED:
147 err = -hdev->req_result;
155 hdev->req_status = hdev->req_result = 0;
157 BT_DBG("%s end: err %d", hdev->name, err);
162 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
163 unsigned long opt, __u32 timeout)
167 if (!test_bit(HCI_UP, &hdev->flags))
170 /* Serialize all requests */
172 ret = __hci_request(hdev, req, opt, timeout);
173 hci_req_unlock(hdev);
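/* Request flow, in brief: __hci_request() marks the request pending, calls the
 * supplied req() callback (which queues one or more HCI commands), then sleeps
 * on req_wait_q until hci_req_complete() or hci_req_cancel() updates
 * req_status and wakes it.  hci_request() only adds the HCI_UP check and
 * req_lock serialization.  A hedged usage sketch (the opt value is
 * illustrative, matching how the scan ioctl below drives it):
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */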
178 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
180 BT_DBG("%s %ld", hdev->name, opt);
183 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
186 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
192 BT_DBG("%s %ld", hdev->name, opt);
194 /* Driver initialization */
196 /* Special commands */
197 while ((skb = skb_dequeue(&hdev->driver_init))) {
198 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
199 skb->dev = (void *) hdev;
200 skb_queue_tail(&hdev->cmd_q, skb);
203 skb_queue_purge(&hdev->driver_init);
205 /* Mandatory initialization */
208 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
209 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
211 /* Read Local Supported Features */
212 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
214 /* Read Local Version */
215 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
217 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
218 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
221 /* Host buffer size */
223 struct hci_cp_host_buffer_size cp;
224 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
225 cp.sco_mtu = HCI_MAX_SCO_SIZE;
226 cp.acl_max_pkt = cpu_to_le16(0xffff);
227 cp.sco_max_pkt = cpu_to_le16(0xffff);
228 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
232 /* Read BD Address */
233 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
235 /* Read Class of Device */
236 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
238 /* Read Local Name */
239 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
241 /* Read Voice Setting */
242 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
244 /* Optional initialization */
246 /* Clear Event Filters */
247 flt_type = HCI_FLT_CLEAR_ALL;
248 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
250 /* Page timeout ~20 secs */
251 param = cpu_to_le16(0x8000);
252 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
254 /* Connection accept timeout ~20 secs */
255 param = cpu_to_le16(0x7d00);
256 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
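/* Both timeouts above are expressed in 0.625 ms baseband slots:
 * 0x8000 = 32768 slots ~= 20.48 s and 0x7d00 = 32000 slots = 20 s,
 * matching the "~20 secs" comments. */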
259 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
263 BT_DBG("%s %x", hdev->name, scan);
265 /* Inquiry and Page scans */
266 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
269 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
273 BT_DBG("%s %x", hdev->name, auth);
276 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
279 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
283 BT_DBG("%s %x", hdev->name, encrypt);
286 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
289 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
291 __le16 policy = cpu_to_le16(opt);
293 BT_DBG("%s %x", hdev->name, opt);
295 /* Default link policy */
296 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
299 /* Get HCI device by index.
300 * Device is held on return. */
301 struct hci_dev *hci_dev_get(int index)
303 struct hci_dev *hdev = NULL;
311 read_lock(&hci_dev_list_lock);
312 list_for_each(p, &hci_dev_list) {
313 struct hci_dev *d = list_entry(p, struct hci_dev, list);
314 if (d->id == index) {
315 hdev = hci_dev_hold(d);
319 read_unlock(&hci_dev_list_lock);
323 /* ---- Inquiry support ---- */
324 static void inquiry_cache_flush(struct hci_dev *hdev)
326 struct inquiry_cache *cache = &hdev->inq_cache;
327 struct inquiry_entry *next = cache->list, *e;
329 BT_DBG("cache %p", cache);
338 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
340 struct inquiry_cache *cache = &hdev->inq_cache;
341 struct inquiry_entry *e;
343 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
345 for (e = cache->list; e; e = e->next)
346 if (!bacmp(&e->data.bdaddr, bdaddr))
351 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
353 struct inquiry_cache *cache = &hdev->inq_cache;
354 struct inquiry_entry *e;
356 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
358 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
359 /* Entry not in the cache. Add new one. */
360 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
362 e->next = cache->list;
366 memcpy(&e->data, data, sizeof(*data));
367 e->timestamp = jiffies;
368 cache->timestamp = jiffies;
371 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
373 struct inquiry_cache *cache = &hdev->inq_cache;
374 struct inquiry_info *info = (struct inquiry_info *) buf;
375 struct inquiry_entry *e;
378 for (e = cache->list; e && copied < num; e = e->next, copied++) {
379 struct inquiry_data *data = &e->data;
380 bacpy(&info->bdaddr, &data->bdaddr);
381 info->pscan_rep_mode = data->pscan_rep_mode;
382 info->pscan_period_mode = data->pscan_period_mode;
383 info->pscan_mode = data->pscan_mode;
384 memcpy(info->dev_class, data->dev_class, 3);
385 info->clock_offset = data->clock_offset;
389 BT_DBG("cache %p, copied %d", cache, copied);
393 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
395 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
396 struct hci_cp_inquiry cp;
398 BT_DBG("%s", hdev->name);
400 if (test_bit(HCI_INQUIRY, &hdev->flags))
404 memcpy(&cp.lap, &ir->lap, 3);
405 cp.length = ir->length;
406 cp.num_rsp = ir->num_rsp;
407 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
410 int hci_inquiry(void __user *arg)
412 __u8 __user *ptr = arg;
413 struct hci_inquiry_req ir;
414 struct hci_dev *hdev;
415 int err = 0, do_inquiry = 0, max_rsp;
419 if (copy_from_user(&ir, ptr, sizeof(ir)))
422 if (!(hdev = hci_dev_get(ir.dev_id)))
425 hci_dev_lock_bh(hdev);
426 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
427 inquiry_cache_empty(hdev) ||
428 ir.flags & IREQ_CACHE_FLUSH) {
429 inquiry_cache_flush(hdev);
432 hci_dev_unlock_bh(hdev);
434 timeo = ir.length * msecs_to_jiffies(2000);
435 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
438 /* for an unlimited number of responses we use a buffer with 255 entries */
439 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
441 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
442 * copy it to user space.
444 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
449 hci_dev_lock_bh(hdev);
450 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
451 hci_dev_unlock_bh(hdev);
453 BT_DBG("num_rsp %d", ir.num_rsp);
455 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
457 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
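/* hci_inquiry() backs the HCIINQUIRY ioctl on HCI sockets.  The caller passes
 * a struct hci_inquiry_req immediately followed by room for the inquiry_info
 * results.  A rough user-space sketch (hci_sock_fd and the literal values are
 * illustrative; 0x9e8b33 is the general inquiry access code):
 *
 *	void *buf = malloc(sizeof(struct hci_inquiry_req) +
 *			   255 * sizeof(struct inquiry_info));
 *	struct hci_inquiry_req *ir = buf;
 *
 *	ir->dev_id  = 0;			// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;			// GIAC 0x9e8b33, LSB first
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// inquiry length, units of 1.28 s
 *	ir->num_rsp = 255;
 *	ioctl(hci_sock_fd, HCIINQUIRY, buf);
 */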
470 /* ---- HCI ioctl helpers ---- */
472 int hci_dev_open(__u16 dev)
474 struct hci_dev *hdev;
477 if (!(hdev = hci_dev_get(dev)))
480 BT_DBG("%s %p", hdev->name, hdev);
484 if (test_bit(HCI_UP, &hdev->flags)) {
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
490 set_bit(HCI_RAW, &hdev->flags);
492 if (hdev->open(hdev)) {
497 if (!test_bit(HCI_RAW, &hdev->flags)) {
498 atomic_set(&hdev->cmd_cnt, 1);
499 set_bit(HCI_INIT, &hdev->flags);
501 //__hci_request(hdev, hci_reset_req, 0, HZ);
502 ret = __hci_request(hdev, hci_init_req, 0,
503 msecs_to_jiffies(HCI_INIT_TIMEOUT));
505 clear_bit(HCI_INIT, &hdev->flags);
510 set_bit(HCI_UP, &hdev->flags);
511 hci_notify(hdev, HCI_DEV_UP);
513 /* Init failed, cleanup */
514 tasklet_kill(&hdev->rx_task);
515 tasklet_kill(&hdev->tx_task);
516 tasklet_kill(&hdev->cmd_task);
518 skb_queue_purge(&hdev->cmd_q);
519 skb_queue_purge(&hdev->rx_q);
524 if (hdev->sent_cmd) {
525 kfree_skb(hdev->sent_cmd);
526 hdev->sent_cmd = NULL;
534 hci_req_unlock(hdev);
539 static int hci_dev_do_close(struct hci_dev *hdev)
541 BT_DBG("%s %p", hdev->name, hdev);
543 hci_req_cancel(hdev, ENODEV);
546 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
547 hci_req_unlock(hdev);
551 /* Kill RX and TX tasks */
552 tasklet_kill(&hdev->rx_task);
553 tasklet_kill(&hdev->tx_task);
555 hci_dev_lock_bh(hdev);
556 inquiry_cache_flush(hdev);
557 hci_conn_hash_flush(hdev);
558 hci_dev_unlock_bh(hdev);
560 hci_notify(hdev, HCI_DEV_DOWN);
566 skb_queue_purge(&hdev->cmd_q);
567 atomic_set(&hdev->cmd_cnt, 1);
568 if (!test_bit(HCI_RAW, &hdev->flags)) {
569 set_bit(HCI_INIT, &hdev->flags);
570 __hci_request(hdev, hci_reset_req, 0,
571 msecs_to_jiffies(250));
572 clear_bit(HCI_INIT, &hdev->flags);
576 tasklet_kill(&hdev->cmd_task);
579 skb_queue_purge(&hdev->rx_q);
580 skb_queue_purge(&hdev->cmd_q);
581 skb_queue_purge(&hdev->raw_q);
583 /* Drop last sent command */
584 if (hdev->sent_cmd) {
585 kfree_skb(hdev->sent_cmd);
586 hdev->sent_cmd = NULL;
589 /* After this point our queues are empty
590 * and no tasks are scheduled. */
596 hci_req_unlock(hdev);
602 int hci_dev_close(__u16 dev)
604 struct hci_dev *hdev;
607 if (!(hdev = hci_dev_get(dev)))
609 err = hci_dev_do_close(hdev);
614 int hci_dev_reset(__u16 dev)
616 struct hci_dev *hdev;
619 if (!(hdev = hci_dev_get(dev)))
623 tasklet_disable(&hdev->tx_task);
625 if (!test_bit(HCI_UP, &hdev->flags))
629 skb_queue_purge(&hdev->rx_q);
630 skb_queue_purge(&hdev->cmd_q);
632 hci_dev_lock_bh(hdev);
633 inquiry_cache_flush(hdev);
634 hci_conn_hash_flush(hdev);
635 hci_dev_unlock_bh(hdev);
640 atomic_set(&hdev->cmd_cnt, 1);
641 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
643 if (!test_bit(HCI_RAW, &hdev->flags))
644 ret = __hci_request(hdev, hci_reset_req, 0,
645 msecs_to_jiffies(HCI_INIT_TIMEOUT));
648 tasklet_enable(&hdev->tx_task);
649 hci_req_unlock(hdev);
654 int hci_dev_reset_stat(__u16 dev)
656 struct hci_dev *hdev;
659 if (!(hdev = hci_dev_get(dev)))
662 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
669 int hci_dev_cmd(unsigned int cmd, void __user *arg)
671 struct hci_dev *hdev;
672 struct hci_dev_req dr;
675 if (copy_from_user(&dr, arg, sizeof(dr)))
678 if (!(hdev = hci_dev_get(dr.dev_id)))
683 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
684 msecs_to_jiffies(HCI_INIT_TIMEOUT));
688 if (!lmp_encrypt_capable(hdev)) {
693 if (!test_bit(HCI_AUTH, &hdev->flags)) {
694 /* Auth must be enabled first */
695 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
696 msecs_to_jiffies(HCI_INIT_TIMEOUT));
701 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
702 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
711 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
712 msecs_to_jiffies(HCI_INIT_TIMEOUT));
716 hdev->link_mode = ((__u16) dr.dev_opt) &
717 (HCI_LM_MASTER | HCI_LM_ACCEPT);
721 hdev->pkt_type = (__u16) dr.dev_opt;
725 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
726 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
730 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
731 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
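/* For the two MTU cases above, dev_opt packs both values into one __u32:
 * judging by the pointer arithmetic, the low 16 bits carry the packet count
 * and the high 16 bits the MTU (this assumes the usual little-endian layout
 * seen by the ioctl callers). */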
743 int hci_get_dev_list(void __user *arg)
745 struct hci_dev_list_req *dl;
746 struct hci_dev_req *dr;
748 int n = 0, size, err;
751 if (get_user(dev_num, (__u16 __user *) arg))
754 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
757 size = sizeof(*dl) + dev_num * sizeof(*dr);
759 if (!(dl = kmalloc(size, GFP_KERNEL)))
764 read_lock_bh(&hci_dev_list_lock);
765 list_for_each(p, &hci_dev_list) {
766 struct hci_dev *hdev;
767 hdev = list_entry(p, struct hci_dev, list);
768 (dr + n)->dev_id = hdev->id;
769 (dr + n)->dev_opt = hdev->flags;
773 read_unlock_bh(&hci_dev_list_lock);
776 size = sizeof(*dl) + n * sizeof(*dr);
778 err = copy_to_user(arg, dl, size);
781 return err ? -EFAULT : 0;
784 int hci_get_dev_info(void __user *arg)
786 struct hci_dev *hdev;
787 struct hci_dev_info di;
790 if (copy_from_user(&di, arg, sizeof(di)))
793 if (!(hdev = hci_dev_get(di.dev_id)))
796 strcpy(di.name, hdev->name);
797 di.bdaddr = hdev->bdaddr;
798 di.type = hdev->type;
799 di.flags = hdev->flags;
800 di.pkt_type = hdev->pkt_type;
801 di.acl_mtu = hdev->acl_mtu;
802 di.acl_pkts = hdev->acl_pkts;
803 di.sco_mtu = hdev->sco_mtu;
804 di.sco_pkts = hdev->sco_pkts;
805 di.link_policy = hdev->link_policy;
806 di.link_mode = hdev->link_mode;
808 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
809 memcpy(&di.features, &hdev->features, sizeof(di.features));
811 if (copy_to_user(arg, &di, sizeof(di)))
819 /* ---- Interface to HCI drivers ---- */
821 /* Alloc HCI device */
822 struct hci_dev *hci_alloc_dev(void)
824 struct hci_dev *hdev;
826 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
830 skb_queue_head_init(&hdev->driver_init);
834 EXPORT_SYMBOL(hci_alloc_dev);
836 /* Free HCI device */
837 void hci_free_dev(struct hci_dev *hdev)
839 skb_queue_purge(&hdev->driver_init);
841 /* will free via device release */
842 put_device(&hdev->dev);
844 EXPORT_SYMBOL(hci_free_dev);
846 /* Register HCI device */
847 int hci_register_dev(struct hci_dev *hdev)
849 struct list_head *head = &hci_dev_list, *p;
852 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
854 if (!hdev->open || !hdev->close || !hdev->destruct)
857 write_lock_bh(&hci_dev_list_lock);
859 /* Find first available device id */
860 list_for_each(p, &hci_dev_list) {
861 if (list_entry(p, struct hci_dev, list)->id != id)
866 sprintf(hdev->name, "hci%d", id);
868 list_add(&hdev->list, head);
870 atomic_set(&hdev->refcnt, 1);
871 spin_lock_init(&hdev->lock);
874 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
875 hdev->esco_type = (ESCO_HV1);
876 hdev->link_mode = (HCI_LM_ACCEPT);
878 hdev->idle_timeout = 0;
879 hdev->sniff_max_interval = 800;
880 hdev->sniff_min_interval = 80;
882 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
883 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
884 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
886 skb_queue_head_init(&hdev->rx_q);
887 skb_queue_head_init(&hdev->cmd_q);
888 skb_queue_head_init(&hdev->raw_q);
890 for (i = 0; i < 3; i++)
891 hdev->reassembly[i] = NULL;
893 init_waitqueue_head(&hdev->req_wait_q);
894 init_MUTEX(&hdev->req_lock);
896 inquiry_cache_init(hdev);
898 hci_conn_hash_init(hdev);
900 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
902 atomic_set(&hdev->promisc, 0);
904 write_unlock_bh(&hci_dev_list_lock);
906 hci_register_sysfs(hdev);
908 hci_notify(hdev, HCI_DEV_REG);
912 EXPORT_SYMBOL(hci_register_dev);
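/* Transport drivers typically pair hci_alloc_dev()/hci_register_dev() roughly
 * as in this minimal sketch (my_open/my_close/my_send/my_destruct are
 * placeholders for the driver's own callbacks):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->type     = HCI_UART;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */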
914 /* Unregister HCI device */
915 int hci_unregister_dev(struct hci_dev *hdev)
919 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
921 write_lock_bh(&hci_dev_list_lock);
922 list_del(&hdev->list);
923 write_unlock_bh(&hci_dev_list_lock);
925 hci_dev_do_close(hdev);
927 for (i = 0; i < 3; i++)
928 kfree_skb(hdev->reassembly[i]);
930 hci_notify(hdev, HCI_DEV_UNREG);
932 hci_unregister_sysfs(hdev);
938 EXPORT_SYMBOL(hci_unregister_dev);
940 /* Suspend HCI device */
941 int hci_suspend_dev(struct hci_dev *hdev)
943 hci_notify(hdev, HCI_DEV_SUSPEND);
946 EXPORT_SYMBOL(hci_suspend_dev);
948 /* Resume HCI device */
949 int hci_resume_dev(struct hci_dev *hdev)
951 hci_notify(hdev, HCI_DEV_RESUME);
954 EXPORT_SYMBOL(hci_resume_dev);
956 /* Receive packet type fragment */
957 #define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])
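/* Packet type values are HCI_COMMAND_PKT=1, HCI_ACLDATA_PKT=2,
 * HCI_SCODATA_PKT=3 and HCI_EVENT_PKT=4, so "(type) - 2" maps the three
 * receivable types onto reassembly[0..2]. */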
959 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
961 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
965 struct sk_buff *skb = __reassembly(hdev, type);
966 struct { int expect; } *scb;
970 /* Start of the frame */
974 if (count >= HCI_EVENT_HDR_SIZE) {
975 struct hci_event_hdr *h = data;
976 len = HCI_EVENT_HDR_SIZE + h->plen;
981 case HCI_ACLDATA_PKT:
982 if (count >= HCI_ACL_HDR_SIZE) {
983 struct hci_acl_hdr *h = data;
984 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
989 case HCI_SCODATA_PKT:
990 if (count >= HCI_SCO_HDR_SIZE) {
991 struct hci_sco_hdr *h = data;
992 len = HCI_SCO_HDR_SIZE + h->dlen;
998 skb = bt_skb_alloc(len, GFP_ATOMIC);
1000 BT_ERR("%s no memory for packet", hdev->name);
1004 skb->dev = (void *) hdev;
1005 bt_cb(skb)->pkt_type = type;
1007 __reassembly(hdev, type) = skb;
1009 scb = (void *) skb->cb;
1014 scb = (void *) skb->cb;
1018 len = min(len, count);
1020 memcpy(skb_put(skb, len), data, len);
1024 if (scb->expect == 0) {
1025 /* Complete frame */
1027 __reassembly(hdev, type) = NULL;
1029 bt_cb(skb)->pkt_type = type;
1030 hci_recv_frame(skb);
1033 count -= len; data += len;
1038 EXPORT_SYMBOL(hci_recv_fragment);
1040 /* ---- Interface to upper protocols ---- */
1042 /* Register/Unregister protocols.
1043 * hci_task_lock is used to ensure that no tasks are running. */
1044 int hci_register_proto(struct hci_proto *hp)
1048 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1050 if (hp->id >= HCI_MAX_PROTO)
1053 write_lock_bh(&hci_task_lock);
1055 if (!hci_proto[hp->id])
1056 hci_proto[hp->id] = hp;
1060 write_unlock_bh(&hci_task_lock);
1064 EXPORT_SYMBOL(hci_register_proto);
1066 int hci_unregister_proto(struct hci_proto *hp)
1070 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1072 if (hp->id >= HCI_MAX_PROTO)
1075 write_lock_bh(&hci_task_lock);
1077 if (hci_proto[hp->id])
1078 hci_proto[hp->id] = NULL;
1082 write_unlock_bh(&hci_task_lock);
1086 EXPORT_SYMBOL(hci_unregister_proto);
1088 int hci_register_cb(struct hci_cb *cb)
1090 BT_DBG("%p name %s", cb, cb->name);
1092 write_lock_bh(&hci_cb_list_lock);
1093 list_add(&cb->list, &hci_cb_list);
1094 write_unlock_bh(&hci_cb_list_lock);
1098 EXPORT_SYMBOL(hci_register_cb);
1100 int hci_unregister_cb(struct hci_cb *cb)
1102 BT_DBG("%p name %s", cb, cb->name);
1104 write_lock_bh(&hci_cb_list_lock);
1105 list_del(&cb->list);
1106 write_unlock_bh(&hci_cb_list_lock);
1110 EXPORT_SYMBOL(hci_unregister_cb);
1112 static int hci_send_frame(struct sk_buff *skb)
1114 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1121 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1123 if (atomic_read(&hdev->promisc)) {
1125 __net_timestamp(skb);
1127 hci_send_to_sock(hdev, skb);
1130 /* Get rid of skb owner, prior to sending to the driver. */
1133 return hdev->send(skb);
1136 /* Send HCI command */
1137 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1139 int len = HCI_COMMAND_HDR_SIZE + plen;
1140 struct hci_command_hdr *hdr;
1141 struct sk_buff *skb;
1143 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1145 skb = bt_skb_alloc(len, GFP_ATOMIC);
1147 BT_ERR("%s no memory for command", hdev->name);
1151 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1152 hdr->opcode = cpu_to_le16(opcode);
1156 memcpy(skb_put(skb, plen), param, plen);
1158 BT_DBG("skb len %d", skb->len);
1160 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1161 skb->dev = (void *) hdev;
1162 skb_queue_tail(&hdev->cmd_q, skb);
1163 hci_sched_cmd(hdev);
1168 /* Get data from the previously sent command */
1169 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1171 struct hci_command_hdr *hdr;
1173 if (!hdev->sent_cmd)
1176 hdr = (void *) hdev->sent_cmd->data;
1178 if (hdr->opcode != cpu_to_le16(opcode))
1181 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1183 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1187 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1189 struct hci_acl_hdr *hdr;
1192 skb_push(skb, HCI_ACL_HDR_SIZE);
1193 skb_reset_transport_header(skb);
1194 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1195 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1196 hdr->dlen = cpu_to_le16(len);
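/* hci_handle_pack() combines the 12-bit connection handle with the 2-bit
 * packet-boundary and 2-bit broadcast flags in the upper nibble, i.e.
 * handle | (flags << 12); hci_handle()/hci_flags() on the RX path undo
 * the same packing. */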
1199 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1201 struct hci_dev *hdev = conn->hdev;
1202 struct sk_buff *list;
1204 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1206 skb->dev = (void *) hdev;
1207 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1208 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1210 if (!(list = skb_shinfo(skb)->frag_list)) {
1211 /* Non-fragmented */
1212 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1214 skb_queue_tail(&conn->data_q, skb);
1217 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1219 skb_shinfo(skb)->frag_list = NULL;
1221 /* Queue all fragments atomically */
1222 spin_lock_bh(&conn->data_q.lock);
1224 __skb_queue_tail(&conn->data_q, skb);
1226 skb = list; list = list->next;
1228 skb->dev = (void *) hdev;
1229 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1230 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1232 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1234 __skb_queue_tail(&conn->data_q, skb);
1237 spin_unlock_bh(&conn->data_q.lock);
1243 EXPORT_SYMBOL(hci_send_acl);
1246 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1248 struct hci_dev *hdev = conn->hdev;
1249 struct hci_sco_hdr hdr;
1251 BT_DBG("%s len %d", hdev->name, skb->len);
1253 if (skb->len > hdev->sco_mtu) {
1258 hdr.handle = cpu_to_le16(conn->handle);
1259 hdr.dlen = skb->len;
1261 skb_push(skb, HCI_SCO_HDR_SIZE);
1262 skb_reset_transport_header(skb);
1263 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1265 skb->dev = (void *) hdev;
1266 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1267 skb_queue_tail(&conn->data_q, skb);
1271 EXPORT_SYMBOL(hci_send_sco);
1273 /* ---- HCI TX task (outgoing data) ---- */
1275 /* HCI Connection scheduler */
1276 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1278 struct hci_conn_hash *h = &hdev->conn_hash;
1279 struct hci_conn *conn = NULL;
1280 int num = 0, min = ~0;
1281 struct list_head *p;
1283 /* We don't have to lock device here. Connections are always
1284 * added and removed with TX task disabled. */
1285 list_for_each(p, &h->list) {
1287 c = list_entry(p, struct hci_conn, list);
1289 if (c->type != type || skb_queue_empty(&c->data_q))
1292 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1297 if (c->sent < min) {
1304 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1310 BT_DBG("conn %p quote %d", conn, *quote);
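/* The elided tail of hci_low_sent() splits the free buffer count "cnt"
 * fairly across ready connections (roughly cnt / num, but at least one
 * packet), so a busy link cannot starve the others; the c->sent < min test
 * above picks the connection with the fewest outstanding packets. */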
1314 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1316 struct hci_conn_hash *h = &hdev->conn_hash;
1317 struct list_head *p;
1320 BT_ERR("%s ACL tx timeout", hdev->name);
1322 /* Kill stalled connections */
1323 list_for_each(p, &h->list) {
1324 c = list_entry(p, struct hci_conn, list);
1325 if (c->type == ACL_LINK && c->sent) {
1326 BT_ERR("%s killing stalled ACL connection %s",
1327 hdev->name, batostr(&c->dst));
1328 hci_acl_disconn(c, 0x13);
1333 static inline void hci_sched_acl(struct hci_dev *hdev)
1335 struct hci_conn *conn;
1336 struct sk_buff *skb;
1339 BT_DBG("%s", hdev->name);
1341 if (!test_bit(HCI_RAW, &hdev->flags)) {
1342 /* ACL tx timeout must be longer than maximum
1343 * link supervision timeout (40.9 seconds) */
1344 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1345 hci_acl_tx_to(hdev);
1348 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1349 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1350 BT_DBG("skb %p len %d", skb, skb->len);
1352 hci_conn_enter_active_mode(conn);
1354 hci_send_frame(skb);
1355 hdev->acl_last_tx = jiffies;
1364 static inline void hci_sched_sco(struct hci_dev *hdev)
1366 struct hci_conn *conn;
1367 struct sk_buff *skb;
1370 BT_DBG("%s", hdev->name);
1372 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1374 BT_DBG("skb %p len %d", skb, skb->len);
1375 hci_send_frame(skb);
1378 if (conn->sent == ~0)
1384 static inline void hci_sched_esco(struct hci_dev *hdev)
1386 struct hci_conn *conn;
1387 struct sk_buff *skb;
1390 BT_DBG("%s", hdev->name);
1392 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1393 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1394 BT_DBG("skb %p len %d", skb, skb->len);
1395 hci_send_frame(skb);
1398 if (conn->sent == ~0)
1404 static void hci_tx_task(unsigned long arg)
1406 struct hci_dev *hdev = (struct hci_dev *) arg;
1407 struct sk_buff *skb;
1409 read_lock(&hci_task_lock);
1411 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1413 /* Schedule queues and send stuff to HCI driver */
1415 hci_sched_acl(hdev);
1417 hci_sched_sco(hdev);
1419 hci_sched_esco(hdev);
1421 /* Send next queued raw (unknown type) packet */
1422 while ((skb = skb_dequeue(&hdev->raw_q)))
1423 hci_send_frame(skb);
1425 read_unlock(&hci_task_lock);
1428 /* ----- HCI RX task (incoming data processing) ----- */
1430 /* ACL data packet */
1431 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1433 struct hci_acl_hdr *hdr = (void *) skb->data;
1434 struct hci_conn *conn;
1435 __u16 handle, flags;
1437 skb_pull(skb, HCI_ACL_HDR_SIZE);
1439 handle = __le16_to_cpu(hdr->handle);
1440 flags = hci_flags(handle);
1441 handle = hci_handle(handle);
1443 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1445 hdev->stat.acl_rx++;
1448 conn = hci_conn_hash_lookup_handle(hdev, handle);
1449 hci_dev_unlock(hdev);
1452 register struct hci_proto *hp;
1454 hci_conn_enter_active_mode(conn);
1456 /* Send to upper protocol */
1457 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1458 hp->recv_acldata(conn, skb, flags);
1462 BT_ERR("%s ACL packet for unknown connection handle %d",
1463 hdev->name, handle);
1469 /* SCO data packet */
1470 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1472 struct hci_sco_hdr *hdr = (void *) skb->data;
1473 struct hci_conn *conn;
1476 skb_pull(skb, HCI_SCO_HDR_SIZE);
1478 handle = __le16_to_cpu(hdr->handle);
1480 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1482 hdev->stat.sco_rx++;
1485 conn = hci_conn_hash_lookup_handle(hdev, handle);
1486 hci_dev_unlock(hdev);
1489 register struct hci_proto *hp;
1491 /* Send to upper protocol */
1492 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1493 hp->recv_scodata(conn, skb);
1497 BT_ERR("%s SCO packet for unknown connection handle %d",
1498 hdev->name, handle);
1504 static void hci_rx_task(unsigned long arg)
1506 struct hci_dev *hdev = (struct hci_dev *) arg;
1507 struct sk_buff *skb;
1509 BT_DBG("%s", hdev->name);
1511 read_lock(&hci_task_lock);
1513 while ((skb = skb_dequeue(&hdev->rx_q))) {
1514 if (atomic_read(&hdev->promisc)) {
1515 /* Send copy to the sockets */
1516 hci_send_to_sock(hdev, skb);
1519 if (test_bit(HCI_RAW, &hdev->flags)) {
1524 if (test_bit(HCI_INIT, &hdev->flags)) {
1525 /* Don't process data packets in this state. */
1526 switch (bt_cb(skb)->pkt_type) {
1527 case HCI_ACLDATA_PKT:
1528 case HCI_SCODATA_PKT:
1535 switch (bt_cb(skb)->pkt_type) {
1537 hci_event_packet(hdev, skb);
1540 case HCI_ACLDATA_PKT:
1541 BT_DBG("%s ACL data packet", hdev->name);
1542 hci_acldata_packet(hdev, skb);
1545 case HCI_SCODATA_PKT:
1546 BT_DBG("%s SCO data packet", hdev->name);
1547 hci_scodata_packet(hdev, skb);
1556 read_unlock(&hci_task_lock);
1559 static void hci_cmd_task(unsigned long arg)
1561 struct hci_dev *hdev = (struct hci_dev *) arg;
1562 struct sk_buff *skb;
1564 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1566 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1567 BT_ERR("%s command tx timeout", hdev->name);
1568 atomic_set(&hdev->cmd_cnt, 1);
1571 /* Send queued commands */
1572 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1574 kfree_skb(hdev->sent_cmd);
1576 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1577 atomic_dec(&hdev->cmd_cnt);
1578 hci_send_frame(skb);
1579 hdev->cmd_last_tx = jiffies;
1581 skb_queue_head(&hdev->cmd_q, skb);
1582 hci_sched_cmd(hdev);
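/* Command flow control: cmd_cnt is decremented for every command handed to
 * the driver and is replenished from the Command Complete/Command Status
 * events processed in hci_event.c; the one-second cmd_last_tx check above
 * unwedges the queue if the controller stops answering. */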