2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/interrupt.h>
41 #include <linux/notifier.h>
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
46 #include <asm/unaligned.h>
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the three tasklet handlers and notifier helper. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Protects hci_proto[] and serializes RX/TX tasklets vs. (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (L2CAP and SCO) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
73 /* ---- HCI notifications ---- */
75 int hci_register_notifier(struct notifier_block *nb)
77 return atomic_notifier_chain_register(&hci_notifier, nb);
80 int hci_unregister_notifier(struct notifier_block *nb)
82 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 static void hci_notify(struct hci_dev *hdev, int event)
87 atomic_notifier_call_chain(&hci_notifier, event, hdev);
90 /* ---- HCI requests ---- */
92 void hci_req_complete(struct hci_dev *hdev, int result)
94 BT_DBG("%s result 0x%2.2x", hdev->name, result);
96 if (hdev->req_status == HCI_REQ_PEND) {
97 hdev->req_result = result;
98 hdev->req_status = HCI_REQ_DONE;
99 wake_up_interruptible(&hdev->req_wait_q);
103 static void hci_req_cancel(struct hci_dev *hdev, int err)
105 BT_DBG("%s err 0x%2.2x", hdev->name, err);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = err;
109 hdev->req_status = HCI_REQ_CANCELED;
110 wake_up_interruptible(&hdev->req_wait_q);
114 /* Execute request and wait for completion. */
115 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
116 unsigned long opt, __u32 timeout)
118 DECLARE_WAITQUEUE(wait, current);
121 BT_DBG("%s start", hdev->name);
123 hdev->req_status = HCI_REQ_PEND;
125 add_wait_queue(&hdev->req_wait_q, &wait);
126 set_current_state(TASK_INTERRUPTIBLE);
129 schedule_timeout(timeout);
131 remove_wait_queue(&hdev->req_wait_q, &wait);
133 if (signal_pending(current))
136 switch (hdev->req_status) {
138 err = -bt_err(hdev->req_result);
141 case HCI_REQ_CANCELED:
142 err = -hdev->req_result;
150 hdev->req_status = hdev->req_result = 0;
152 BT_DBG("%s end: err %d", hdev->name, err);
157 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
158 unsigned long opt, __u32 timeout)
162 if (!test_bit(HCI_UP, &hdev->flags))
165 /* Serialize all requests */
167 ret = __hci_request(hdev, req, opt, timeout);
168 hci_req_unlock(hdev);
173 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
175 BT_DBG("%s %ld", hdev->name, opt);
178 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
181 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
187 BT_DBG("%s %ld", hdev->name, opt);
189 /* Driver initialization */
191 /* Special commands */
192 while ((skb = skb_dequeue(&hdev->driver_init))) {
193 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
194 skb->dev = (void *) hdev;
195 skb_queue_tail(&hdev->cmd_q, skb);
198 skb_queue_purge(&hdev->driver_init);
200 /* Mandatory initialization */
203 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
204 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
216 /* Host buffer size */
218 struct hci_cp_host_buffer_size cp;
219 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
220 cp.sco_mtu = HCI_MAX_SCO_SIZE;
221 cp.acl_max_pkt = cpu_to_le16(0xffff);
222 cp.sco_max_pkt = cpu_to_le16(0xffff);
223 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
227 /* Read BD Address */
228 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
230 /* Read Class of Device */
231 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
233 /* Read Local Name */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
236 /* Read Voice Setting */
237 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
239 /* Optional initialization */
241 /* Clear Event Filters */
242 flt_type = HCI_FLT_CLEAR_ALL;
243 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
245 /* Page timeout ~20 secs */
246 param = cpu_to_le16(0x8000);
247 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m);
249 /* Connection accept timeout ~20 secs */
250 param = cpu_to_le16(0x7d00);
251 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
254 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
258 BT_DBG("%s %x", hdev->name, scan);
260 /* Inquiry and Page scans */
261 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
264 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
268 BT_DBG("%s %x", hdev->name, auth);
271 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
274 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
278 BT_DBG("%s %x", hdev->name, encrypt);
281 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
284 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
286 __le16 policy = cpu_to_le16(opt);
288 BT_DBG("%s %x", hdev->name, policy);
290 /* Default link policy */
291 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
294 /* Get HCI device by index.
295 * Device is held on return. */
296 struct hci_dev *hci_dev_get(int index)
298 struct hci_dev *hdev = NULL;
306 read_lock(&hci_dev_list_lock);
307 list_for_each(p, &hci_dev_list) {
308 struct hci_dev *d = list_entry(p, struct hci_dev, list);
309 if (d->id == index) {
310 hdev = hci_dev_hold(d);
314 read_unlock(&hci_dev_list_lock);
318 /* ---- Inquiry support ---- */
319 static void inquiry_cache_flush(struct hci_dev *hdev)
321 struct inquiry_cache *cache = &hdev->inq_cache;
322 struct inquiry_entry *next = cache->list, *e;
324 BT_DBG("cache %p", cache);
333 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
335 struct inquiry_cache *cache = &hdev->inq_cache;
336 struct inquiry_entry *e;
338 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
340 for (e = cache->list; e; e = e->next)
341 if (!bacmp(&e->data.bdaddr, bdaddr))
346 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
348 struct inquiry_cache *cache = &hdev->inq_cache;
349 struct inquiry_entry *e;
351 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
353 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
354 /* Entry not in the cache. Add new one. */
355 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
357 e->next = cache->list;
361 memcpy(&e->data, data, sizeof(*data));
362 e->timestamp = jiffies;
363 cache->timestamp = jiffies;
366 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
368 struct inquiry_cache *cache = &hdev->inq_cache;
369 struct inquiry_info *info = (struct inquiry_info *) buf;
370 struct inquiry_entry *e;
373 for (e = cache->list; e && copied < num; e = e->next, copied++) {
374 struct inquiry_data *data = &e->data;
375 bacpy(&info->bdaddr, &data->bdaddr);
376 info->pscan_rep_mode = data->pscan_rep_mode;
377 info->pscan_period_mode = data->pscan_period_mode;
378 info->pscan_mode = data->pscan_mode;
379 memcpy(info->dev_class, data->dev_class, 3);
380 info->clock_offset = data->clock_offset;
384 BT_DBG("cache %p, copied %d", cache, copied);
388 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
390 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
391 struct hci_cp_inquiry cp;
393 BT_DBG("%s", hdev->name);
395 if (test_bit(HCI_INQUIRY, &hdev->flags))
399 memcpy(&cp.lap, &ir->lap, 3);
400 cp.length = ir->length;
401 cp.num_rsp = ir->num_rsp;
402 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
405 int hci_inquiry(void __user *arg)
407 __u8 __user *ptr = arg;
408 struct hci_inquiry_req ir;
409 struct hci_dev *hdev;
410 int err = 0, do_inquiry = 0, max_rsp;
414 if (copy_from_user(&ir, ptr, sizeof(ir)))
417 if (!(hdev = hci_dev_get(ir.dev_id)))
420 hci_dev_lock_bh(hdev);
421 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
422 inquiry_cache_empty(hdev) ||
423 ir.flags & IREQ_CACHE_FLUSH) {
424 inquiry_cache_flush(hdev);
427 hci_dev_unlock_bh(hdev);
429 timeo = ir.length * msecs_to_jiffies(2000);
430 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
433 /* for unlimited number of responses we will use buffer with 255 entries */
434 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
436 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
437 * copy it to the user space.
439 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
444 hci_dev_lock_bh(hdev);
445 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
446 hci_dev_unlock_bh(hdev);
448 BT_DBG("num_rsp %d", ir.num_rsp);
450 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
452 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
465 /* ---- HCI ioctl helpers ---- */
467 int hci_dev_open(__u16 dev)
469 struct hci_dev *hdev;
472 if (!(hdev = hci_dev_get(dev)))
475 BT_DBG("%s %p", hdev->name, hdev);
479 if (test_bit(HCI_UP, &hdev->flags)) {
484 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
485 set_bit(HCI_RAW, &hdev->flags);
487 if (hdev->open(hdev)) {
492 if (!test_bit(HCI_RAW, &hdev->flags)) {
493 atomic_set(&hdev->cmd_cnt, 1);
494 set_bit(HCI_INIT, &hdev->flags);
496 //__hci_request(hdev, hci_reset_req, 0, HZ);
497 ret = __hci_request(hdev, hci_init_req, 0,
498 msecs_to_jiffies(HCI_INIT_TIMEOUT));
500 clear_bit(HCI_INIT, &hdev->flags);
505 set_bit(HCI_UP, &hdev->flags);
506 hci_notify(hdev, HCI_DEV_UP);
508 /* Init failed, cleanup */
509 tasklet_kill(&hdev->rx_task);
510 tasklet_kill(&hdev->tx_task);
511 tasklet_kill(&hdev->cmd_task);
513 skb_queue_purge(&hdev->cmd_q);
514 skb_queue_purge(&hdev->rx_q);
519 if (hdev->sent_cmd) {
520 kfree_skb(hdev->sent_cmd);
521 hdev->sent_cmd = NULL;
529 hci_req_unlock(hdev);
534 static int hci_dev_do_close(struct hci_dev *hdev)
536 BT_DBG("%s %p", hdev->name, hdev);
538 hci_req_cancel(hdev, ENODEV);
541 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
542 hci_req_unlock(hdev);
546 /* Kill RX and TX tasks */
547 tasklet_kill(&hdev->rx_task);
548 tasklet_kill(&hdev->tx_task);
550 hci_dev_lock_bh(hdev);
551 inquiry_cache_flush(hdev);
552 hci_conn_hash_flush(hdev);
553 hci_dev_unlock_bh(hdev);
555 hci_notify(hdev, HCI_DEV_DOWN);
561 skb_queue_purge(&hdev->cmd_q);
562 atomic_set(&hdev->cmd_cnt, 1);
563 if (!test_bit(HCI_RAW, &hdev->flags)) {
564 set_bit(HCI_INIT, &hdev->flags);
565 __hci_request(hdev, hci_reset_req, 0,
566 msecs_to_jiffies(250));
567 clear_bit(HCI_INIT, &hdev->flags);
571 tasklet_kill(&hdev->cmd_task);
574 skb_queue_purge(&hdev->rx_q);
575 skb_queue_purge(&hdev->cmd_q);
576 skb_queue_purge(&hdev->raw_q);
578 /* Drop last sent command */
579 if (hdev->sent_cmd) {
580 kfree_skb(hdev->sent_cmd);
581 hdev->sent_cmd = NULL;
584 /* After this point our queues are empty
585 * and no tasks are scheduled. */
591 hci_req_unlock(hdev);
597 int hci_dev_close(__u16 dev)
599 struct hci_dev *hdev;
602 if (!(hdev = hci_dev_get(dev)))
604 err = hci_dev_do_close(hdev);
609 int hci_dev_reset(__u16 dev)
611 struct hci_dev *hdev;
614 if (!(hdev = hci_dev_get(dev)))
618 tasklet_disable(&hdev->tx_task);
620 if (!test_bit(HCI_UP, &hdev->flags))
624 skb_queue_purge(&hdev->rx_q);
625 skb_queue_purge(&hdev->cmd_q);
627 hci_dev_lock_bh(hdev);
628 inquiry_cache_flush(hdev);
629 hci_conn_hash_flush(hdev);
630 hci_dev_unlock_bh(hdev);
635 atomic_set(&hdev->cmd_cnt, 1);
636 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
638 if (!test_bit(HCI_RAW, &hdev->flags))
639 ret = __hci_request(hdev, hci_reset_req, 0,
640 msecs_to_jiffies(HCI_INIT_TIMEOUT));
643 tasklet_enable(&hdev->tx_task);
644 hci_req_unlock(hdev);
649 int hci_dev_reset_stat(__u16 dev)
651 struct hci_dev *hdev;
654 if (!(hdev = hci_dev_get(dev)))
657 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
664 int hci_dev_cmd(unsigned int cmd, void __user *arg)
666 struct hci_dev *hdev;
667 struct hci_dev_req dr;
670 if (copy_from_user(&dr, arg, sizeof(dr)))
673 if (!(hdev = hci_dev_get(dr.dev_id)))
678 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
679 msecs_to_jiffies(HCI_INIT_TIMEOUT));
683 if (!lmp_encrypt_capable(hdev)) {
688 if (!test_bit(HCI_AUTH, &hdev->flags)) {
689 /* Auth must be enabled first */
690 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
691 msecs_to_jiffies(HCI_INIT_TIMEOUT));
696 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
697 msecs_to_jiffies(HCI_INIT_TIMEOUT));
701 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
702 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
711 hdev->link_mode = ((__u16) dr.dev_opt) &
712 (HCI_LM_MASTER | HCI_LM_ACCEPT);
716 hdev->pkt_type = (__u16) dr.dev_opt;
720 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
721 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
725 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
726 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
738 int hci_get_dev_list(void __user *arg)
740 struct hci_dev_list_req *dl;
741 struct hci_dev_req *dr;
743 int n = 0, size, err;
746 if (get_user(dev_num, (__u16 __user *) arg))
749 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
752 size = sizeof(*dl) + dev_num * sizeof(*dr);
754 if (!(dl = kzalloc(size, GFP_KERNEL)))
759 read_lock_bh(&hci_dev_list_lock);
760 list_for_each(p, &hci_dev_list) {
761 struct hci_dev *hdev;
762 hdev = list_entry(p, struct hci_dev, list);
763 (dr + n)->dev_id = hdev->id;
764 (dr + n)->dev_opt = hdev->flags;
768 read_unlock_bh(&hci_dev_list_lock);
771 size = sizeof(*dl) + n * sizeof(*dr);
773 err = copy_to_user(arg, dl, size);
776 return err ? -EFAULT : 0;
779 int hci_get_dev_info(void __user *arg)
781 struct hci_dev *hdev;
782 struct hci_dev_info di;
785 if (copy_from_user(&di, arg, sizeof(di)))
788 if (!(hdev = hci_dev_get(di.dev_id)))
791 strcpy(di.name, hdev->name);
792 di.bdaddr = hdev->bdaddr;
793 di.type = hdev->type;
794 di.flags = hdev->flags;
795 di.pkt_type = hdev->pkt_type;
796 di.acl_mtu = hdev->acl_mtu;
797 di.acl_pkts = hdev->acl_pkts;
798 di.sco_mtu = hdev->sco_mtu;
799 di.sco_pkts = hdev->sco_pkts;
800 di.link_policy = hdev->link_policy;
801 di.link_mode = hdev->link_mode;
803 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
804 memcpy(&di.features, &hdev->features, sizeof(di.features));
806 if (copy_to_user(arg, &di, sizeof(di)))
814 /* ---- Interface to HCI drivers ---- */
816 /* Alloc HCI device */
817 struct hci_dev *hci_alloc_dev(void)
819 struct hci_dev *hdev;
821 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
825 skb_queue_head_init(&hdev->driver_init);
829 EXPORT_SYMBOL(hci_alloc_dev);
831 /* Free HCI device */
832 void hci_free_dev(struct hci_dev *hdev)
834 skb_queue_purge(&hdev->driver_init);
836 /* will free via device release */
837 put_device(&hdev->dev);
839 EXPORT_SYMBOL(hci_free_dev);
841 /* Register HCI device */
842 int hci_register_dev(struct hci_dev *hdev)
844 struct list_head *head = &hci_dev_list, *p;
847 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
849 if (!hdev->open || !hdev->close || !hdev->destruct)
852 write_lock_bh(&hci_dev_list_lock);
854 /* Find first available device id */
855 list_for_each(p, &hci_dev_list) {
856 if (list_entry(p, struct hci_dev, list)->id != id)
861 sprintf(hdev->name, "hci%d", id);
863 list_add(&hdev->list, head);
865 atomic_set(&hdev->refcnt, 1);
866 spin_lock_init(&hdev->lock);
869 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
870 hdev->esco_type = (ESCO_HV1);
871 hdev->link_mode = (HCI_LM_ACCEPT);
873 hdev->idle_timeout = 0;
874 hdev->sniff_max_interval = 800;
875 hdev->sniff_min_interval = 80;
877 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
878 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
879 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
881 skb_queue_head_init(&hdev->rx_q);
882 skb_queue_head_init(&hdev->cmd_q);
883 skb_queue_head_init(&hdev->raw_q);
885 for (i = 0; i < 3; i++)
886 hdev->reassembly[i] = NULL;
888 init_waitqueue_head(&hdev->req_wait_q);
889 init_MUTEX(&hdev->req_lock);
891 inquiry_cache_init(hdev);
893 hci_conn_hash_init(hdev);
895 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
897 atomic_set(&hdev->promisc, 0);
899 write_unlock_bh(&hci_dev_list_lock);
901 hci_register_sysfs(hdev);
903 hci_notify(hdev, HCI_DEV_REG);
907 EXPORT_SYMBOL(hci_register_dev);
909 /* Unregister HCI device */
910 int hci_unregister_dev(struct hci_dev *hdev)
914 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
916 write_lock_bh(&hci_dev_list_lock);
917 list_del(&hdev->list);
918 write_unlock_bh(&hci_dev_list_lock);
920 hci_dev_do_close(hdev);
922 for (i = 0; i < 3; i++)
923 kfree_skb(hdev->reassembly[i]);
925 hci_notify(hdev, HCI_DEV_UNREG);
927 hci_unregister_sysfs(hdev);
933 EXPORT_SYMBOL(hci_unregister_dev);
935 /* Suspend HCI device */
936 int hci_suspend_dev(struct hci_dev *hdev)
938 hci_notify(hdev, HCI_DEV_SUSPEND);
941 EXPORT_SYMBOL(hci_suspend_dev);
943 /* Resume HCI device */
944 int hci_resume_dev(struct hci_dev *hdev)
946 hci_notify(hdev, HCI_DEV_RESUME);
949 EXPORT_SYMBOL(hci_resume_dev);
951 /* Receive packet type fragment */
952 #define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])
954 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
956 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
960 struct sk_buff *skb = __reassembly(hdev, type);
961 struct { int expect; } *scb;
965 /* Start of the frame */
969 if (count >= HCI_EVENT_HDR_SIZE) {
970 struct hci_event_hdr *h = data;
971 len = HCI_EVENT_HDR_SIZE + h->plen;
976 case HCI_ACLDATA_PKT:
977 if (count >= HCI_ACL_HDR_SIZE) {
978 struct hci_acl_hdr *h = data;
979 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
984 case HCI_SCODATA_PKT:
985 if (count >= HCI_SCO_HDR_SIZE) {
986 struct hci_sco_hdr *h = data;
987 len = HCI_SCO_HDR_SIZE + h->dlen;
993 skb = bt_skb_alloc(len, GFP_ATOMIC);
995 BT_ERR("%s no memory for packet", hdev->name);
999 skb->dev = (void *) hdev;
1000 bt_cb(skb)->pkt_type = type;
1002 __reassembly(hdev, type) = skb;
1004 scb = (void *) skb->cb;
1009 scb = (void *) skb->cb;
1013 len = min(len, count);
1015 memcpy(skb_put(skb, len), data, len);
1019 if (scb->expect == 0) {
1020 /* Complete frame */
1022 __reassembly(hdev, type) = NULL;
1024 bt_cb(skb)->pkt_type = type;
1025 hci_recv_frame(skb);
1028 count -= len; data += len;
1033 EXPORT_SYMBOL(hci_recv_fragment);
1035 /* ---- Interface to upper protocols ---- */
1037 /* Register/Unregister protocols.
1038 * hci_task_lock is used to ensure that no tasks are running. */
1039 int hci_register_proto(struct hci_proto *hp)
1043 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1045 if (hp->id >= HCI_MAX_PROTO)
1048 write_lock_bh(&hci_task_lock);
1050 if (!hci_proto[hp->id])
1051 hci_proto[hp->id] = hp;
1055 write_unlock_bh(&hci_task_lock);
1059 EXPORT_SYMBOL(hci_register_proto);
1061 int hci_unregister_proto(struct hci_proto *hp)
1065 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1067 if (hp->id >= HCI_MAX_PROTO)
1070 write_lock_bh(&hci_task_lock);
1072 if (hci_proto[hp->id])
1073 hci_proto[hp->id] = NULL;
1077 write_unlock_bh(&hci_task_lock);
1081 EXPORT_SYMBOL(hci_unregister_proto);
1083 int hci_register_cb(struct hci_cb *cb)
1085 BT_DBG("%p name %s", cb, cb->name);
1087 write_lock_bh(&hci_cb_list_lock);
1088 list_add(&cb->list, &hci_cb_list);
1089 write_unlock_bh(&hci_cb_list_lock);
1093 EXPORT_SYMBOL(hci_register_cb);
1095 int hci_unregister_cb(struct hci_cb *cb)
1097 BT_DBG("%p name %s", cb, cb->name);
1099 write_lock_bh(&hci_cb_list_lock);
1100 list_del(&cb->list);
1101 write_unlock_bh(&hci_cb_list_lock);
1105 EXPORT_SYMBOL(hci_unregister_cb);
1107 static int hci_send_frame(struct sk_buff *skb)
1109 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1116 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1118 if (atomic_read(&hdev->promisc)) {
1120 __net_timestamp(skb);
1122 hci_send_to_sock(hdev, skb);
1125 /* Get rid of skb owner, prior to sending to the driver. */
1128 return hdev->send(skb);
1131 /* Send HCI command */
1132 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1134 int len = HCI_COMMAND_HDR_SIZE + plen;
1135 struct hci_command_hdr *hdr;
1136 struct sk_buff *skb;
1138 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1140 skb = bt_skb_alloc(len, GFP_ATOMIC);
1142 BT_ERR("%s no memory for command", hdev->name);
1146 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1147 hdr->opcode = cpu_to_le16(opcode);
1151 memcpy(skb_put(skb, plen), param, plen);
1153 BT_DBG("skb len %d", skb->len);
1155 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1156 skb->dev = (void *) hdev;
1157 skb_queue_tail(&hdev->cmd_q, skb);
1158 hci_sched_cmd(hdev);
1163 /* Get data from the previously sent command */
1164 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1166 struct hci_command_hdr *hdr;
1168 if (!hdev->sent_cmd)
1171 hdr = (void *) hdev->sent_cmd->data;
1173 if (hdr->opcode != cpu_to_le16(opcode))
1176 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1178 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1182 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1184 struct hci_acl_hdr *hdr;
1187 skb_push(skb, HCI_ACL_HDR_SIZE);
1188 skb_reset_transport_header(skb);
1189 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1190 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1191 hdr->dlen = cpu_to_le16(len);
1194 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1196 struct hci_dev *hdev = conn->hdev;
1197 struct sk_buff *list;
1199 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1201 skb->dev = (void *) hdev;
1202 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1203 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1205 if (!(list = skb_shinfo(skb)->frag_list)) {
1206 /* Non fragmented */
1207 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1209 skb_queue_tail(&conn->data_q, skb);
1212 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1214 skb_shinfo(skb)->frag_list = NULL;
1216 /* Queue all fragments atomically */
1217 spin_lock_bh(&conn->data_q.lock);
1219 __skb_queue_tail(&conn->data_q, skb);
1221 skb = list; list = list->next;
1223 skb->dev = (void *) hdev;
1224 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1225 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1227 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1229 __skb_queue_tail(&conn->data_q, skb);
1232 spin_unlock_bh(&conn->data_q.lock);
1238 EXPORT_SYMBOL(hci_send_acl);
1241 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1243 struct hci_dev *hdev = conn->hdev;
1244 struct hci_sco_hdr hdr;
1246 BT_DBG("%s len %d", hdev->name, skb->len);
1248 if (skb->len > hdev->sco_mtu) {
1253 hdr.handle = cpu_to_le16(conn->handle);
1254 hdr.dlen = skb->len;
1256 skb_push(skb, HCI_SCO_HDR_SIZE);
1257 skb_reset_transport_header(skb);
1258 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1260 skb->dev = (void *) hdev;
1261 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1262 skb_queue_tail(&conn->data_q, skb);
1266 EXPORT_SYMBOL(hci_send_sco);
1268 /* ---- HCI TX task (outgoing data) ---- */
1270 /* HCI Connection scheduler */
1271 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1273 struct hci_conn_hash *h = &hdev->conn_hash;
1274 struct hci_conn *conn = NULL;
1275 int num = 0, min = ~0;
1276 struct list_head *p;
1278 /* We don't have to lock device here. Connections are always
1279 * added and removed with TX task disabled. */
1280 list_for_each(p, &h->list) {
1282 c = list_entry(p, struct hci_conn, list);
1284 if (c->type != type || skb_queue_empty(&c->data_q))
1287 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1292 if (c->sent < min) {
1299 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1305 BT_DBG("conn %p quote %d", conn, *quote);
1309 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1311 struct hci_conn_hash *h = &hdev->conn_hash;
1312 struct list_head *p;
1315 BT_ERR("%s ACL tx timeout", hdev->name);
1317 /* Kill stalled connections */
1318 list_for_each(p, &h->list) {
1319 c = list_entry(p, struct hci_conn, list);
1320 if (c->type == ACL_LINK && c->sent) {
1321 BT_ERR("%s killing stalled ACL connection %s",
1322 hdev->name, batostr(&c->dst));
1323 hci_acl_disconn(c, 0x13);
1328 static inline void hci_sched_acl(struct hci_dev *hdev)
1330 struct hci_conn *conn;
1331 struct sk_buff *skb;
1334 BT_DBG("%s", hdev->name);
1336 if (!test_bit(HCI_RAW, &hdev->flags)) {
1337 /* ACL tx timeout must be longer than maximum
1338 * link supervision timeout (40.9 seconds) */
1339 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1340 hci_acl_tx_to(hdev);
1343 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
1344 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1345 BT_DBG("skb %p len %d", skb, skb->len);
1347 hci_conn_enter_active_mode(conn);
1349 hci_send_frame(skb);
1350 hdev->acl_last_tx = jiffies;
1359 static inline void hci_sched_sco(struct hci_dev *hdev)
1361 struct hci_conn *conn;
1362 struct sk_buff *skb;
1365 BT_DBG("%s", hdev->name);
1367 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
1368 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1369 BT_DBG("skb %p len %d", skb, skb->len);
1370 hci_send_frame(skb);
1373 if (conn->sent == ~0)
1379 static inline void hci_sched_esco(struct hci_dev *hdev)
1381 struct hci_conn *conn;
1382 struct sk_buff *skb;
1385 BT_DBG("%s", hdev->name);
1387 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
1388 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1389 BT_DBG("skb %p len %d", skb, skb->len);
1390 hci_send_frame(skb);
1393 if (conn->sent == ~0)
1399 static void hci_tx_task(unsigned long arg)
1401 struct hci_dev *hdev = (struct hci_dev *) arg;
1402 struct sk_buff *skb;
1404 read_lock(&hci_task_lock);
1406 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1408 /* Schedule queues and send stuff to HCI driver */
1410 hci_sched_acl(hdev);
1412 hci_sched_sco(hdev);
1414 hci_sched_esco(hdev);
1416 /* Send next queued raw (unknown type) packet */
1417 while ((skb = skb_dequeue(&hdev->raw_q)))
1418 hci_send_frame(skb);
1420 read_unlock(&hci_task_lock);
1423 /* ----- HCI RX task (incoming data proccessing) ----- */
1425 /* ACL data packet */
1426 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1428 struct hci_acl_hdr *hdr = (void *) skb->data;
1429 struct hci_conn *conn;
1430 __u16 handle, flags;
1432 skb_pull(skb, HCI_ACL_HDR_SIZE);
1434 handle = __le16_to_cpu(hdr->handle);
1435 flags = hci_flags(handle);
1436 handle = hci_handle(handle);
1438 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1440 hdev->stat.acl_rx++;
1443 conn = hci_conn_hash_lookup_handle(hdev, handle);
1444 hci_dev_unlock(hdev);
1447 register struct hci_proto *hp;
1449 hci_conn_enter_active_mode(conn);
1451 /* Send to upper protocol */
1452 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1453 hp->recv_acldata(conn, skb, flags);
1457 BT_ERR("%s ACL packet for unknown connection handle %d",
1458 hdev->name, handle);
1464 /* SCO data packet */
1465 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1467 struct hci_sco_hdr *hdr = (void *) skb->data;
1468 struct hci_conn *conn;
1471 skb_pull(skb, HCI_SCO_HDR_SIZE);
1473 handle = __le16_to_cpu(hdr->handle);
1475 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1477 hdev->stat.sco_rx++;
1480 conn = hci_conn_hash_lookup_handle(hdev, handle);
1481 hci_dev_unlock(hdev);
1484 register struct hci_proto *hp;
1486 /* Send to upper protocol */
1487 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1488 hp->recv_scodata(conn, skb);
1492 BT_ERR("%s SCO packet for unknown connection handle %d",
1493 hdev->name, handle);
1499 static void hci_rx_task(unsigned long arg)
1501 struct hci_dev *hdev = (struct hci_dev *) arg;
1502 struct sk_buff *skb;
1504 BT_DBG("%s", hdev->name);
1506 read_lock(&hci_task_lock);
1508 while ((skb = skb_dequeue(&hdev->rx_q))) {
1509 if (atomic_read(&hdev->promisc)) {
1510 /* Send copy to the sockets */
1511 hci_send_to_sock(hdev, skb);
1514 if (test_bit(HCI_RAW, &hdev->flags)) {
1519 if (test_bit(HCI_INIT, &hdev->flags)) {
1520 /* Don't process data packets in this states. */
1521 switch (bt_cb(skb)->pkt_type) {
1522 case HCI_ACLDATA_PKT:
1523 case HCI_SCODATA_PKT:
1530 switch (bt_cb(skb)->pkt_type) {
1532 hci_event_packet(hdev, skb);
1535 case HCI_ACLDATA_PKT:
1536 BT_DBG("%s ACL data packet", hdev->name);
1537 hci_acldata_packet(hdev, skb);
1540 case HCI_SCODATA_PKT:
1541 BT_DBG("%s SCO data packet", hdev->name);
1542 hci_scodata_packet(hdev, skb);
1551 read_unlock(&hci_task_lock);
1554 static void hci_cmd_task(unsigned long arg)
1556 struct hci_dev *hdev = (struct hci_dev *) arg;
1557 struct sk_buff *skb;
1559 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1561 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1562 BT_ERR("%s command tx timeout", hdev->name);
1563 atomic_set(&hdev->cmd_cnt, 1);
1566 /* Send queued commands */
1567 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1568 kfree_skb(hdev->sent_cmd);
1570 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1571 atomic_dec(&hdev->cmd_cnt);
1572 hci_send_frame(skb);
1573 hdev->cmd_last_tx = jiffies;
1575 skb_queue_head(&hdev->cmd_q, skb);
1576 hci_sched_cmd(hdev);