/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
};
struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};
struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	unsigned long	flags;
	__u8		features[8];

	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;

	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	struct device		*parent;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;
	bdaddr_t	dst;
	__u16		handle;
	__u16		state;
	__u8		type;
	__u8		out;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work;

	struct hci_dev	*hdev;

	struct hci_conn	*link;
};
extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)		// 30 seconds
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)		// 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}
static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}
static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
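
/*
 * Illustrative sketch (not part of the original header): how a caller might
 * consult the inquiry cache for a remote device seen during inquiry.  The
 * `hdev` and `bdaddr` variables are assumed to exist in the caller; access
 * is serialized with the device lock, as the event handlers do.
 *
 *	struct inquiry_entry *ie;
 *
 *	hci_dev_lock_bh(hdev);
 *	ie = hci_inquiry_cache_lookup(hdev, &bdaddr);
 *	if (ie && inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX)
 *		BT_DBG("fresh inquiry data for %s", batostr(&ie->data.bdaddr));
 *	hci_dev_unlock_bh(hdev);
 */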
/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};
static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);
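
/*
 * Illustrative sketch (not part of the original header): the usual pattern an
 * upper layer (e.g. L2CAP) follows to obtain an authenticated ACL link.  The
 * `hdev` and remote `bdaddr` variables are assumed to come from the caller;
 * error handling is reduced to the bare minimum.
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock_bh(hdev);
 *	conn = hci_connect(hdev, ACL_LINK, &bdaddr);
 *	if (conn) {
 *		hci_conn_auth(conn);
 *		... use the link, then drop the reference ...
 *		hci_conn_put(conn);
 *	}
 *	hci_dev_unlock_bh(hdev);
 */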
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
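
/*
 * Note on the reference counting above (illustrative, not from the original
 * header): hci_conn_hold() cancels the pending disconnect timer and the
 * final hci_conn_put() re-arms it, so an idle ACL link is torn down roughly
 * HCI_DISCONN_TIMEOUT ms after its last user goes away.  A typical
 * asynchronous user therefore brackets its work like this:
 *
 *	hci_conn_hold(conn);
 *	... queue work that uses conn ...
 *	hci_conn_put(conn);	// in the completion path
 */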
/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}
/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}
#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
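
/*
 * Illustrative sketch (not part of the original header): looking up a device
 * by index, as the ioctl helpers below do.  hci_dev_get() takes a module and
 * refcount reference that must be released with hci_dev_put().  The index 0
 * (hci0) is an assumption for the example.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		hci_dev_lock_bh(hdev);
 *		... inspect hdev state ...
 *		hci_dev_unlock_bh(hdev);
 *		hci_dev_put(hdev);
 *	}
 */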
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
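
/*
 * Illustrative driver skeleton (not part of the original header): how a
 * transport driver typically allocates and registers an adapter.  The
 * my_open/my_close/my_flush/my_send callbacks and `parent_dev` (the
 * transport's struct device) are hypothetical.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hdev->owner = THIS_MODULE;
 *	SET_HCIDEV_DEV(hdev, parent_dev);
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */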
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
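
/*
 * Illustrative sketch (not part of the original header): how a transport
 * driver hands a received HCI packet to the core.  The skb is assumed to
 * hold exactly one complete HCI packet; depending on kernel version the
 * packet type indicator lives in skb->pkt_type or in the bt_cb() control
 * buffer, so the field below is an assumption.
 *
 *	skb->dev = (void *) hdev;
 *	skb->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */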
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev)	((hdev)->parent = (pdev))
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)	((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)	((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)		((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev)	((dev)->features[5] & LMP_SNIFF_SUBR)
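
/*
 * Illustrative sketch (not part of the original header): these macros test
 * the local adapter's LMP feature bits, e.g. before asking the core to put
 * a link into sniff mode:
 *
 *	if (lmp_sniff_capable(hdev))
 *		hci_conn_enter_sniff_mode(conn);
 */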
/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*auth_cfm)		(struct hci_conn *conn, __u8 status);
	int (*encrypt_cfm)	(struct hci_conn *conn, __u8 status);
};
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}
static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);
}
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);
}
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
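
/*
 * Illustrative sketch (not part of the original header): how an upper
 * protocol such as L2CAP plugs into the core.  The my_* callback names are
 * hypothetical.
 *
 *	static struct hci_proto my_l2cap_proto = {
 *		.name		= "L2CAP",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= my_connect_ind,
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_ind	= my_disconn_ind,
 *		.recv_acldata	= my_recv_acldata,
 *	};
 *
 *	err = hci_register_proto(&my_l2cap_proto);
 *	...
 *	hci_unregister_proto(&my_l2cap_proto);
 */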
/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm)	(struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	hci_proto_auth_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->auth_cfm)
			cb->auth_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	hci_proto_encrypt_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->encrypt_cfm)
			cb->encrypt_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
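
/*
 * Illustrative sketch (not part of the original header): security-aware
 * users (RFCOMM does something similar) register an hci_cb to hear about
 * authentication and encryption changes on any connection.  The my_*
 * names are hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_module",
 *		.encrypt_cfm	= my_encrypt_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */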
int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
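
/*
 * Illustrative sketch (not part of the original header): queueing a raw HCI
 * command through the core.  OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE and the
 * SCAN_* constants are assumed to come from <net/bluetooth/hci.h>.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE,
 *			sizeof(scan), &scan);
 */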
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
/* HCI info for socket */
#define hci_pi(sk)	((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
};
/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		down(&d->req_lock)
#define hci_req_unlock(d)	up(&d->req_lock)
void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */