/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
28 #include <net/bluetooth/hci.h>
/* HCI upper protocols — fixed indices into the hci_proto[] registration
 * table used by the dispatch helpers below */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
/* NOTE(review): extraction fragment — original line numbers are fused into
 * each line and the struct bodies are incomplete (members/braces missing);
 * comments below are hedged, confirm against the complete header. */
34 /* HCI Core structures */
/* presumably a member of struct inquiry_data — TODO confirm */
38 __u8 pscan_period_mode;
/* One discovered remote device, kept in a singly linked list */
45 struct inquiry_entry {
46 struct inquiry_entry *next;
48 struct inquiry_data data;
/* Cache of recent inquiry results; ->list == NULL means empty
 * (see inquiry_cache_empty() below) */
51 struct inquiry_cache {
54 struct inquiry_entry *list;
/* Per-device table of active connections (list of struct hci_conn) */
57 struct hci_conn_hash {
58 struct list_head list;
/* NOTE(review): fragment of struct hci_dev — many members and the braces
 * are missing; stray line numbers are fused into each line. */
65 struct list_head list;
/* sniff-mode interval bounds — presumably baseband slots, TODO confirm */
82 __u16 sniff_min_interval;
83 __u16 sniff_max_interval;
/* outstanding-packet counters per link type */
93 unsigned int acl_pkts;
94 unsigned int sco_pkts;
/* time of last transmission per queue — presumably in jiffies */
96 unsigned long cmd_last_tx;
97 unsigned long acl_last_tx;
98 unsigned long sco_last_tx;
/* deferred work, kicked via hci_sched_cmd/rx/tx() below */
100 struct tasklet_struct cmd_task;
101 struct tasklet_struct rx_task;
102 struct tasklet_struct tx_task;
/* packet queues: inbound frames, raw frames, pending commands */
104 struct sk_buff_head rx_q;
105 struct sk_buff_head raw_q;
106 struct sk_buff_head cmd_q;
/* command currently awaiting its completion event */
108 struct sk_buff *sent_cmd;
/* serializes requests — taken by hci_req_lock()/hci_req_unlock() */
110 struct semaphore req_lock;
111 wait_queue_head_t req_wait_q;
115 struct inquiry_cache inq_cache;
116 struct hci_conn_hash conn_hash;
118 struct hci_dev_stats stat;
/* driver-supplied init frames — presumably sent when opening the device */
120 struct sk_buff_head driver_init;
/* parent bus device, set via SET_HCIDEV_DEV() */
127 struct device *parent;
130 struct module *owner;
/* driver callbacks */
132 int (*open)(struct hci_dev *hdev);
133 int (*close)(struct hci_dev *hdev);
134 int (*flush)(struct hci_dev *hdev);
135 int (*send)(struct sk_buff *skb);
136 void (*destruct)(struct hci_dev *hdev);
137 void (*notify)(struct hci_dev *hdev, unsigned int evt);
138 int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
/* NOTE(review): fragment of struct hci_conn — the struct header and most
 * members are missing; stray line numbers fused into each line. */
142 struct list_head list;
163 struct sk_buff_head data_q;
/* disc_timer: armed by hci_conn_put() to auto-disconnect after the last
 * reference is dropped; idle_timer: cancelled on put for ACL links */
165 struct timer_list disc_timer;
166 struct timer_list idle_timer;
168 struct hci_dev *hdev;
/* companion link — presumably the SCO<->ACL pairing, TODO confirm */
173 struct hci_conn *link;
/* Global registries (defined elsewhere in the core) with their guarding
 * rwlocks; declared here for the inline dispatch helpers below. */
176 extern struct hci_proto *hci_proto[];
177 extern struct list_head hci_dev_list;
178 extern struct list_head hci_cb_list;
179 extern rwlock_t hci_dev_list_lock;
180 extern rwlock_t hci_cb_list_lock;
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

/* Lock helpers for the per-device inquiry cache; (c) is a
 * struct inquiry_cache *.  Argument parenthesized so any pointer
 * expression may be passed safely. */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
/* NOTE(review): extraction fragment — function braces/blank lines are
 * missing and stray line numbers are fused into each line. */
/* Initialize the per-device inquiry cache lock (other init lines missing) */
191 static inline void inquiry_cache_init(struct hci_dev *hdev)
193 struct inquiry_cache *c = &hdev->inq_cache;
194 spin_lock_init(&c->lock);
/* Nonzero when the cache holds no entries */
198 static inline int inquiry_cache_empty(struct hci_dev *hdev)
200 struct inquiry_cache *c = &hdev->inq_cache;
201 return (c->list == NULL);
/* Jiffies elapsed since the cache's last timestamp */
204 static inline long inquiry_cache_age(struct hci_dev *hdev)
206 struct inquiry_cache *c = &hdev->inq_cache;
207 return jiffies - c->timestamp;
/* Jiffies elapsed since a single entry was stamped */
210 static inline long inquiry_entry_age(struct inquiry_entry *e)
212 return jiffies - e->timestamp;
/* Lookup/update entry points implemented in the core */
215 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
216 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
218 /* ----- HCI Connections ----- */
/* NOTE(review): fragment of the connection-flag enum — the enum header and
 * earlier constants are missing.  These look like "pending" bits set while
 * the corresponding HCI command is outstanding — TODO confirm. */
221 HCI_CONN_ENCRYPT_PEND,
222 HCI_CONN_RSWITCH_PEND,
223 HCI_CONN_MODE_CHANGE_PEND,
/* NOTE(review): fragments — braces and several statements are missing;
 * stray line numbers fused into each line. */
/* Set up an empty connection table with its spinlock */
226 static inline void hci_conn_hash_init(struct hci_dev *hdev)
228 struct hci_conn_hash *h = &hdev->conn_hash;
229 INIT_LIST_HEAD(&h->list);
230 spin_lock_init(&h->lock);
/* Insert a connection; per-type counter updates are among the missing lines */
235 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
237 struct hci_conn_hash *h = &hdev->conn_hash;
238 list_add(&c->list, &h->list);
239 if (c->type == ACL_LINK)
/* Remove a connection (list_del and counter updates missing here) */
245 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
247 struct hci_conn_hash *h = &hdev->conn_hash;
249 if (c->type == ACL_LINK)
/* Linear search by connection handle — O(n) over the list */
255 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
258 struct hci_conn_hash *h = &hdev->conn_hash;
262 list_for_each(p, &h->list) {
263 c = list_entry(p, struct hci_conn, list);
264 if (c->handle == handle)
/* Linear search by link type and remote Bluetooth address */
270 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
271 __u8 type, bdaddr_t *ba)
273 struct hci_conn_hash *h = &hdev->conn_hash;
277 list_for_each(p, &h->list) {
278 c = list_entry(p, struct hci_conn, list);
279 if (c->type == type && !bacmp(&c->dst, ba))
/* Connection management entry points — implemented in the core, presumably
 * hci_conn.c; stray line numbers fused into each line. */
285 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
286 void hci_add_sco(struct hci_conn *conn, __u16 handle);
288 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
289 int hci_conn_del(struct hci_conn *conn);
290 void hci_conn_hash_flush(struct hci_dev *hdev);
/* NOTE(review): parameter is named 'src' but hci_connect presumably takes
 * the destination address — confirm against the implementation */
292 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
293 int hci_conn_auth(struct hci_conn *conn);
294 int hci_conn_encrypt(struct hci_conn *conn);
295 int hci_conn_change_link_key(struct hci_conn *conn);
/* uint8_t here vs __u8 elsewhere — inconsistent style, same width */
296 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
298 void hci_conn_enter_active_mode(struct hci_conn *conn);
299 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
/* Take a connection reference and cancel any pending auto-disconnect */
301 static inline void hci_conn_hold(struct hci_conn *conn)
303 atomic_inc(&conn->refcnt);
304 del_timer(&conn->disc_timer);
/* Drop a reference; on the last put, arm disc_timer for a deferred
 * disconnect — long HCI_DISCONN_TIMEOUT for ACL (whose idle timer is
 * cancelled), short 10 ms otherwise.  NOTE(review): fragment — braces and
 * the timeo declaration are among the missing lines. */
307 static inline void hci_conn_put(struct hci_conn *conn)
309 if (atomic_dec_and_test(&conn->refcnt)) {
311 if (conn->type == ACL_LINK) {
312 timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
315 del_timer(&conn->idle_timer);
317 timeo = msecs_to_jiffies(10);
318 mod_timer(&conn->disc_timer, jiffies + timeo);
322 /* ----- HCI tasks ----- */
/* Kick the command / receive / transmit tasklets on hdev.
 * NOTE(review): fragments — the function braces are missing. */
323 static inline void hci_sched_cmd(struct hci_dev *hdev)
325 tasklet_schedule(&hdev->cmd_task);
328 static inline void hci_sched_rx(struct hci_dev *hdev)
330 tasklet_schedule(&hdev->rx_task);
333 static inline void hci_sched_tx(struct hci_dev *hdev)
335 tasklet_schedule(&hdev->tx_task);
338 /* ----- HCI Devices ----- */
/* Drop a device reference without touching the module refcount; presumably
 * destructs the device on zero (the action line is missing). */
339 static inline void __hci_dev_put(struct hci_dev *d)
341 if (atomic_dec_and_test(&d->refcnt))
/* Public put: also releases the driver-module reference */
345 static inline void hci_dev_put(struct hci_dev *d)
348 module_put(d->owner);
/* Take a device reference; returns d so calls can be chained */
351 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
353 atomic_inc(&d->refcnt);
/* Public hold: succeeds only while the driver module can be pinned;
 * presumably returns NULL otherwise (lines missing) */
357 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
359 if (try_module_get(d->owner))
360 return __hci_dev_hold(d);
/* Device lock helpers; (d) is a struct hci_dev *.  Argument parenthesized
 * so any pointer expression may be passed safely. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
/* Core device API; the ioctl backends take __user pointers from the HCI
 * socket layer.  Stray line numbers fused into each line. */
369 struct hci_dev *hci_dev_get(int index);
370 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
372 struct hci_dev *hci_alloc_dev(void);
373 void hci_free_dev(struct hci_dev *hdev);
374 int hci_register_dev(struct hci_dev *hdev);
375 int hci_unregister_dev(struct hci_dev *hdev);
376 int hci_suspend_dev(struct hci_dev *hdev);
377 int hci_resume_dev(struct hci_dev *hdev);
/* __u16 'dev' arguments are device indices, presumably — TODO confirm */
378 int hci_dev_open(__u16 dev);
379 int hci_dev_close(__u16 dev);
380 int hci_dev_reset(__u16 dev);
381 int hci_dev_reset_stat(__u16 dev);
382 int hci_dev_cmd(unsigned int cmd, void __user *arg);
383 int hci_get_dev_list(void __user *arg);
384 int hci_get_dev_info(void __user *arg);
385 int hci_get_conn_list(void __user *arg);
386 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
387 int hci_inquiry(void __user *arg);
/* Event-packet demultiplexer, called from the rx path */
389 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
391 /* Receive frame from HCI drivers */
/* NOTE(review): fragment — the drop path for down devices and the return
 * statements are missing.  skb->dev doubles as the hci_dev pointer here. */
392 static inline int hci_recv_frame(struct sk_buff *skb)
394 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
/* reject frames unless the device is up or still initializing */
395 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
396 && !test_bit(HCI_INIT, &hdev->flags))) {
/* mark direction for the socket monitor layer */
402 bt_cb(skb)->incoming = 1;
/* stamp arrival time */
405 __net_timestamp(skb);
407 /* Queue frame for rx task */
408 skb_queue_tail(&hdev->rx_q, skb);
/* sysfs registration hooks for a device */
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);

/* Bind hdev to its parent bus device for sysfs registration */
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)	((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)	((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)		((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev)	((dev)->features[5] & LMP_SNIFF_SUBR)
424 /* ----- HCI protocols ----- */
/* NOTE(review): fragment of struct hci_proto — the struct header and first
 * members are missing.  Upper-layer (L2CAP/SCO) event hooks, invoked by the
 * hci_proto_* dispatch helpers below; any hook may be NULL. */
432 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
433 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
434 int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
435 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
436 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
437 int (*auth_cfm) (struct hci_conn *conn, __u8 status);
438 int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
/* NOTE(review): fragments — braces, locals and returns missing throughout;
 * stray line numbers fused into each line.  Each helper fans one HCI event
 * out to the registered L2CAP and SCO protocols, skipping NULL hooks. */
/* connect_ind: OR each protocol's response into a mask (mask declaration
 * and return are among the missing lines) */
441 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
443 register struct hci_proto *hp;
446 hp = hci_proto[HCI_PROTO_L2CAP];
447 if (hp && hp->connect_ind)
448 mask |= hp->connect_ind(hdev, bdaddr, type);
450 hp = hci_proto[HCI_PROTO_SCO];
451 if (hp && hp->connect_ind)
452 mask |= hp->connect_ind(hdev, bdaddr, type);
457 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
459 register struct hci_proto *hp;
461 hp = hci_proto[HCI_PROTO_L2CAP];
462 if (hp && hp->connect_cfm)
463 hp->connect_cfm(conn, status);
465 hp = hci_proto[HCI_PROTO_SCO];
466 if (hp && hp->connect_cfm)
467 hp->connect_cfm(conn, status);
470 static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
472 register struct hci_proto *hp;
474 hp = hci_proto[HCI_PROTO_L2CAP];
475 if (hp && hp->disconn_ind)
476 hp->disconn_ind(conn, reason);
478 hp = hci_proto[HCI_PROTO_SCO];
479 if (hp && hp->disconn_ind)
480 hp->disconn_ind(conn, reason);
483 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
485 register struct hci_proto *hp;
487 hp = hci_proto[HCI_PROTO_L2CAP];
488 if (hp && hp->auth_cfm)
489 hp->auth_cfm(conn, status);
491 hp = hci_proto[HCI_PROTO_SCO];
492 if (hp && hp->auth_cfm)
493 hp->auth_cfm(conn, status);
496 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
498 register struct hci_proto *hp;
500 hp = hci_proto[HCI_PROTO_L2CAP];
501 if (hp && hp->encrypt_cfm)
502 hp->encrypt_cfm(conn, status);
504 hp = hci_proto[HCI_PROTO_SCO];
505 if (hp && hp->encrypt_cfm)
506 hp->encrypt_cfm(conn, status);
/* (Un)register an upper protocol in hci_proto[] */
509 int hci_register_proto(struct hci_proto *hproto);
510 int hci_unregister_proto(struct hci_proto *hproto);
512 /* ----- HCI callbacks ----- */
/* NOTE(review): fragment of struct hci_cb — the struct header is missing.
 * Security-event listeners, linked on hci_cb_list and dispatched under
 * hci_cb_list_lock by the hci_*_cfm helpers below. */
514 struct list_head list;
518 void (*auth_cfm) (struct hci_conn *conn, __u8 status);
519 void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
520 void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
521 void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
/* NOTE(review): fragments — braces and the list_head locals are missing;
 * stray line numbers fused into each line.  Each helper first notifies the
 * protocols (auth/encrypt), then walks hci_cb_list under the read lock
 * invoking the matching callback on every registered listener. */
524 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
528 hci_proto_auth_cfm(conn, status);
530 read_lock_bh(&hci_cb_list_lock);
531 list_for_each(p, &hci_cb_list) {
532 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
/* NOTE(review): a NULL check on cb->auth_cfm is presumably on the missing
 * line 533 — confirm against the full header */
534 cb->auth_cfm(conn, status);
536 read_unlock_bh(&hci_cb_list_lock);
539 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
543 hci_proto_encrypt_cfm(conn, status);
545 read_lock_bh(&hci_cb_list_lock);
546 list_for_each(p, &hci_cb_list) {
547 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
549 cb->encrypt_cfm(conn, status, encrypt);
551 read_unlock_bh(&hci_cb_list_lock);
554 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
558 read_lock_bh(&hci_cb_list_lock);
559 list_for_each(p, &hci_cb_list) {
560 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
561 if (cb->key_change_cfm)
562 cb->key_change_cfm(conn, status);
564 read_unlock_bh(&hci_cb_list_lock);
567 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
571 read_lock_bh(&hci_cb_list_lock);
572 list_for_each(p, &hci_cb_list) {
573 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
574 if (cb->role_switch_cfm)
575 cb->role_switch_cfm(conn, status, role);
577 read_unlock_bh(&hci_cb_list_lock);
/* (Un)register a callback listener on hci_cb_list */
580 int hci_register_cb(struct hci_cb *hcb);
581 int hci_unregister_cb(struct hci_cb *hcb);
/* Kernel notifier chain for device events */
583 int hci_register_notifier(struct notifier_block *nb);
584 int hci_unregister_notifier(struct notifier_block *nb);
/* Transmit entry points for commands, ACL and SCO data */
586 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
587 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
588 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
/* Returns the parameter data of the pending command matching ogf/ocf —
 * presumably taken from hdev->sent_cmd, TODO confirm */
590 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);
592 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
594 /* ----- HCI Sockets ----- */
595 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
597 /* HCI info for socket */
/* NOTE(review): macro argument 'sk' is unparenthesized — only safe for
 * simple expressions.  A fragment of struct hci_pinfo follows (its struct
 * header and remaining members are missing). */
598 #define hci_pi(sk) ((struct hci_pinfo *) sk)
602 struct hci_dev *hdev;
603 struct hci_filter filter;
607 /* HCI security filter */
608 #define HCI_SFLT_MAX_OGF 5
/* NOTE(review): fragment of struct hci_sec_filter — other members and the
 * closing brace are missing.  Four 32-bit words per OGF row presumably form
 * a 128-bit OCF permission bitmask — confirm against the socket code. */
610 struct hci_sec_filter {
613 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* ----- HCI requests ----- */
/* Request completion states stored in hdev->req_status */
#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2
/* Serialize HCI requests on the device's req_lock semaphore.
 * NOTE(review): macro argument unparenthesized — fine for a plain 'hdev'. */
621 #define hci_req_lock(d) down(&d->req_lock)
622 #define hci_req_unlock(d) up(&d->req_lock)
/* Completes the outstanding request — presumably wakes req_wait_q waiters */
624 void hci_req_complete(struct hci_dev *hdev, int result);
626 #endif /* __HCI_CORE_H */