2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.11"
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
80 if (sk->sk_state == BT_CONNECT &&
81 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
82 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
83 reason = ECONNREFUSED;
87 __l2cap_sock_close(sk, reason);
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 /* ---- L2CAP channels ---- */
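/*
 * Channel identifiers as used by the helpers below: CID 0x0001 is the
 * signalling channel (raw sockets), CID 0x0002 the connectionless channel
 * (SOCK_DGRAM), and connection-oriented SOCK_SEQPACKET channels get a
 * dynamically allocated CID (per the L2CAP spec these start at 0x0040)
 * from l2cap_alloc_cid().
 */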
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 s = __l2cap_get_chan_by_scid(l, cid);
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 for (; cid < 0xffff; cid++) {
165 if (!__l2cap_get_chan_by_scid(l, cid))
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
193 l2cap_pi(next)->prev_c = prev;
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
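/*
 * Channels on a connection are kept on an intrusive, doubly-linked list
 * threaded through l2cap_pinfo (next_c/prev_c), with its head in
 * conn->chan_list and protected by chan_list.lock. The __ variants of the
 * lookup helpers above expect the caller to hold that lock; a rough sketch
 * of a safe walk, as done elsewhere in this file:
 *
 *	read_lock(&conn->chan_list.lock);
 *	for (sk = conn->chan_list.head; sk; sk = l2cap_pi(sk)->next_c)
 *		do_something(sk);	(illustrative placeholder)
 *	read_unlock(&conn->chan_list.lock);
 */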
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
218 /* Raw sockets can only send and receive signalling messages */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
227 bt_accept_enqueue(parent, sk);
231 * Must be called on the locked socket. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
258 sk->sk_state_change(sk);
261 /* Service level security */
262 static inline int l2cap_check_link_mode(struct sock *sk)
264 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
266 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
267 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
268 return hci_conn_encrypt(conn->hcon);
270 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
271 return hci_conn_auth(conn->hcon);
276 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
280 /* Get next available identifier.
281 * 1 - 128 are used by the kernel.
282 * 129 - 199 are reserved.
283 * 200 - 254 are used by utilities like l2ping, etc.
286 spin_lock_bh(&conn->lock);
288 if (++conn->tx_ident > 128)
293 spin_unlock_bh(&conn->lock);
298 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
300 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
302 BT_DBG("code 0x%2.2x", code);
307 return hci_send_acl(conn->hcon, skb, 0);
310 static void l2cap_do_start(struct sock *sk)
312 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
314 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
315 if (l2cap_check_link_mode(sk)) {
316 struct l2cap_conn_req req;
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
320 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
323 L2CAP_CONN_REQ, sizeof(req), &req);
326 struct l2cap_info_req req;
327 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
329 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
330 conn->info_ident = l2cap_get_ident(conn);
332 mod_timer(&conn->info_timer, jiffies +
333 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
335 l2cap_send_cmd(conn, conn->info_ident,
336 L2CAP_INFO_REQ, sizeof(req), &req);
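/*
 * Note on the connect sequence implemented here: the first channel set up
 * on an ACL link triggers a one-shot L2CAP Information Request for the
 * extended feature mask (guarded by L2CAP_INFO_FEAT_MASK_REQ_SENT and the
 * info_timer). Pending Connection Requests/Responses are then issued by
 * l2cap_conn_start() once that exchange completes, is rejected, or times
 * out.
 */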
340 /* ---- L2CAP connections ---- */
341 static void l2cap_conn_start(struct l2cap_conn *conn)
343 struct l2cap_chan_list *l = &conn->chan_list;
346 BT_DBG("conn %p", conn);
350 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
353 if (sk->sk_type != SOCK_SEQPACKET) {
358 if (sk->sk_state == BT_CONNECT) {
359 if (l2cap_check_link_mode(sk)) {
360 struct l2cap_conn_req req;
361 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
362 req.psm = l2cap_pi(sk)->psm;
364 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
366 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
367 L2CAP_CONN_REQ, sizeof(req), &req);
369 } else if (sk->sk_state == BT_CONNECT2) {
370 struct l2cap_conn_rsp rsp;
371 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
372 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
374 if (l2cap_check_link_mode(sk)) {
375 sk->sk_state = BT_CONFIG;
376 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
377 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
379 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
380 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
383 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
384 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
390 read_unlock(&l->lock);
393 static void l2cap_conn_ready(struct l2cap_conn *conn)
395 struct l2cap_chan_list *l = &conn->chan_list;
398 BT_DBG("conn %p", conn);
402 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
405 if (sk->sk_type != SOCK_SEQPACKET) {
406 l2cap_sock_clear_timer(sk);
407 sk->sk_state = BT_CONNECTED;
408 sk->sk_state_change(sk);
409 } else if (sk->sk_state == BT_CONNECT)
415 read_unlock(&l->lock);
418 /* Notify sockets that we cannot guarantee reliability anymore */
419 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
421 struct l2cap_chan_list *l = &conn->chan_list;
424 BT_DBG("conn %p", conn);
428 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
429 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
433 read_unlock(&l->lock);
436 static void l2cap_info_timeout(unsigned long arg)
438 struct l2cap_conn *conn = (void *) arg;
440 conn->info_ident = 0;
442 l2cap_conn_start(conn);
445 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
447 struct l2cap_conn *conn = hcon->l2cap_data;
452 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
456 hcon->l2cap_data = conn;
459 BT_DBG("hcon %p conn %p", hcon, conn);
461 conn->mtu = hcon->hdev->acl_mtu;
462 conn->src = &hcon->hdev->bdaddr;
463 conn->dst = &hcon->dst;
467 setup_timer(&conn->info_timer, l2cap_info_timeout,
468 (unsigned long) conn);
470 spin_lock_init(&conn->lock);
471 rwlock_init(&conn->chan_list.lock);
476 static void l2cap_conn_del(struct hci_conn *hcon, int err)
478 struct l2cap_conn *conn = hcon->l2cap_data;
484 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
487 kfree_skb(conn->rx_skb);
490 while ((sk = conn->chan_list.head)) {
492 l2cap_chan_del(sk, err);
497 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
498 del_timer_sync(&conn->info_timer);
500 hcon->l2cap_data = NULL;
504 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
506 struct l2cap_chan_list *l = &conn->chan_list;
507 write_lock_bh(&l->lock);
508 __l2cap_chan_add(conn, sk, parent);
509 write_unlock_bh(&l->lock);
512 /* ---- Socket interface ---- */
513 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
516 struct hlist_node *node;
517 sk_for_each(sk, node, &l2cap_sk_list.head)
518 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
525 /* Find socket with psm and source bdaddr.
526 * Returns closest match.
528 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
530 struct sock *sk = NULL, *sk1 = NULL;
531 struct hlist_node *node;
533 sk_for_each(sk, node, &l2cap_sk_list.head) {
534 if (state && sk->sk_state != state)
537 if (l2cap_pi(sk)->psm == psm) {
539 if (!bacmp(&bt_sk(sk)->src, src))
543 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
547 return node ? sk : sk1;
550 /* Find socket with given address (psm, src).
551 * Returns locked socket */
552 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
555 read_lock(&l2cap_sk_list.lock);
556 s = __l2cap_get_sock_by_psm(state, psm, src);
557 if (s) bh_lock_sock(s);
558 read_unlock(&l2cap_sk_list.lock);
562 static void l2cap_sock_destruct(struct sock *sk)
566 skb_queue_purge(&sk->sk_receive_queue);
567 skb_queue_purge(&sk->sk_write_queue);
570 static void l2cap_sock_cleanup_listen(struct sock *parent)
574 BT_DBG("parent %p", parent);
576 /* Close channels that have not yet been accepted */
577 while ((sk = bt_accept_dequeue(parent, NULL)))
578 l2cap_sock_close(sk);
580 parent->sk_state = BT_CLOSED;
581 sock_set_flag(parent, SOCK_ZAPPED);
584 /* Kill socket (only if zapped and orphan)
585 * Must be called on unlocked socket.
587 static void l2cap_sock_kill(struct sock *sk)
589 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
592 BT_DBG("sk %p state %d", sk, sk->sk_state);
594 /* Kill poor orphan */
595 bt_sock_unlink(&l2cap_sk_list, sk);
596 sock_set_flag(sk, SOCK_DEAD);
600 static void __l2cap_sock_close(struct sock *sk, int reason)
602 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
604 switch (sk->sk_state) {
606 l2cap_sock_cleanup_listen(sk);
612 if (sk->sk_type == SOCK_SEQPACKET) {
613 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
614 struct l2cap_disconn_req req;
616 sk->sk_state = BT_DISCONN;
617 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
619 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
620 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
621 l2cap_send_cmd(conn, l2cap_get_ident(conn),
622 L2CAP_DISCONN_REQ, sizeof(req), &req);
624 l2cap_chan_del(sk, reason);
629 l2cap_chan_del(sk, reason);
633 sock_set_flag(sk, SOCK_ZAPPED);
638 /* Must be called on unlocked socket. */
639 static void l2cap_sock_close(struct sock *sk)
641 l2cap_sock_clear_timer(sk);
643 __l2cap_sock_close(sk, ECONNRESET);
648 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
650 struct l2cap_pinfo *pi = l2cap_pi(sk);
655 sk->sk_type = parent->sk_type;
656 pi->imtu = l2cap_pi(parent)->imtu;
657 pi->omtu = l2cap_pi(parent)->omtu;
658 pi->link_mode = l2cap_pi(parent)->link_mode;
660 pi->imtu = L2CAP_DEFAULT_MTU;
665 /* Default config options */
667 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
670 static struct proto l2cap_proto = {
672 .owner = THIS_MODULE,
673 .obj_size = sizeof(struct l2cap_pinfo)
676 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
680 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
684 sock_init_data(sock, sk);
685 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
687 sk->sk_destruct = l2cap_sock_destruct;
688 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
690 sock_reset_flag(sk, SOCK_ZAPPED);
692 sk->sk_protocol = proto;
693 sk->sk_state = BT_OPEN;
695 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
697 bt_sock_link(&l2cap_sk_list, sk);
701 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
705 BT_DBG("sock %p", sock);
707 sock->state = SS_UNCONNECTED;
709 if (sock->type != SOCK_SEQPACKET &&
710 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
711 return -ESOCKTNOSUPPORT;
713 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
716 sock->ops = &l2cap_sock_ops;
718 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
722 l2cap_sock_init(sk, NULL);
726 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
728 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
729 struct sock *sk = sock->sk;
732 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
734 if (!addr || addr->sa_family != AF_BLUETOOTH)
739 if (sk->sk_state != BT_OPEN) {
744 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
745 !capable(CAP_NET_BIND_SERVICE)) {
750 write_lock_bh(&l2cap_sk_list.lock);
752 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
755 /* Save source address */
756 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
757 l2cap_pi(sk)->psm = la->l2_psm;
758 l2cap_pi(sk)->sport = la->l2_psm;
759 sk->sk_state = BT_BOUND;
762 write_unlock_bh(&l2cap_sk_list.lock);
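/*
 * For reference, a rough userspace sketch of reaching this bind path
 * (illustrative values only, assuming the usual AF_BLUETOOTH headers;
 * error handling omitted):
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);	 PSMs below 0x1001 need CAP_NET_BIND_SERVICE
 *	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
 *	bind(sk, (struct sockaddr *) &addr, sizeof(addr));
 */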
769 static int l2cap_do_connect(struct sock *sk)
771 bdaddr_t *src = &bt_sk(sk)->src;
772 bdaddr_t *dst = &bt_sk(sk)->dst;
773 struct l2cap_conn *conn;
774 struct hci_conn *hcon;
775 struct hci_dev *hdev;
779 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
781 if (!(hdev = hci_get_route(dst, src)))
782 return -EHOSTUNREACH;
784 hci_dev_lock_bh(hdev);
788 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
789 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
790 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
791 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
792 auth_type = HCI_AT_NO_BONDING_MITM;
794 auth_type = HCI_AT_GENERAL_BONDING_MITM;
796 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
797 auth_type = HCI_AT_NO_BONDING;
799 auth_type = HCI_AT_GENERAL_BONDING;
802 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
806 conn = l2cap_conn_add(hcon, 0);
814 /* Update source addr of the socket */
815 bacpy(src, conn->src);
817 l2cap_chan_add(conn, sk, NULL);
819 sk->sk_state = BT_CONNECT;
820 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
822 if (hcon->state == BT_CONNECTED) {
823 if (sk->sk_type != SOCK_SEQPACKET) {
824 l2cap_sock_clear_timer(sk);
825 sk->sk_state = BT_CONNECTED;
831 hci_dev_unlock_bh(hdev);
836 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
838 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
839 struct sock *sk = sock->sk;
846 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
851 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
856 switch (sk->sk_state) {
860 /* Already connecting */
864 /* Already connected */
877 /* Set destination address and psm */
878 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
879 l2cap_pi(sk)->psm = la->l2_psm;
881 if ((err = l2cap_do_connect(sk)))
885 err = bt_sock_wait_state(sk, BT_CONNECTED,
886 sock_sndtimeo(sk, flags & O_NONBLOCK));
892 static int l2cap_sock_listen(struct socket *sock, int backlog)
894 struct sock *sk = sock->sk;
897 BT_DBG("sk %p backlog %d", sk, backlog);
901 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
906 if (!l2cap_pi(sk)->psm) {
907 bdaddr_t *src = &bt_sk(sk)->src;
912 write_lock_bh(&l2cap_sk_list.lock);
914 for (psm = 0x1001; psm < 0x1100; psm += 2)
915 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
916 l2cap_pi(sk)->psm = htobs(psm);
917 l2cap_pi(sk)->sport = htobs(psm);
922 write_unlock_bh(&l2cap_sk_list.lock);
928 sk->sk_max_ack_backlog = backlog;
929 sk->sk_ack_backlog = 0;
930 sk->sk_state = BT_LISTEN;
937 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
939 DECLARE_WAITQUEUE(wait, current);
940 struct sock *sk = sock->sk, *nsk;
944 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
946 if (sk->sk_state != BT_LISTEN) {
951 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
953 BT_DBG("sk %p timeo %ld", sk, timeo);
955 /* Wait for an incoming connection. (wake-one). */
956 add_wait_queue_exclusive(sk->sk_sleep, &wait);
957 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
958 set_current_state(TASK_INTERRUPTIBLE);
965 timeo = schedule_timeout(timeo);
966 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
968 if (sk->sk_state != BT_LISTEN) {
973 if (signal_pending(current)) {
974 err = sock_intr_errno(timeo);
978 set_current_state(TASK_RUNNING);
979 remove_wait_queue(sk->sk_sleep, &wait);
984 newsock->state = SS_CONNECTED;
986 BT_DBG("new socket %p", nsk);
993 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
995 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
996 struct sock *sk = sock->sk;
998 BT_DBG("sock %p, sk %p", sock, sk);
1000 addr->sa_family = AF_BLUETOOTH;
1001 *len = sizeof(struct sockaddr_l2);
1004 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1006 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1008 la->l2_psm = l2cap_pi(sk)->psm;
1012 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1014 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1015 struct sk_buff *skb, **frag;
1016 int err, hlen, count, sent = 0;
1017 struct l2cap_hdr *lh;
1019 BT_DBG("sk %p len %d", sk, len);
1021 /* First fragment (with L2CAP header) */
1022 if (sk->sk_type == SOCK_DGRAM)
1023 hlen = L2CAP_HDR_SIZE + 2;
1025 hlen = L2CAP_HDR_SIZE;
1027 count = min_t(unsigned int, (conn->mtu - hlen), len);
1029 skb = bt_skb_send_alloc(sk, hlen + count,
1030 msg->msg_flags & MSG_DONTWAIT, &err);
1034 /* Create L2CAP header */
1035 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1036 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1037 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1039 if (sk->sk_type == SOCK_DGRAM)
1040 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1042 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1050 /* Continuation fragments (no L2CAP header) */
1051 frag = &skb_shinfo(skb)->frag_list;
1053 count = min_t(unsigned int, conn->mtu, len);
1055 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1059 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1067 frag = &(*frag)->next;
1070 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
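/*
 * Frame layout produced by l2cap_do_send(): a basic L2CAP header (16-bit
 * payload length, 16-bit destination CID), plus a 2-byte PSM right after
 * the header for SOCK_DGRAM (connectionless) sockets, followed by the
 * payload. Anything beyond conn->mtu is carried in frag_list skbs and the
 * whole frame is handed to hci_send_acl().
 */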
1080 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1082 struct sock *sk = sock->sk;
1085 BT_DBG("sock %p, sk %p", sock, sk);
1087 err = sock_error(sk);
1091 if (msg->msg_flags & MSG_OOB)
1094 /* Check outgoing MTU */
1095 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1100 if (sk->sk_state == BT_CONNECTED)
1101 err = l2cap_do_send(sk, msg, len);
1109 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1111 struct sock *sk = sock->sk;
1112 struct l2cap_options opts;
1116 BT_DBG("sk %p", sk);
1122 opts.imtu = l2cap_pi(sk)->imtu;
1123 opts.omtu = l2cap_pi(sk)->omtu;
1124 opts.flush_to = l2cap_pi(sk)->flush_to;
1125 opts.mode = L2CAP_MODE_BASIC;
1127 len = min_t(unsigned int, sizeof(opts), optlen);
1128 if (copy_from_user((char *) &opts, optval, len)) {
1133 l2cap_pi(sk)->imtu = opts.imtu;
1134 l2cap_pi(sk)->omtu = opts.omtu;
1138 if (get_user(opt, (u32 __user *) optval)) {
1143 l2cap_pi(sk)->link_mode = opt;
1155 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1157 struct sock *sk = sock->sk;
1158 struct l2cap_options opts;
1159 struct l2cap_conninfo cinfo;
1162 BT_DBG("sk %p", sk);
1164 if (get_user(len, optlen))
1171 opts.imtu = l2cap_pi(sk)->imtu;
1172 opts.omtu = l2cap_pi(sk)->omtu;
1173 opts.flush_to = l2cap_pi(sk)->flush_to;
1174 opts.mode = L2CAP_MODE_BASIC;
1176 len = min_t(unsigned int, len, sizeof(opts));
1177 if (copy_to_user(optval, (char *) &opts, len))
1183 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1187 case L2CAP_CONNINFO:
1188 if (sk->sk_state != BT_CONNECTED) {
1193 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1194 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1196 len = min_t(unsigned int, len, sizeof(cinfo));
1197 if (copy_to_user(optval, (char *) &cinfo, len))
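/*
 * Userspace view of the socket options handled above, as a rough sketch
 * on a bound or connected L2CAP socket sk (assuming the usual SOL_L2CAP,
 * L2CAP_OPTIONS and L2CAP_CONNINFO definitions; error handling omitted):
 *
 *	struct l2cap_options opts;
 *	socklen_t optlen = sizeof(opts);
 *
 *	getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen);
 *	opts.imtu = 672;
 *	setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
 */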
1211 static int l2cap_sock_shutdown(struct socket *sock, int how)
1213 struct sock *sk = sock->sk;
1216 BT_DBG("sock %p, sk %p", sock, sk);
1222 if (!sk->sk_shutdown) {
1223 sk->sk_shutdown = SHUTDOWN_MASK;
1224 l2cap_sock_clear_timer(sk);
1225 __l2cap_sock_close(sk, 0);
1227 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1228 err = bt_sock_wait_state(sk, BT_CLOSED,
1235 static int l2cap_sock_release(struct socket *sock)
1237 struct sock *sk = sock->sk;
1240 BT_DBG("sock %p, sk %p", sock, sk);
1245 err = l2cap_sock_shutdown(sock, 2);
1248 l2cap_sock_kill(sk);
1252 static void l2cap_chan_ready(struct sock *sk)
1254 struct sock *parent = bt_sk(sk)->parent;
1256 BT_DBG("sk %p, parent %p", sk, parent);
1258 l2cap_pi(sk)->conf_state = 0;
1259 l2cap_sock_clear_timer(sk);
1262 /* Outgoing channel.
1263 * Wake up socket sleeping on connect.
1265 sk->sk_state = BT_CONNECTED;
1266 sk->sk_state_change(sk);
1268 /* Incoming channel.
1269 * Wake up socket sleeping on accept.
1271 parent->sk_data_ready(parent, 0);
1274 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1275 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1276 hci_conn_change_link_key(conn->hcon);
1280 /* Copy frame to all raw sockets on that connection */
1281 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1283 struct l2cap_chan_list *l = &conn->chan_list;
1284 struct sk_buff *nskb;
1287 BT_DBG("conn %p", conn);
1289 read_lock(&l->lock);
1290 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1291 if (sk->sk_type != SOCK_RAW)
1294 /* Don't send frame to the socket it came from */
1298 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1301 if (sock_queue_rcv_skb(sk, nskb))
1304 read_unlock(&l->lock);
1307 /* ---- L2CAP signalling commands ---- */
1308 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1309 u8 code, u8 ident, u16 dlen, void *data)
1311 struct sk_buff *skb, **frag;
1312 struct l2cap_cmd_hdr *cmd;
1313 struct l2cap_hdr *lh;
1316 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1318 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1319 count = min_t(unsigned int, conn->mtu, len);
1321 skb = bt_skb_alloc(count, GFP_ATOMIC);
1325 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1326 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1327 lh->cid = cpu_to_le16(0x0001);
1329 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1332 cmd->len = cpu_to_le16(dlen);
1335 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1336 memcpy(skb_put(skb, count), data, count);
1342 /* Continuation fragments (no L2CAP header) */
1343 frag = &skb_shinfo(skb)->frag_list;
1345 count = min_t(unsigned int, conn->mtu, len);
1347 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1351 memcpy(skb_put(*frag, count), data, count);
1356 frag = &(*frag)->next;
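/*
 * Rough layout of a signalling PDU built above: a basic L2CAP header
 * (16-bit length, CID fixed to 0x0001), then the command header (8-bit
 * code, 8-bit ident, 16-bit data length) and the command data, fragmented
 * to conn->mtu like any other outgoing frame.
 */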
1366 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1368 struct l2cap_conf_opt *opt = *ptr;
1371 len = L2CAP_CONF_OPT_SIZE + opt->len;
1379 *val = *((u8 *) opt->val);
1383 *val = __le16_to_cpu(*((__le16 *) opt->val));
1387 *val = __le32_to_cpu(*((__le32 *) opt->val));
1391 *val = (unsigned long) opt->val;
1395 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1399 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1401 struct l2cap_conf_opt *opt = *ptr;
1403 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1410 *((u8 *) opt->val) = val;
1414 *((__le16 *) opt->val) = cpu_to_le16(val);
1418 *((__le32 *) opt->val) = cpu_to_le32(val);
1422 memcpy(opt->val, (void *) val, len);
1426 *ptr += L2CAP_CONF_OPT_SIZE + len;
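/*
 * Configuration options are encoded as type/length/value triplets: a
 * one-byte type (per the spec the top bit marks the option as a hint),
 * a one-byte length, and then 1, 2 or 4 bytes of value or an opaque blob
 * such as the RFC option. l2cap_get_conf_opt() and l2cap_add_conf_opt()
 * walk and emit exactly this encoding.
 */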
1429 static int l2cap_build_conf_req(struct sock *sk, void *data)
1431 struct l2cap_pinfo *pi = l2cap_pi(sk);
1432 struct l2cap_conf_req *req = data;
1433 void *ptr = req->data;
1435 BT_DBG("sk %p", sk);
1437 if (pi->imtu != L2CAP_DEFAULT_MTU)
1438 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1440 /* FIXME: Need actual value of the flush timeout */
1441 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1442 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1444 req->dcid = cpu_to_le16(pi->dcid);
1445 req->flags = cpu_to_le16(0);
1450 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1452 struct l2cap_pinfo *pi = l2cap_pi(sk);
1453 struct l2cap_conf_rsp *rsp = data;
1454 void *ptr = rsp->data;
1455 void *req = pi->conf_req;
1456 int len = pi->conf_len;
1457 int type, hint, olen;
1459 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1460 u16 mtu = L2CAP_DEFAULT_MTU;
1461 u16 result = L2CAP_CONF_SUCCESS;
1463 BT_DBG("sk %p", sk);
1465 while (len >= L2CAP_CONF_OPT_SIZE) {
1466 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1472 case L2CAP_CONF_MTU:
1476 case L2CAP_CONF_FLUSH_TO:
1480 case L2CAP_CONF_QOS:
1483 case L2CAP_CONF_RFC:
1484 if (olen == sizeof(rfc))
1485 memcpy(&rfc, (void *) val, olen);
1492 result = L2CAP_CONF_UNKNOWN;
1493 *((u8 *) ptr++) = type;
1498 if (result == L2CAP_CONF_SUCCESS) {
1499 /* Configure output options and let the other side know
1500 * which ones we don't like. */
1502 if (rfc.mode == L2CAP_MODE_BASIC) {
1504 result = L2CAP_CONF_UNACCEPT;
1507 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1512 result = L2CAP_CONF_UNACCEPT;
1514 memset(&rfc, 0, sizeof(rfc));
1515 rfc.mode = L2CAP_MODE_BASIC;
1517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1518 sizeof(rfc), (unsigned long) &rfc);
1522 rsp->scid = cpu_to_le16(pi->dcid);
1523 rsp->result = cpu_to_le16(result);
1524 rsp->flags = cpu_to_le16(0x0000);
1529 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1531 struct l2cap_conf_rsp *rsp = data;
1532 void *ptr = rsp->data;
1534 BT_DBG("sk %p", sk);
1536 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1537 rsp->result = cpu_to_le16(result);
1538 rsp->flags = cpu_to_le16(flags);
1543 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1545 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1547 if (rej->reason != 0x0000)
1550 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1551 cmd->ident == conn->info_ident) {
1552 conn->info_ident = 0;
1553 del_timer(&conn->info_timer);
1554 l2cap_conn_start(conn);
1560 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1562 struct l2cap_chan_list *list = &conn->chan_list;
1563 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1564 struct l2cap_conn_rsp rsp;
1565 struct sock *sk, *parent;
1566 int result, status = L2CAP_CS_NO_INFO;
1568 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1569 __le16 psm = req->psm;
1571 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1573 /* Check if we have a socket listening on this PSM */
1574 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1576 result = L2CAP_CR_BAD_PSM;
1580 /* Check if the ACL is secure enough (if not SDP) */
1581 if (psm != cpu_to_le16(0x0001) &&
1582 !hci_conn_check_link_mode(conn->hcon)) {
1583 result = L2CAP_CR_SEC_BLOCK;
1587 result = L2CAP_CR_NO_MEM;
1589 /* Check for backlog size */
1590 if (sk_acceptq_is_full(parent)) {
1591 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1595 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1599 write_lock_bh(&list->lock);
1601 /* Check if we already have a channel with that dcid */
1602 if (__l2cap_get_chan_by_dcid(list, scid)) {
1603 write_unlock_bh(&list->lock);
1604 sock_set_flag(sk, SOCK_ZAPPED);
1605 l2cap_sock_kill(sk);
1609 hci_conn_hold(conn->hcon);
1611 l2cap_sock_init(sk, parent);
1612 bacpy(&bt_sk(sk)->src, conn->src);
1613 bacpy(&bt_sk(sk)->dst, conn->dst);
1614 l2cap_pi(sk)->psm = psm;
1615 l2cap_pi(sk)->dcid = scid;
1617 __l2cap_chan_add(conn, sk, parent);
1618 dcid = l2cap_pi(sk)->scid;
1620 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1622 l2cap_pi(sk)->ident = cmd->ident;
1624 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1625 if (l2cap_check_link_mode(sk)) {
1626 sk->sk_state = BT_CONFIG;
1627 result = L2CAP_CR_SUCCESS;
1628 status = L2CAP_CS_NO_INFO;
1630 sk->sk_state = BT_CONNECT2;
1631 result = L2CAP_CR_PEND;
1632 status = L2CAP_CS_AUTHEN_PEND;
1635 sk->sk_state = BT_CONNECT2;
1636 result = L2CAP_CR_PEND;
1637 status = L2CAP_CS_NO_INFO;
1640 write_unlock_bh(&list->lock);
1643 bh_unlock_sock(parent);
1646 rsp.scid = cpu_to_le16(scid);
1647 rsp.dcid = cpu_to_le16(dcid);
1648 rsp.result = cpu_to_le16(result);
1649 rsp.status = cpu_to_le16(status);
1650 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1652 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1653 struct l2cap_info_req info;
1654 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1657 conn->info_ident = l2cap_get_ident(conn);
1659 mod_timer(&conn->info_timer, jiffies +
1660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1662 l2cap_send_cmd(conn, conn->info_ident,
1663 L2CAP_INFO_REQ, sizeof(info), &info);
1669 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1671 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1672 u16 scid, dcid, result, status;
1676 scid = __le16_to_cpu(rsp->scid);
1677 dcid = __le16_to_cpu(rsp->dcid);
1678 result = __le16_to_cpu(rsp->result);
1679 status = __le16_to_cpu(rsp->status);
1681 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1684 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1687 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1692 case L2CAP_CR_SUCCESS:
1693 sk->sk_state = BT_CONFIG;
1694 l2cap_pi(sk)->ident = 0;
1695 l2cap_pi(sk)->dcid = dcid;
1696 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1698 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1699 l2cap_build_conf_req(sk, req), req);
1706 l2cap_chan_del(sk, ECONNREFUSED);
1714 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1716 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1722 dcid = __le16_to_cpu(req->dcid);
1723 flags = __le16_to_cpu(req->flags);
1725 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1727 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1730 if (sk->sk_state == BT_DISCONN)
1733 /* Reject if config buffer is too small. */
1734 len = cmd_len - sizeof(*req);
1735 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1736 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1737 l2cap_build_conf_rsp(sk, rsp,
1738 L2CAP_CONF_REJECT, flags), rsp);
1743 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1744 l2cap_pi(sk)->conf_len += len;
1746 if (flags & 0x0001) {
1747 /* Incomplete config. Send empty response. */
1748 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1749 l2cap_build_conf_rsp(sk, rsp,
1750 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1754 /* Complete config. */
1755 len = l2cap_parse_conf_req(sk, rsp);
1759 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1761 /* Reset config buffer. */
1762 l2cap_pi(sk)->conf_len = 0;
1764 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1767 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1768 sk->sk_state = BT_CONNECTED;
1769 l2cap_chan_ready(sk);
1773 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1775 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1776 l2cap_build_conf_req(sk, buf), buf);
1784 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1786 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1787 u16 scid, flags, result;
1790 scid = __le16_to_cpu(rsp->scid);
1791 flags = __le16_to_cpu(rsp->flags);
1792 result = __le16_to_cpu(rsp->result);
1794 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1796 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1800 case L2CAP_CONF_SUCCESS:
1803 case L2CAP_CONF_UNACCEPT:
1804 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1806 /* It does not make sense to adjust L2CAP parameters
1807 * that are currently defined in the spec. We simply
1808 * resend the config request that we sent earlier. It is
1809 * stupid, but it helps qualification testing, which
1810 * expects at least some response from us. */
1811 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1812 l2cap_build_conf_req(sk, req), req);
1817 sk->sk_state = BT_DISCONN;
1818 sk->sk_err = ECONNRESET;
1819 l2cap_sock_set_timer(sk, HZ * 5);
1821 struct l2cap_disconn_req req;
1822 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1823 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1824 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1825 L2CAP_DISCONN_REQ, sizeof(req), &req);
1833 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1835 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1836 sk->sk_state = BT_CONNECTED;
1837 l2cap_chan_ready(sk);
1845 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1847 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1848 struct l2cap_disconn_rsp rsp;
1852 scid = __le16_to_cpu(req->scid);
1853 dcid = __le16_to_cpu(req->dcid);
1855 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1857 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1860 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1861 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1862 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1864 sk->sk_shutdown = SHUTDOWN_MASK;
1866 l2cap_chan_del(sk, ECONNRESET);
1869 l2cap_sock_kill(sk);
1873 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1875 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1879 scid = __le16_to_cpu(rsp->scid);
1880 dcid = __le16_to_cpu(rsp->dcid);
1882 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1884 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1887 l2cap_chan_del(sk, 0);
1890 l2cap_sock_kill(sk);
1894 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1896 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1899 type = __le16_to_cpu(req->type);
1901 BT_DBG("type 0x%4.4x", type);
1903 if (type == L2CAP_IT_FEAT_MASK) {
1905 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1906 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1907 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1908 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1909 l2cap_send_cmd(conn, cmd->ident,
1910 L2CAP_INFO_RSP, sizeof(buf), buf);
1912 struct l2cap_info_rsp rsp;
1913 rsp.type = cpu_to_le16(type);
1914 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1915 l2cap_send_cmd(conn, cmd->ident,
1916 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1922 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1924 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1927 type = __le16_to_cpu(rsp->type);
1928 result = __le16_to_cpu(rsp->result);
1930 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1932 conn->info_ident = 0;
1934 del_timer(&conn->info_timer);
1936 if (type == L2CAP_IT_FEAT_MASK)
1937 conn->feat_mask = get_unaligned_le32(rsp->data);
1939 l2cap_conn_start(conn);
1944 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1946 u8 *data = skb->data;
1948 struct l2cap_cmd_hdr cmd;
1951 l2cap_raw_recv(conn, skb);
1953 while (len >= L2CAP_CMD_HDR_SIZE) {
1955 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1956 data += L2CAP_CMD_HDR_SIZE;
1957 len -= L2CAP_CMD_HDR_SIZE;
1959 cmd_len = le16_to_cpu(cmd.len);
1961 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1963 if (cmd_len > len || !cmd.ident) {
1964 BT_DBG("corrupted command");
1969 case L2CAP_COMMAND_REJ:
1970 l2cap_command_rej(conn, &cmd, data);
1973 case L2CAP_CONN_REQ:
1974 err = l2cap_connect_req(conn, &cmd, data);
1977 case L2CAP_CONN_RSP:
1978 err = l2cap_connect_rsp(conn, &cmd, data);
1981 case L2CAP_CONF_REQ:
1982 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1985 case L2CAP_CONF_RSP:
1986 err = l2cap_config_rsp(conn, &cmd, data);
1989 case L2CAP_DISCONN_REQ:
1990 err = l2cap_disconnect_req(conn, &cmd, data);
1993 case L2CAP_DISCONN_RSP:
1994 err = l2cap_disconnect_rsp(conn, &cmd, data);
1997 case L2CAP_ECHO_REQ:
1998 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2001 case L2CAP_ECHO_RSP:
2004 case L2CAP_INFO_REQ:
2005 err = l2cap_information_req(conn, &cmd, data);
2008 case L2CAP_INFO_RSP:
2009 err = l2cap_information_rsp(conn, &cmd, data);
2013 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2019 struct l2cap_cmd_rej rej;
2020 BT_DBG("error %d", err);
2022 /* FIXME: Map err to a valid reason */
2023 rej.reason = cpu_to_le16(0);
2024 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2034 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2038 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2040 BT_DBG("unknown cid 0x%4.4x", cid);
2044 BT_DBG("sk %p, len %d", sk, skb->len);
2046 if (sk->sk_state != BT_CONNECTED)
2049 if (l2cap_pi(sk)->imtu < skb->len)
2052 /* If the socket receive buffer overflows we drop data here,
2053 * which is *bad* because L2CAP has to be reliable.
2054 * But we don't have any other choice: L2CAP doesn't
2055 * provide a flow control mechanism. */
2057 if (!sock_queue_rcv_skb(sk, skb))
2070 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2074 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2078 BT_DBG("sk %p, len %d", sk, skb->len);
2080 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2083 if (l2cap_pi(sk)->imtu < skb->len)
2086 if (!sock_queue_rcv_skb(sk, skb))
2093 if (sk) bh_unlock_sock(sk);
2097 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2099 struct l2cap_hdr *lh = (void *) skb->data;
2103 skb_pull(skb, L2CAP_HDR_SIZE);
2104 cid = __le16_to_cpu(lh->cid);
2105 len = __le16_to_cpu(lh->len);
2107 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2111 l2cap_sig_channel(conn, skb);
2115 psm = get_unaligned((__le16 *) skb->data);
2117 l2cap_conless_channel(conn, psm, skb);
2121 l2cap_data_channel(conn, cid, skb);
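/*
 * Demultiplexing above follows the fixed CID assignment: CID 0x0001 goes
 * to the signalling handler, CID 0x0002 to the connectionless handler
 * (with a leading PSM in the payload), and every other CID is looked up
 * as a connection-oriented data channel.
 */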
2126 /* ---- L2CAP interface with lower layer (HCI) ---- */
2128 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2130 int exact = 0, lm1 = 0, lm2 = 0;
2131 register struct sock *sk;
2132 struct hlist_node *node;
2134 if (type != ACL_LINK)
2137 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2139 /* Find listening sockets and check their link_mode */
2140 read_lock(&l2cap_sk_list.lock);
2141 sk_for_each(sk, node, &l2cap_sk_list.head) {
2142 if (sk->sk_state != BT_LISTEN)
2145 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2146 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2148 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2149 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2151 read_unlock(&l2cap_sk_list.lock);
2153 return exact ? lm1 : lm2;
2156 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2158 struct l2cap_conn *conn;
2160 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2162 if (hcon->type != ACL_LINK)
2166 conn = l2cap_conn_add(hcon, status);
2168 l2cap_conn_ready(conn);
2170 l2cap_conn_del(hcon, bt_err(status));
2175 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2177 BT_DBG("hcon %p reason %d", hcon, reason);
2179 if (hcon->type != ACL_LINK)
2182 l2cap_conn_del(hcon, bt_err(reason));
2187 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2189 struct l2cap_chan_list *l;
2190 struct l2cap_conn *conn = hcon->l2cap_data;
2196 l = &conn->chan_list;
2198 BT_DBG("conn %p", conn);
2200 read_lock(&l->lock);
2202 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2203 struct l2cap_pinfo *pi = l2cap_pi(sk);
2207 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2208 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2214 if (sk->sk_state == BT_CONNECT) {
2216 struct l2cap_conn_req req;
2217 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2218 req.psm = l2cap_pi(sk)->psm;
2220 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2222 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2223 L2CAP_CONN_REQ, sizeof(req), &req);
2225 l2cap_sock_clear_timer(sk);
2226 l2cap_sock_set_timer(sk, HZ / 10);
2228 } else if (sk->sk_state == BT_CONNECT2) {
2229 struct l2cap_conn_rsp rsp;
2233 sk->sk_state = BT_CONFIG;
2234 result = L2CAP_CR_SUCCESS;
2236 sk->sk_state = BT_DISCONN;
2237 l2cap_sock_set_timer(sk, HZ / 10);
2238 result = L2CAP_CR_SEC_BLOCK;
2241 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2242 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2243 rsp.result = cpu_to_le16(result);
2244 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2245 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2246 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2252 read_unlock(&l->lock);
2257 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2259 struct l2cap_chan_list *l;
2260 struct l2cap_conn *conn = hcon->l2cap_data;
2266 l = &conn->chan_list;
2268 BT_DBG("conn %p", conn);
2270 read_lock(&l->lock);
2272 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2273 struct l2cap_pinfo *pi = l2cap_pi(sk);
2277 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2278 (sk->sk_state == BT_CONNECTED ||
2279 sk->sk_state == BT_CONFIG) &&
2280 !status && encrypt == 0x00) {
2281 __l2cap_sock_close(sk, ECONNREFUSED);
2286 if (sk->sk_state == BT_CONNECT) {
2288 struct l2cap_conn_req req;
2289 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2290 req.psm = l2cap_pi(sk)->psm;
2292 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2294 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2295 L2CAP_CONN_REQ, sizeof(req), &req);
2297 l2cap_sock_clear_timer(sk);
2298 l2cap_sock_set_timer(sk, HZ / 10);
2300 } else if (sk->sk_state == BT_CONNECT2) {
2301 struct l2cap_conn_rsp rsp;
2305 sk->sk_state = BT_CONFIG;
2306 result = L2CAP_CR_SUCCESS;
2308 sk->sk_state = BT_DISCONN;
2309 l2cap_sock_set_timer(sk, HZ / 10);
2310 result = L2CAP_CR_SEC_BLOCK;
2313 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2314 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2315 rsp.result = cpu_to_le16(result);
2316 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2317 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2318 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2324 read_unlock(&l->lock);
2329 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2331 struct l2cap_conn *conn = hcon->l2cap_data;
2333 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2336 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2338 if (flags & ACL_START) {
2339 struct l2cap_hdr *hdr;
2343 BT_ERR("Unexpected start frame (len %d)", skb->len);
2344 kfree_skb(conn->rx_skb);
2345 conn->rx_skb = NULL;
2347 l2cap_conn_unreliable(conn, ECOMM);
2351 BT_ERR("Frame is too short (len %d)", skb->len);
2352 l2cap_conn_unreliable(conn, ECOMM);
2356 hdr = (struct l2cap_hdr *) skb->data;
2357 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2359 if (len == skb->len) {
2360 /* Complete frame received */
2361 l2cap_recv_frame(conn, skb);
2365 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2367 if (skb->len > len) {
2368 BT_ERR("Frame is too long (len %d, expected len %d)",
2370 l2cap_conn_unreliable(conn, ECOMM);
2374 /* Allocate skb for the complete frame (with header) */
2375 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2378 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2380 conn->rx_len = len - skb->len;
2382 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2384 if (!conn->rx_len) {
2385 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2386 l2cap_conn_unreliable(conn, ECOMM);
2390 if (skb->len > conn->rx_len) {
2391 BT_ERR("Fragment is too long (len %d, expected %d)",
2392 skb->len, conn->rx_len);
2393 kfree_skb(conn->rx_skb);
2394 conn->rx_skb = NULL;
2396 l2cap_conn_unreliable(conn, ECOMM);
2400 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2402 conn->rx_len -= skb->len;
2404 if (!conn->rx_len) {
2405 /* Complete frame received */
2406 l2cap_recv_frame(conn, conn->rx_skb);
2407 conn->rx_skb = NULL;
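/*
 * Reassembly summary: an ACL_START fragment must carry at least the
 * L2CAP header, which gives the total frame length; continuation
 * fragments are appended to conn->rx_skb until rx_len reaches zero and
 * the complete frame is passed to l2cap_recv_frame(). Any length mismatch
 * marks the connection unreliable via l2cap_conn_unreliable().
 */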
2416 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2419 struct hlist_node *node;
2422 read_lock_bh(&l2cap_sk_list.lock);
2424 sk_for_each(sk, node, &l2cap_sk_list.head) {
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2427 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2428 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2429 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2430 pi->imtu, pi->omtu, pi->link_mode);
2433 read_unlock_bh(&l2cap_sk_list.lock);
2438 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2440 static const struct proto_ops l2cap_sock_ops = {
2441 .family = PF_BLUETOOTH,
2442 .owner = THIS_MODULE,
2443 .release = l2cap_sock_release,
2444 .bind = l2cap_sock_bind,
2445 .connect = l2cap_sock_connect,
2446 .listen = l2cap_sock_listen,
2447 .accept = l2cap_sock_accept,
2448 .getname = l2cap_sock_getname,
2449 .sendmsg = l2cap_sock_sendmsg,
2450 .recvmsg = bt_sock_recvmsg,
2451 .poll = bt_sock_poll,
2452 .ioctl = bt_sock_ioctl,
2453 .mmap = sock_no_mmap,
2454 .socketpair = sock_no_socketpair,
2455 .shutdown = l2cap_sock_shutdown,
2456 .setsockopt = l2cap_sock_setsockopt,
2457 .getsockopt = l2cap_sock_getsockopt
2460 static struct net_proto_family l2cap_sock_family_ops = {
2461 .family = PF_BLUETOOTH,
2462 .owner = THIS_MODULE,
2463 .create = l2cap_sock_create,
2466 static struct hci_proto l2cap_hci_proto = {
2468 .id = HCI_PROTO_L2CAP,
2469 .connect_ind = l2cap_connect_ind,
2470 .connect_cfm = l2cap_connect_cfm,
2471 .disconn_ind = l2cap_disconn_ind,
2472 .auth_cfm = l2cap_auth_cfm,
2473 .encrypt_cfm = l2cap_encrypt_cfm,
2474 .recv_acldata = l2cap_recv_acldata
2477 static int __init l2cap_init(void)
2481 err = proto_register(&l2cap_proto, 0);
2485 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2487 BT_ERR("L2CAP socket registration failed");
2491 err = hci_register_proto(&l2cap_hci_proto);
2493 BT_ERR("L2CAP protocol registration failed");
2494 bt_sock_unregister(BTPROTO_L2CAP);
2498 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2499 BT_ERR("Failed to create L2CAP info file");
2501 BT_INFO("L2CAP ver %s", VERSION);
2502 BT_INFO("L2CAP socket layer initialized");
2507 proto_unregister(&l2cap_proto);
2511 static void __exit l2cap_exit(void)
2513 class_remove_file(bt_class, &class_attr_l2cap);
2515 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2516 BT_ERR("L2CAP socket unregistration failed");
2518 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2519 BT_ERR("L2CAP protocol unregistration failed");
2521 proto_unregister(&l2cap_proto);
2524 void l2cap_load(void)
2526 /* Dummy function to trigger automatic L2CAP module loading by
2527 * other modules that use L2CAP sockets but don't use any other
2528 * symbols from it. */
2531 EXPORT_SYMBOL(l2cap_load);
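/*
 * A protocol module built on top of L2CAP (e.g. RFCOMM or BNEP) only has
 * to reference this symbol, for instance by calling l2cap_load() from its
 * own init path, so that module dependencies pull this module in
 * automatically (sketch, not an additional API contract):
 *
 *	l2cap_load();
 */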
2533 module_init(l2cap_init);
2534 module_exit(l2cap_exit);
2536 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2537 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2538 MODULE_VERSION(VERSION);
2539 MODULE_LICENSE("GPL");
2540 MODULE_ALIAS("bt-proto-0");