/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core and sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#define VERSION "2.13"

static u32 l2cap_feat_mask = 0x0080;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}

/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)
			break;
	}
	return s;
}

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)
			break;
	}
	return s;
}

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;

	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)
			break;
	}
	return s;
}

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;

	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
	u16 cid = 0x0040;

	for (; cid < 0xffff; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
			return cid;
	}
	return 0;
}

static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}

static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);

	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;

	write_unlock_bh(&l->lock);

	__sock_put(sk);
}

static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}

/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}

/* Service level security */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}

static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
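
/* Build an L2CAP signalling command and send it on the connection's
 * underlying ACL link. */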
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
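
/* Kick off channel setup: if the remote feature mask is not known yet,
 * send an Information Request first; otherwise send the Connection
 * Request once the required security level is in place. */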
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}

/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);
				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
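
/* The underlying ACL link is up: wake raw and connectionless sockets
 * immediately and continue setup of pending connection-oriented channels. */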
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}

static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
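
/* Allocate and initialize the per-ACL-link L2CAP connection state
 * (channel list, signalling ident counter and info timer). */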
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	conn->disc_reason = 0x13;

	return conn;
}

static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_sock_clear_timer(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}

/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
			goto found;
	sk = NULL;
found:
	return sk;
}

/* Find socket with psm and source bdaddr.
 * Returns closest match. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;

	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}

static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}

static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}

static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};

static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}

static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}

static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && btohs(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}

static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}

static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	lock_sock(sk);

	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	if ((err = l2cap_do_connect(sk)))
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));

done:
	release_sock(sk);
	return err;
}

static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
				l2cap_pi(sk)->psm   = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}

static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}

static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer) {
		la->l2_psm = l2cap_pi(sk)->psm;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
		la->l2_cid = htobs(l2cap_pi(sk)->dcid);
	} else {
		la->l2_psm = l2cap_pi(sk)->sport;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
		la->l2_cid = htobs(l2cap_pi(sk)->scid);
	}

	return 0;
}
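
/* Split an outgoing message into a first fragment carrying the L2CAP
 * header (plus the PSM for connectionless sockets) and continuation
 * fragments sized to the ACL MTU, then hand the skb chain to HCI. */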
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent = 0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}

static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}

static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}

static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = L2CAP_MODE_BASIC;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = L2CAP_MODE_BASIC;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;
		break;

	case L2CAP_LM:
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
				!(sk->sk_state == BT_CONNECT2 &&
					bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}

static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}

static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}

/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
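
/* Configuration options travel as type/length/value triplets; the two
 * helpers below walk and build such option lists. */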
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
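
/* Parse the peer's accumulated Configuration Request and build our
 * response: only Basic mode is accepted, and the proposed MTU is taken
 * when it is not smaller than our current outgoing MTU. */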
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (rfc.mode == L2CAP_MODE_BASIC) {
			if (mtu < pi->omtu)
				result = L2CAP_CONF_UNACCEPT;
			else {
				pi->omtu = mtu;
				pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
			}

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
		} else {
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = L2CAP_MODE_BASIC;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
		}
	}

	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}

static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}

static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}

static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
			return 0;
	} else {
		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0)
		goto unlock;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(sk, req), req);
			goto done;
		}

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			struct l2cap_disconn_req req;
			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		}
		goto done;
	}

	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & 0x0080) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
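
/* Deliver a frame received on a connection-oriented channel to the
 * owning socket; drop it if the channel is unknown, not connected,
 * or the frame exceeds the incoming MTU. */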
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If socket recv buffers overflows we drop data here
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}

static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case 0x0001:
		l2cap_sig_channel(conn, skb);
		break;

	case 0x0002:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* ---- L2CAP interface with lower layer (HCI) ---- */

static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}

static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)
		return 0;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_err(status));

	return 0;
}

static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}

static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return 0;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
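
/* React to encryption changes on the link: give MEDIUM security channels
 * a short grace timer when encryption drops, and close HIGH security
 * channels outright. */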
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	if (sk->sk_type != SOCK_SEQPACKET)
		return;

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
	} else {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
	}
}

static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
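
/* Reassemble ACL data: a start fragment carries the L2CAP header and
 * total length, continuation fragments are appended until the complete
 * frame is collected and passed to l2cap_recv_frame(). */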
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}

static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
				pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return str - buf;
}

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);

static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};

static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}

static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}

void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");