BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
/* Bluetooth L2CAP core and sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#ifndef CONFIG_BT_L2CAP_DEBUG
#define VERSION "2.11"
static u32 l2cap_feat_mask = 0x0000;
static const struct proto_ops l2cap_sock_ops;
static struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
if (sk->sk_state == BT_CONNECT &&
(l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
reason = ECONNREFUSED;
__l2cap_sock_close(sk, reason);
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
static void l2cap_sock_clear_timer(struct sock *sk)
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->dcid == cid)
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->scid == cid)
/* Find channel with given SCID.
* Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
s = __l2cap_get_chan_by_scid(l, cid);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->ident == ident)
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
s = __l2cap_get_chan_by_ident(l, ident);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
for (; cid < 0xffff; cid++) {
if (!__l2cap_get_chan_by_scid(l, cid))
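/*
 * Note on channel identifiers (illustrative; the cid initializer for the loop
 * above is elided): in L2CAP, CID 0x0001 is the signalling channel and
 * CID 0x0002 the connectionless reception channel, both reserved, while
 * dynamically allocated channels are expected to start at 0x0040, so the
 * starting value is presumably in that dynamic range.
 */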
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
l2cap_pi(l->head)->prev_c = sk;
l2cap_pi(sk)->next_c = l->head;
l2cap_pi(sk)->prev_c = NULL;
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
write_lock_bh(&l->lock);
l2cap_pi(next)->prev_c = prev;
l2cap_pi(prev)->next_c = next;
write_unlock_bh(&l->lock);
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
l2cap_pi(sk)->conn = conn;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
} else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = 0x0002;
l2cap_pi(sk)->dcid = 0x0002;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Raw socket can send/recv signalling messages only */
l2cap_pi(sk)->scid = 0x0001;
l2cap_pi(sk)->dcid = 0x0001;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
__l2cap_chan_link(l, sk);
bt_accept_enqueue(parent, sk);
* Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
l2cap_sock_clear_timer(sk);
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
/* Unlink from channel list */
l2cap_chan_unlink(&conn->chan_list, sk);
l2cap_pi(sk)->conn = NULL;
hci_conn_put(conn->hcon);
sk->sk_state = BT_CLOSED;
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
parent->sk_data_ready(parent, 0);
sk->sk_state_change(sk);
/* Service level security */
static inline int l2cap_check_link_mode(struct sock *sk)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
return hci_conn_encrypt(conn->hcon);
if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
return hci_conn_auth(conn->hcon);
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
/* Get next available identifier.
* 1 - 128 are used by the kernel.
* 129 - 199 are reserved.
* 200 - 254 are used by utilities like l2ping, etc.
spin_lock_bh(&conn->lock);
if (++conn->tx_ident > 128)
spin_unlock_bh(&conn->lock);
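/*
 * The elided lines above presumably reset tx_ident back to 1 on wraparound
 * and return the new value; the check against 128 keeps kernel-generated
 * idents inside the 1-128 window described in the comment, so responses can
 * be matched back to the outstanding request that used the same ident.
 */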
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
BT_DBG("code 0x%2.2x", code);
return hci_send_acl(conn->hcon, skb, 0);
static void l2cap_do_start(struct sock *sk)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (l2cap_check_link_mode(sk)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
struct l2cap_info_req req;
req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
mod_timer(&conn->info_timer, jiffies +
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(req), &req);
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_SEQPACKET) {
if (sk->sk_state == BT_CONNECT) {
if (l2cap_check_link_mode(sk)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
} else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
if (l2cap_check_link_mode(sk)) {
sk->sk_state = BT_CONFIG;
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
read_unlock(&l->lock);
static void l2cap_conn_ready(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
} else if (sk->sk_state == BT_CONNECT)
read_unlock(&l->lock);
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
struct l2cap_chan_list *l = &conn->chan_list;
BT_DBG("conn %p", conn);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
read_unlock(&l->lock);
static void l2cap_info_timeout(unsigned long arg)
struct l2cap_conn *conn = (void *) arg;
conn->info_ident = 0;
l2cap_conn_start(conn);
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn = hcon->l2cap_data;
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
hcon->l2cap_data = conn;
BT_DBG("hcon %p conn %p", hcon, conn);
conn->mtu = hcon->hdev->acl_mtu;
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);
spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);
static void l2cap_conn_del(struct hci_conn *hcon, int err)
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
kfree_skb(conn->rx_skb);
while ((sk = conn->chan_list.head)) {
l2cap_chan_del(sk, err);
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
del_timer_sync(&conn->info_timer);
hcon->l2cap_data = NULL;
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
write_lock_bh(&l->lock);
__l2cap_chan_add(conn, sk, parent);
write_unlock_bh(&l->lock);
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head)
if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Find socket with psm and source bdaddr.
* Returns closest match.
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (state && sk->sk_state != state)
if (l2cap_pi(sk)->psm == psm) {
if (!bacmp(&bt_sk(sk)->src, src))
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
return node ? sk : sk1;
/* Find socket with given address (psm, src).
* Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
read_lock(&l2cap_sk_list.lock);
s = __l2cap_get_sock_by_psm(state, psm, src);
if (s) bh_lock_sock(s);
read_unlock(&l2cap_sk_list.lock);
static void l2cap_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
static void l2cap_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&l2cap_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
static void __l2cap_sock_close(struct sock *sk, int reason)
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
l2cap_sock_cleanup_listen(sk);
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_disconn_req req;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
l2cap_chan_del(sk, reason);
l2cap_chan_del(sk, reason);
sock_set_flag(sk, SOCK_ZAPPED);
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, ECONNRESET);
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_pinfo *pi = l2cap_pi(sk);
sk->sk_type = parent->sk_type;
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->link_mode = l2cap_pi(parent)->link_mode;
pi->imtu = L2CAP_DEFAULT_MTU;
/* Default config options */
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
static struct proto l2cap_proto = {
.owner = THIS_MODULE,
.obj_size = sizeof(struct l2cap_pinfo)
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
bt_sock_link(&l2cap_sk_list, sk);
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
sock->ops = &l2cap_sock_ops;
sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
l2cap_sock_init(sk, NULL);
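/*
 * Illustrative userspace sketch (not part of this file; the PSM, the address
 * and the helpers str2ba()/htobs() come from the userspace BlueZ library and
 * are assumptions here). A connection-oriented L2CAP socket is used roughly
 * like this:
 *
 *     struct sockaddr_l2 addr = { 0 };
 *     int s = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *     addr.l2_family = AF_BLUETOOTH;
 *     addr.l2_psm = htobs(0x1001);
 *     str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *     connect(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * The server side bind()s a PSM (or lets l2cap_sock_listen() below pick a
 * free one) and then listen()s and accept()s as usual.
 */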
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
if (!addr || addr->sa_family != AF_BLUETOOTH)
if (sk->sk_state != BT_OPEN) {
if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
!capable(CAP_NET_BIND_SERVICE)) {
write_lock_bh(&l2cap_sk_list.lock);
if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
/* Save source address */
bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
l2cap_pi(sk)->sport = la->l2_psm;
sk->sk_state = BT_BOUND;
write_unlock_bh(&l2cap_sk_list.lock);
static int l2cap_do_connect(struct sock *sk)
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
if (!(hdev = hci_get_route(dst, src)))
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
auth_type = HCI_AT_NO_BONDING_MITM;
auth_type = HCI_AT_GENERAL_BONDING_MITM;
if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
auth_type = HCI_AT_NO_BONDING;
auth_type = HCI_AT_GENERAL_BONDING;
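/*
 * PSM 0x0001 is the Service Discovery Protocol; SDP connections are treated
 * as not requiring bonding, which is why that PSM is singled out above when
 * the HCI authentication type is chosen.
 */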
hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
conn = l2cap_conn_add(hcon, 0);
/* Update source addr of the socket */
bacpy(src, conn->src);
l2cap_chan_add(conn, sk, NULL);
sk->sk_state = BT_CONNECT;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
if (hcon->state == BT_CONNECTED) {
if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
hci_dev_unlock_bh(hdev);
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
switch (sk->sk_state) {
/* Already connecting */
/* Already connected */
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
if ((err = l2cap_do_connect(sk)))
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
static int l2cap_sock_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
BT_DBG("sk %p backlog %d", sk, backlog);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
if (!l2cap_pi(sk)->psm) {
bdaddr_t *src = &bt_sk(sk)->src;
write_lock_bh(&l2cap_sk_list.lock);
for (psm = 0x1001; psm < 0x1100; psm += 2)
if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
l2cap_pi(sk)->psm = htobs(psm);
l2cap_pi(sk)->sport = htobs(psm);
write_unlock_bh(&l2cap_sk_list.lock);
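/*
 * Background on the autobind loop above: PSMs below 0x1001 are reserved for
 * well-known services (hence the CAP_NET_BIND_SERVICE check in
 * l2cap_sock_bind()), and valid PSMs are odd, so free dynamic PSMs are probed
 * in steps of two starting at 0x1001. The 0x1100 upper bound is this
 * implementation's own limit rather than one imposed by the specification.
 */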
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_state != BT_LISTEN) {
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_psm = l2cap_pi(sk)->psm;
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb, **frag;
int err, hlen, count, sent = 0;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, len);
/* First fragment (with L2CAP header) */
if (sk->sk_type == SOCK_DGRAM)
hlen = L2CAP_HDR_SIZE + 2;
hlen = L2CAP_HDR_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, hlen + count,
msg->msg_flags & MSG_DONTWAIT, &err);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
if (sk->sk_type == SOCK_DGRAM)
put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
frag = &(*frag)->next;
if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
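/*
 * How the outgoing frame is built above: the first skb carries the L2CAP
 * header (plus a 2-byte PSM for SOCK_DGRAM) and up to conn->mtu - hlen bytes
 * of payload; the rest of the message is chained as headerless fragments on
 * skb_shinfo(skb)->frag_list, and hci_send_acl() hands the whole chain to the
 * HCI layer, which transmits it as start and continuation ACL packets.
 */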
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (msg->msg_flags & MSG_OOB)
/* Check outgoing MTU */
if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
if (sk->sk_state == BT_CONNECTED)
err = l2cap_do_send(sk, msg, len);
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
BT_DBG("sk %p", sk);
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = L2CAP_MODE_BASIC;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
if (get_user(opt, (u32 __user *) optval)) {
l2cap_pi(sk)->link_mode = opt;
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = L2CAP_MODE_BASIC;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
static int l2cap_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
static int l2cap_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = l2cap_sock_shutdown(sock, 2);
l2cap_sock_kill(sk);
static void l2cap_chan_ready(struct sock *sk)
struct sock *parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
l2cap_pi(sk)->conf_state = 0;
l2cap_sock_clear_timer(sk);
/* Outgoing channel.
* Wake up socket sleeping on connect.
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
/* Incoming channel.
* Wake up socket sleeping on accept.
parent->sk_data_ready(parent, 0);
if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
hci_conn_change_link_key(conn->hcon);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_RAW)
/* Don't send frame to the socket it came from */
if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
if (sock_queue_rcv_skb(sk, nskb))
read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data)
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
lh->cid = cpu_to_le16(0x0001);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->len = cpu_to_le16(dlen);
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
memcpy(skb_put(*frag, count), data, count);
frag = &(*frag)->next;
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
struct l2cap_conf_opt *opt = *ptr;
len = L2CAP_CONF_OPT_SIZE + opt->len;
*val = *((u8 *) opt->val);
*val = __le16_to_cpu(*((__le16 *) opt->val));
*val = __le32_to_cpu(*((__le32 *) opt->val));
*val = (unsigned long) opt->val;
BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
*((u8 *) opt->val) = val;
*((__le16 *) opt->val) = cpu_to_le16(val);
*((__le32 *) opt->val) = cpu_to_le32(val);
memcpy(opt->val, (void *) val, len);
*ptr += L2CAP_CONF_OPT_SIZE + len;
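/*
 * Configuration options are simple type/length/value records: a one-byte
 * type, a one-byte length and a variable-length value, with
 * L2CAP_CONF_OPT_SIZE covering the two fixed bytes. As an example, an MTU
 * option (type L2CAP_CONF_MTU, length 2) carries the MTU as a 16-bit
 * little-endian value, which is why l2cap_build_conf_req() below passes a
 * length of 2 for it.
 */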
static int l2cap_build_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
BT_DBG("sk %p", sk);
if (pi->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
/* FIXME: Need actual value of the flush timeout */
//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
// l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
static int l2cap_parse_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
void *req = pi->conf_req;
int len = pi->conf_len;
int type, hint, olen;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
BT_DBG("sk %p", sk);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
case L2CAP_CONF_MTU:
case L2CAP_CONF_FLUSH_TO:
case L2CAP_CONF_QOS:
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *) val, olen);
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
if (rfc.mode == L2CAP_MODE_BASIC) {
result = L2CAP_CONF_UNACCEPT;
pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
rfc.mode = L2CAP_MODE_BASIC;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
BT_DBG("sk %p", sk);
rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(flags);
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
if (rej->reason != 0x0000)
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
conn->info_ident = 0;
del_timer(&conn->info_timer);
l2cap_conn_start(conn);
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct sock *sk, *parent;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
result = L2CAP_CR_BAD_PSM;
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
result = L2CAP_CR_SEC_BLOCK;
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
hci_conn_hold(conn->hcon);
l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
__l2cap_chan_add(conn, sk, parent);
dcid = l2cap_pi(sk)->scid;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
l2cap_pi(sk)->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (l2cap_check_link_mode(sk)) {
sk->sk_state = BT_CONFIG;
result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
write_unlock_bh(&list->lock);
bh_unlock_sock(parent);
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
mod_timer(&conn->info_timer, jiffies +
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(info), &info);
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
case L2CAP_CR_SUCCESS:
sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->ident = 0;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
l2cap_chan_del(sk, ECONNREFUSED);
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
if (sk->sk_state == BT_DISCONN)
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_REJECT, flags), rsp);
memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
l2cap_pi(sk)->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_SUCCESS, 0x0001), rsp);
/* Complete config. */
len = l2cap_parse_conf_req(sk, rsp);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
/* Reset config buffer. */
l2cap_pi(sk)->conf_len = 0;
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, buf), buf);
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
case L2CAP_CONF_SUCCESS:
case L2CAP_CONF_UNACCEPT:
if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
/* It does not make sense to adjust L2CAP parameters
* that are currently defined in the spec. We simply
* resend the config request we sent earlier. It is
* crude, but it helps qualification testing, which
* expects at least some response from us. */
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
sk->sk_state = BT_DISCONN;
sk->sk_err = ECONNRESET;
l2cap_sock_set_timer(sk, HZ * 5);
struct l2cap_disconn_req req;
req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
sk->sk_state = BT_CONNECTED;
l2cap_chan_ready(sk);
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
scid = __le16_to_cpu(req->scid);
dcid = __le16_to_cpu(req->dcid);
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_chan_del(sk, ECONNRESET);
l2cap_sock_kill(sk);
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
l2cap_chan_del(sk, 0);
l2cap_sock_kill(sk);
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
if (type == L2CAP_IT_FEAT_MASK) {
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(rsp), &rsp);
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
type = __le16_to_cpu(rsp->type);
result = __le16_to_cpu(rsp->result);
BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
conn->info_ident = 0;
del_timer(&conn->info_timer);
if (type == L2CAP_IT_FEAT_MASK)
conn->feat_mask = get_unaligned_le32(rsp->data);
l2cap_conn_start(conn);
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
u8 *data = skb->data;
struct l2cap_cmd_hdr cmd;
l2cap_raw_recv(conn, skb);
while (len >= L2CAP_CMD_HDR_SIZE) {
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
data += L2CAP_CMD_HDR_SIZE;
len -= L2CAP_CMD_HDR_SIZE;
cmd_len = le16_to_cpu(cmd.len);
BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
if (cmd_len > len || !cmd.ident) {
BT_DBG("corrupted command");
case L2CAP_COMMAND_REJ:
l2cap_command_rej(conn, &cmd, data);
case L2CAP_CONN_REQ:
err = l2cap_connect_req(conn, &cmd, data);
case L2CAP_CONN_RSP:
err = l2cap_connect_rsp(conn, &cmd, data);
case L2CAP_CONF_REQ:
err = l2cap_config_req(conn, &cmd, cmd_len, data);
case L2CAP_CONF_RSP:
err = l2cap_config_rsp(conn, &cmd, data);
case L2CAP_DISCONN_REQ:
err = l2cap_disconnect_req(conn, &cmd, data);
case L2CAP_DISCONN_RSP:
err = l2cap_disconnect_rsp(conn, &cmd, data);
case L2CAP_ECHO_REQ:
l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
case L2CAP_ECHO_RSP:
case L2CAP_INFO_REQ:
err = l2cap_information_req(conn, &cmd, data);
case L2CAP_INFO_RSP:
err = l2cap_information_rsp(conn, &cmd, data);
BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
struct l2cap_cmd_rej rej;
BT_DBG("error %d", err);
/* FIXME: Map err to a valid reason */
rej.reason = cpu_to_le16(0);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
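/*
 * A single C-frame on the signalling channel may carry several commands back
 * to back, which is why the loop above keeps consuming command headers until
 * fewer than L2CAP_CMD_HDR_SIZE bytes remain; any command the handlers reject
 * is answered with a Command Reject.
 */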
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
BT_DBG("unknown cid 0x%4.4x", cid);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
if (l2cap_pi(sk)->imtu < skb->len)
/* If the socket receive buffer overflows we drop data here,
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide a flow control mechanism. */
if (!sock_queue_rcv_skb(sk, skb))
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
sk = l2cap_get_sock_by_psm(0, psm, conn->src);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
if (l2cap_pi(sk)->imtu < skb->len)
if (!sock_queue_rcv_skb(sk, skb))
if (sk) bh_unlock_sock(sk);
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_hdr *lh = (void *) skb->data;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
BT_DBG("len %d, cid 0x%4.4x", len, cid);
l2cap_sig_channel(conn, skb);
psm = get_unaligned((__le16 *) skb->data);
l2cap_conless_channel(conn, psm, skb);
l2cap_data_channel(conn, cid, skb);
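/*
 * Dispatch in l2cap_recv_frame() is by destination CID (the case labels are
 * elided above): 0x0001 is the signalling channel handled by
 * l2cap_sig_channel(), 0x0002 is the connectionless channel whose payload is
 * prefixed with the target PSM and goes to l2cap_conless_channel(), and any
 * other CID is looked up as a connection-oriented data channel.
 */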
/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
int exact = 0, lm1 = 0, lm2 = 0;
register struct sock *sk;
struct hlist_node *node;
if (type != ACL_LINK)
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
read_lock(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
read_unlock(&l2cap_sk_list.lock);
return exact ? lm1 : lm2;
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn;
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != ACL_LINK)
conn = l2cap_conn_add(hcon, status);
l2cap_conn_ready(conn);
l2cap_conn_del(hcon, bt_err(status));
static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != ACL_LINK)
l2cap_conn_del(hcon, bt_err(reason));
static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
!(hcon->link_mode & HCI_LM_ENCRYPT) &&
if (sk->sk_state == BT_CONNECT) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
l2cap_sock_clear_timer(sk);
l2cap_sock_set_timer(sk, HZ / 10);
} else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
sk->sk_state = BT_CONFIG;
result = L2CAP_CR_SUCCESS;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ / 10);
result = L2CAP_CR_SEC_BLOCK;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
read_unlock(&l->lock);
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
l = &conn->chan_list;
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
(sk->sk_state == BT_CONNECTED ||
sk->sk_state == BT_CONFIG) &&
!status && encrypt == 0x00) {
__l2cap_sock_close(sk, ECONNREFUSED);
if (sk->sk_state == BT_CONNECT) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
l2cap_sock_clear_timer(sk);
l2cap_sock_set_timer(sk, HZ / 10);
} else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
sk->sk_state = BT_CONFIG;
result = L2CAP_CR_SUCCESS;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ / 10);
result = L2CAP_CR_SEC_BLOCK;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
read_unlock(&l->lock);
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data;
if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
if (flags & ACL_START) {
struct l2cap_hdr *hdr;
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
BT_ERR("Frame is too short (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
BT_ERR("Frame is too long (len %d, expected len %d)",
l2cap_conn_unreliable(conn, ECOMM);
/* Allocate skb for the complete frame (with header) */
if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len = len - skb->len;
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) {
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)",
skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len -= skb->len;
if (!conn->rx_len) {
/* Complete frame received */
l2cap_recv_frame(conn, conn->rx_skb);
conn->rx_skb = NULL;
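/*
 * Reassembly summary: an ACL_START fragment carries the L2CAP header, from
 * which the full frame length is read; if more data is expected it is
 * accumulated in conn->rx_skb, with conn->rx_len tracking the remaining
 * bytes, and continuation fragments are appended until rx_len reaches zero,
 * at which point the complete frame is passed to l2cap_recv_frame(). Length
 * mismatches mark the connection unreliable via l2cap_conn_unreliable().
 */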
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
struct hlist_node *node;
read_lock_bh(&l2cap_sk_list.lock);
sk_for_each(sk, node, &l2cap_sk_list.head) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
pi->imtu, pi->omtu, pi->link_mode);
read_unlock_bh(&l2cap_sk_list.lock);
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
static const struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
.bind = l2cap_sock_bind,
.connect = l2cap_sock_connect,
.listen = l2cap_sock_listen,
.accept = l2cap_sock_accept,
.getname = l2cap_sock_getname,
.sendmsg = l2cap_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = l2cap_sock_shutdown,
.setsockopt = l2cap_sock_setsockopt,
.getsockopt = l2cap_sock_getsockopt
static struct net_proto_family l2cap_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = l2cap_sock_create,
static struct hci_proto l2cap_hci_proto = {
.id = HCI_PROTO_L2CAP,
.connect_ind = l2cap_connect_ind,
.connect_cfm = l2cap_connect_cfm,
.disconn_ind = l2cap_disconn_ind,
.auth_cfm = l2cap_auth_cfm,
.encrypt_cfm = l2cap_encrypt_cfm,
.recv_acldata = l2cap_recv_acldata
static int __init l2cap_init(void)
err = proto_register(&l2cap_proto, 0);
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
BT_ERR("L2CAP socket registration failed");
err = hci_register_proto(&l2cap_hci_proto);
BT_ERR("L2CAP protocol registration failed");
bt_sock_unregister(BTPROTO_L2CAP);
if (class_create_file(bt_class, &class_attr_l2cap) < 0)
BT_ERR("Failed to create L2CAP info file");
BT_INFO("L2CAP ver %s", VERSION);
BT_INFO("L2CAP socket layer initialized");
proto_unregister(&l2cap_proto);
static void __exit l2cap_exit(void)
class_remove_file(bt_class, &class_attr_l2cap);
if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
BT_ERR("L2CAP socket unregistration failed");
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
BT_ERR("L2CAP protocol unregistration failed");
proto_unregister(&l2cap_proto);
void l2cap_load(void)
/* Dummy function to trigger automatic L2CAP module loading by
* other modules that use L2CAP sockets but don't use any other
* symbols from it. */
EXPORT_SYMBOL(l2cap_load);
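/*
 * Typical use (an assumption, not shown in this file): a higher-layer module
 * such as RFCOMM calls l2cap_load() from its init path, so that loading it
 * pulls in this module automatically through the symbol dependency.
 */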
module_init(l2cap_init);
module_exit(l2cap_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");