/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core and sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#ifndef CONFIG_BT_L2CAP_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static u32 l2cap_feat_mask = 0x0000;

static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
	struct sock *sk = (struct sock *) arg;
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	__l2cap_sock_close(sk, ETIMEDOUT);

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);

static void l2cap_sock_clear_timer(struct sock *sk)
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);

/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);

static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
	for (; cid < 0xffff; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))

static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
	l2cap_pi(l->head)->prev_c = sk;
	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;

static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
	write_lock_bh(&l->lock);
	l2cap_pi(next)->prev_c = prev;
	l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

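/* Channel endpoints: every channel on a connection is identified by a
 * source CID (scid, our end) and a destination CID (dcid, the remote end).
 * Per the L2CAP specification, CID 0x0001 is the fixed signalling channel
 * and CID 0x0002 the connectionless reception channel; connection-oriented
 * (SOCK_SEQPACKET) channels get a dynamically allocated CID from
 * l2cap_alloc_cid() above, as set up in __l2cap_chan_add() below. */
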
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
	l2cap_pi(sk)->conn = conn;
	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	__l2cap_chan_link(l, sk);
	bt_accept_enqueue(parent, sk);

/* Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;
	l2cap_sock_clear_timer(sk);
	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
	/* Unlink from channel list */
	l2cap_chan_unlink(&conn->chan_list, sk);
	l2cap_pi(sk)->conn = NULL;
	hci_conn_put(conn->hcon);
	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);
	bt_accept_unlink(sk);
	parent->sk_data_ready(parent, 0);
	sk->sk_state_change(sk);

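/* Signalling requests carry an 8-bit identifier that the remote end echoes
 * back in its response; l2cap_get_ident() below hands these out per
 * connection, staying within the 1-128 range reserved for the kernel (see
 * the comment in the function) so replies can be matched to the request
 * that triggered them. */
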
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
	spin_lock_bh(&conn->lock);
	if (++conn->tx_ident > 128)
	spin_unlock_bh(&conn->lock);

static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	BT_DBG("code 0x%2.2x", code);
	return hci_send_acl(conn->hcon, skb, 0);

/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
	struct l2cap_chan_list *l = &conn->chan_list;
	BT_DBG("conn %p", conn);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
	read_unlock(&l->lock);

static void l2cap_conn_ready(struct l2cap_conn *conn)
	BT_DBG("conn %p", conn);
	if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);
		mod_timer(&conn->info_timer,
				jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
		l2cap_send_cmd(conn, conn->info_ident,
				L2CAP_INFO_REQ, sizeof(req), &req);

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan_list *l = &conn->chan_list;
	BT_DBG("conn %p", conn);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
	read_unlock(&l->lock);

static void l2cap_info_timeout(unsigned long arg)
	struct l2cap_conn *conn = (void *) arg;
	conn->info_ident = 0;
	l2cap_conn_start(conn);

static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn = hcon->l2cap_data;
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	hcon->l2cap_data = conn;
	BT_DBG("hcon %p conn %p", hcon, conn);
	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;
	setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);
	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;
	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
	kfree_skb(conn->rx_skb);
	while ((sk = conn->chan_list.head)) {
		l2cap_chan_del(sk, err);
	hcon->l2cap_data = NULL;

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);

/* ---- Socket interface ---- */
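/*
 * Userspace reaches the handlers below through the BTPROTO_L2CAP socket
 * family. As a rough, illustrative sketch (not taken from this file), a
 * connection-oriented client would do something like:
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * which ends up in l2cap_sock_create(), l2cap_sock_connect() and
 * l2cap_do_connect() further down.
 */
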
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))

/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
		if (l2cap_pi(sk)->psm == psm) {
			if (!bacmp(&bt_sk(sk)->src, src))
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
	return node ? sk : sk1;

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s) bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

static void l2cap_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

static void l2cap_sock_cleanup_listen(struct sock *parent)
	BT_DBG("parent %p", parent);
	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);
	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
	BT_DBG("sk %p state %d", sk, sk->sk_state);
	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);

static void __l2cap_sock_close(struct sock *sk, int reason)
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
	switch (sk->sk_state) {
		l2cap_sock_cleanup_listen(sk);
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
			l2cap_chan_del(sk, reason);
		l2cap_chan_del(sk, reason);
		sock_set_flag(sk, SOCK_ZAPPED);

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
	l2cap_sock_clear_timer(sk);
	__l2cap_sock_close(sk, ECONNRESET);

static void l2cap_sock_init(struct sock *sk, struct sock *parent)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	sk->sk_type = parent->sk_type;
	pi->imtu = l2cap_pi(parent)->imtu;
	pi->omtu = l2cap_pi(parent)->omtu;
	pi->link_mode = l2cap_pi(parent)->link_mode;
	pi->imtu = L2CAP_DEFAULT_MTU;
	/* Default config options */
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;

static struct proto l2cap_proto = {
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)

static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
	sock_reset_flag(sk, SOCK_ZAPPED);
	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;
	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);
	bt_sock_link(&l2cap_sk_list, sk);

static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
	BT_DBG("sock %p", sock);
	sock->state = SS_UNCONNECTED;
	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;
	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
	sock->ops = &l2cap_sock_ops;
	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	l2cap_sock_init(sk, NULL);

static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
	if (!addr || addr->sa_family != AF_BLUETOOTH)
	if (sk->sk_state != BT_OPEN) {
	if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
			!capable(CAP_NET_BIND_SERVICE)) {
	write_lock_bh(&l2cap_sk_list.lock);
	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
	/* Save source address */
	bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;
	l2cap_pi(sk)->sport = la->l2_psm;
	sk->sk_state = BT_BOUND;
	write_unlock_bh(&l2cap_sk_list.lock);

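/* l2cap_do_connect() below drives the outgoing side: it looks up a local
 * HCI device that can reach the destination (hci_get_route), asks HCI for
 * an ACL link (hci_connect), attaches an l2cap_conn to it and then either
 * sends L2CAP_CONN_REQ right away if the ACL link is already up, or leaves
 * that to l2cap_conn_ready()/l2cap_conn_start() once the link comes up. */
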
static int l2cap_do_connect(struct sock *sk)
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;
	hci_dev_lock_bh(hdev);
	hcon = hci_connect(hdev, ACL_LINK, dst);
	conn = l2cap_conn_add(hcon, 0);
	/* Update source addr of the socket */
	bacpy(src, conn->src);
	l2cap_chan_add(conn, sk, NULL);
	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
	if (hcon->state == BT_CONNECTED) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
			l2cap_conn_ready(conn);
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		l2cap_sock_clear_timer(sk);
		sk->sk_state = BT_CONNECTED;
	hci_dev_unlock_bh(hdev);

static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
	switch (sk->sk_state) {
	/* Already connecting */
	/* Already connected */
	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;
	if ((err = l2cap_do_connect(sk)))
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));

static int l2cap_sock_listen(struct socket *sock, int backlog)
	struct sock *sk = sock->sk;
	BT_DBG("sk %p backlog %d", sk, backlog);
	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		write_lock_bh(&l2cap_sk_list.lock);
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
				l2cap_pi(sk)->psm = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
		write_unlock_bh(&l2cap_sk_list.lock);
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	if (sk->sk_state != BT_LISTEN) {
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	BT_DBG("sk %p timeo %ld", sk, timeo);
	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		if (sk->sk_state != BT_LISTEN) {
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	newsock->state = SS_CONNECTED;
	BT_DBG("new socket %p", nsk);

static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	BT_DBG("sock %p, sk %p", sock, sk);
	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);
	bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
	bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
	la->l2_psm = l2cap_pi(sk)->psm;

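/* Outgoing data path: l2cap_do_send() below builds an skb chain that is
 * handed to hci_send_acl(). The first skb carries the L2CAP basic header
 * (length + destination CID, plus a 2-byte PSM prefix for connectionless
 * SOCK_DGRAM traffic); if the payload does not fit into one ACL buffer of
 * conn->mtu bytes, the rest is copied into continuation skbs chained via
 * frag_list, which carry no further L2CAP header. */
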
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent = 0;
	struct l2cap_hdr *lh;
	BT_DBG("sk %p len %d", sk, len);
	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
		hlen = L2CAP_HDR_SIZE;
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	count = min_t(unsigned int, conn->mtu, len);
	*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
	if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
	frag = &(*frag)->next;
	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)

static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	BT_DBG("sock %p, sk %p", sock, sk);
	err = sock_error(sk);
	if (msg->msg_flags & MSG_OOB)
	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);

static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	BT_DBG("sk %p", sk);
	opts.imtu = l2cap_pi(sk)->imtu;
	opts.omtu = l2cap_pi(sk)->omtu;
	opts.flush_to = l2cap_pi(sk)->flush_to;
	opts.mode = L2CAP_MODE_BASIC;
	len = min_t(unsigned int, sizeof(opts), optlen);
	if (copy_from_user((char *) &opts, optval, len)) {
	l2cap_pi(sk)->imtu = opts.imtu;
	l2cap_pi(sk)->omtu = opts.omtu;
	if (get_user(opt, (u32 __user *) optval)) {
	l2cap_pi(sk)->link_mode = opt;

static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	BT_DBG("sk %p", sk);
	if (get_user(len, optlen))
	opts.imtu = l2cap_pi(sk)->imtu;
	opts.omtu = l2cap_pi(sk)->omtu;
	opts.flush_to = l2cap_pi(sk)->flush_to;
	opts.mode = L2CAP_MODE_BASIC;
	len = min_t(unsigned int, len, sizeof(opts));
	if (copy_to_user(optval, (char *) &opts, len))
	if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED) {
		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))

static int l2cap_sock_shutdown(struct socket *sock, int how)
	struct sock *sk = sock->sk;
	BT_DBG("sock %p, sk %p", sock, sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);
		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);

static int l2cap_sock_release(struct socket *sock)
	struct sock *sk = sock->sk;
	BT_DBG("sock %p, sk %p", sock, sk);
	err = l2cap_sock_shutdown(sock, 2);
	l2cap_sock_kill(sk);

static void l2cap_chan_ready(struct sock *sk)
	struct sock *parent = bt_sk(sk)->parent;
	BT_DBG("sk %p, parent %p", sk, parent);
	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);
	/* Outgoing channel.
	 * Wake up socket sleeping on connect.
	 */
	sk->sk_state = BT_CONNECTED;
	sk->sk_state_change(sk);
	/* Incoming channel.
	 * Wake up socket sleeping on accept.
	 */
	parent->sk_data_ready(parent, 0);

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	BT_DBG("conn %p", conn);
	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
		/* Don't send frame to the socket it came from */
		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
		if (sock_queue_rcv_skb(sk, nskb))
	read_unlock(&l->lock);

/* ---- L2CAP signalling commands ---- */
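/* Signalling PDUs travel on CID 0x0001 and consist of an l2cap_hdr (payload
 * length + CID) followed by one or more commands, each introduced by an
 * l2cap_cmd_hdr carrying the command code, the matching identifier and the
 * command-specific data length. l2cap_build_cmd() below assembles such a
 * PDU, fragmenting it over several skbs if it exceeds the ACL MTU. */
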
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);
	skb = bt_skb_alloc(count, GFP_ATOMIC);
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);
	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);
	count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
	memcpy(skb_put(skb, count), data, count);
	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	count = min_t(unsigned int, conn->mtu, len);
	*frag = bt_skb_alloc(count, GFP_ATOMIC);
	memcpy(skb_put(*frag, count), data, count);
	frag = &(*frag)->next;

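/* Configuration options inside CONF_REQ/CONF_RSP payloads are encoded as
 * type/length/value elements (struct l2cap_conf_opt). The two helpers below
 * walk and build such option lists: 1-, 2- and 4-byte values are stored in
 * little-endian order, anything larger is passed around by pointer. */
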
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
	struct l2cap_conf_opt *opt = *ptr;
	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*val = *((u8 *) opt->val);
	*val = __le16_to_cpu(*((__le16 *) opt->val));
	*val = __le32_to_cpu(*((__le32 *) opt->val));
	*val = (unsigned long) opt->val;
	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;
	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
	*((u8 *) opt->val) = val;
	*((__le16 *) opt->val) = cpu_to_le16(val);
	*((__le32 *) opt->val) = cpu_to_le32(val);
	memcpy(opt->val, (void *) val, len);
	*ptr += L2CAP_CONF_OPT_SIZE + len;

static int l2cap_build_conf_req(struct sock *sk, void *data)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	BT_DBG("sk %p", sk);
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

static int l2cap_parse_conf_req(struct sock *sk, void *data)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	BT_DBG("sk %p", sk);
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		case L2CAP_CONF_MTU:
		case L2CAP_CONF_FLUSH_TO:
		case L2CAP_CONF_QOS:
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */
		if (rfc.mode == L2CAP_MODE_BASIC) {
			result = L2CAP_CONF_UNACCEPT;
		pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
		result = L2CAP_CONF_UNACCEPT;
		memset(&rfc, 0, sizeof(rfc));
		rfc.mode = L2CAP_MODE_BASIC;
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	BT_DBG("sk %p", sk);
	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
	if (rej->reason != 0x0000)
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			cmd->ident == conn->info_ident) {
		conn->info_ident = 0;
		del_timer(&conn->info_timer);
		l2cap_conn_start(conn);

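/* Incoming connection requests: l2cap_connect_req() below looks for a
 * socket listening on the requested PSM, allocates a child socket for the
 * new channel and answers with L2CAP_CONN_RSP. If the listener asks for
 * authentication or encryption, the response carries a pending result with
 * "authentication pending" status until the HCI security callbacks
 * (l2cap_auth_cfm/l2cap_encrypt_cfm further down) finish the job. */
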
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;
	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	result = L2CAP_CR_BAD_PSM;
	result = L2CAP_CR_NO_MEM;
	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
	sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	write_lock_bh(&list->lock);
	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
	hci_conn_hold(conn->hcon);
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;
	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;
	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		if (!hci_conn_encrypt(conn->hcon))
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
	sk->sk_state = BT_CONFIG;
	result = status = 0;
	write_unlock_bh(&list->lock);
	bh_unlock_sock(parent);
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);
	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
	if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
	l2cap_chan_del(sk, ECONNREFUSED);

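/* Channel configuration is symmetric: each side sends its own CONF_REQ and
 * must get a successful CONF_RSP back. pi->conf_state tracks progress with
 * two bits: L2CAP_CONF_OUTPUT_DONE once we have accepted the peer's request
 * and L2CAP_CONF_INPUT_DONE once the peer has accepted ours; only when both
 * are set does the channel move to BT_CONNECTED via l2cap_chan_ready(). */
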
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);
	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
	if (sk->sk_state == BT_DISCONN)
	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);

static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) data;
	u16 scid, flags, result;
	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);
	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
	case L2CAP_CONF_SUCCESS:
	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
	sk->sk_state = BT_DISCONN;
	sk->sk_err = ECONNRESET;
	l2cap_sock_set_timer(sk, HZ * 5);
	struct l2cap_disconn_req req;
	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);

static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);
	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
	sk->sk_shutdown = SHUTDOWN_MASK;
	l2cap_chan_del(sk, ECONNRESET);
	l2cap_sock_kill(sk);

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
	l2cap_chan_del(sk, 0);
	l2cap_sock_kill(sk);

static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	type = __le16_to_cpu(req->type);
	BT_DBG("type 0x%4.4x", type);
	if (type == L2CAP_IT_FEAT_MASK) {
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
				L2CAP_INFO_RSP, sizeof(buf), buf);
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
				L2CAP_INFO_RSP, sizeof(rsp), &rsp);

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);
	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
	conn->info_ident = 0;
	del_timer(&conn->info_timer);
	if (type == L2CAP_IT_FEAT_MASK)
		conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
	l2cap_conn_start(conn);

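/* l2cap_sig_channel() below parses the signalling channel: one ACL frame
 * may carry several commands back to back, so it loops over the payload,
 * copies out each l2cap_cmd_hdr and dispatches on the command code.
 * Corrupted commands abort the parse; handlers that report an error cause
 * an L2CAP_COMMAND_REJ to be sent back to the peer. */
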
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;
	l2cap_raw_recv(conn, skb);
	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;
		cmd_len = le16_to_cpu(cmd.len);
		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
		case L2CAP_ECHO_RSP:
		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);
			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);

static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	BT_DBG("unknown cid 0x%4.4x", cid);
	BT_DBG("sk %p, len %d", sk, skb->len);
	if (sk->sk_state != BT_CONNECTED)
	if (l2cap_pi(sk)->imtu < skb->len)
	/* If the socket recv buffer overflows we drop data here,
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice: L2CAP doesn't
	 * provide a flow control mechanism. */
	if (!sock_queue_rcv_skb(sk, skb))

static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	BT_DBG("sk %p, len %d", sk, skb->len);
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
	if (l2cap_pi(sk)->imtu < skb->len)
	if (!sock_queue_rcv_skb(sk, skb))
	if (sk) bh_unlock_sock(sk);

static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);
	BT_DBG("len %d, cid 0x%4.4x", len, cid);
	l2cap_sig_channel(conn, skb);
	psm = get_unaligned((__le16 *) skb->data);
	l2cap_conless_channel(conn, psm, skb);
	l2cap_data_channel(conn, cid, skb);

/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;
	if (type != ACL_LINK)
	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
			lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
	read_unlock(&l2cap_sk_list.lock);
	return exact ? lm1 : lm2;

static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn;
	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
	if (hcon->type != ACL_LINK)
	conn = l2cap_conn_add(hcon, status);
	l2cap_conn_ready(conn);
	l2cap_conn_del(hcon, bt_err(status));

static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
	BT_DBG("hcon %p reason %d", hcon, reason);
	if (hcon->type != ACL_LINK)
	l2cap_conn_del(hcon, bt_err(reason));

static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	l = &conn->chan_list;
	BT_DBG("conn %p", conn);
	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_state != BT_CONNECT2 ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		sk->sk_state = BT_CONFIG;
		sk->sk_state = BT_DISCONN;
		l2cap_sock_set_timer(sk, HZ/10);
		result = L2CAP_CR_SEC_BLOCK;
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	read_unlock(&l->lock);

static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	l = &conn->chan_list;
	BT_DBG("conn %p", conn);
	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_state != BT_CONNECT2) {
		sk->sk_state = BT_CONFIG;
		sk->sk_state = BT_DISCONN;
		l2cap_sock_set_timer(sk, HZ/10);
		result = L2CAP_CR_SEC_BLOCK;
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);
	read_unlock(&l->lock);

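/* Reassembly of L2CAP frames from ACL data: a fragment flagged ACL_START
 * carries the l2cap_hdr, from which the total frame length is computed. If
 * the whole frame fits in that fragment it is processed immediately;
 * otherwise it is copied into conn->rx_skb and conn->rx_len tracks how many
 * bytes are still missing. Continuation fragments are appended until rx_len
 * reaches zero, and malformed sequences mark the connection unreliable via
 * l2cap_conn_unreliable(). */
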
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		BT_ERR("Unexpected start frame (len %d)", skb->len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;
		l2cap_conn_unreliable(conn, ECOMM);
		BT_ERR("Frame is too short (len %d)", skb->len);
		l2cap_conn_unreliable(conn, ECOMM);
		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);
		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len = len - skb->len;
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;
		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;

static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
	struct hlist_node *node;
	read_lock_bh(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);
		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
				pi->imtu, pi->omtu, pi->link_mode);
	read_unlock_bh(&l2cap_sk_list.lock);

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);

static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = bt_sock_recvmsg,
	.poll = bt_sock_poll,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.ioctl = sock_no_ioctl,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt

static struct net_proto_family l2cap_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = l2cap_sock_create,

static struct hci_proto l2cap_hci_proto = {
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.auth_cfm = l2cap_auth_cfm,
	.encrypt_cfm = l2cap_encrypt_cfm,
	.recv_acldata = l2cap_recv_acldata

static int __init l2cap_init(void)
	err = proto_register(&l2cap_proto, 0);
	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
		BT_ERR("L2CAP socket registration failed");
	err = hci_register_proto(&l2cap_hci_proto);
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");
	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");
	proto_unregister(&l2cap_proto);

static void __exit l2cap_exit(void)
	class_remove_file(bt_class, &class_attr_l2cap);
	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");
	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");
	proto_unregister(&l2cap_proto);

void l2cap_load(void)
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");