/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core and sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#ifndef CONFIG_BT_L2CAP_DEBUG

static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	__l2cap_sock_close(sk, ETIMEDOUT);

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);

static void l2cap_sock_clear_timer(struct sock *sk)
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);

static void l2cap_sock_init_timer(struct sock *sk)
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = l2cap_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;

/* ---- L2CAP channels ---- */
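/* A note for orientation, summarizing how channel identifiers are used by
 * the helpers below (values come from the Bluetooth Core specification,
 * they are not defined in this file): CID 0x0001 is the fixed signalling
 * channel, CID 0x0002 carries connectionless (SOCK_DGRAM) traffic, and
 * connection-oriented channels get a dynamically allocated source CID
 * from l2cap_alloc_cid(). */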
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);

static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
	for (; cid < 0xffff; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))

static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;

static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
		l2cap_pi(next)->prev_c = prev;
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;

	__l2cap_chan_link(l, sk);

		bt_accept_enqueue(parent, sk);

 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
		sk->sk_state_change(sk);

/* ---- L2CAP connections ---- */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn = hcon->l2cap_data;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);

	hcon->l2cap_data = conn;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	while ((sk = conn->chan_list.head)) {
		l2cap_chan_del(sk, err);

	hcon->l2cap_data = NULL;

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);

static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
	/* Get next available identifier.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)

	spin_unlock_bh(&conn->lock);

static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	return hci_send_acl(conn->hcon, skb, 0);

/* ---- Socket interface ---- */
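/* For orientation, a minimal userspace sketch of the API these socket
 * operations back (assuming the BlueZ userspace headers; the PSM and the
 * address below are made up for illustration and error handling is
 * omitted):
 *
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/l2cap.h>
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
 *	bind(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 *	addr.l2_psm = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 */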
static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))

/* Find socket with psm and source bdaddr.
 * Returns closest match.
static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)

		if (l2cap_pi(sk)->psm == psm) {
			if (!bacmp(&bt_sk(sk)->src, src))

			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))

	return node ? sk : sk1;

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s) bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

static void l2cap_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

static void l2cap_sock_cleanup_listen(struct sock *parent)
	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
static void l2cap_sock_kill(struct sock *sk)
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);

static void __l2cap_sock_close(struct sock *sk, int reason)
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
		l2cap_sock_cleanup_listen(sk);

		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
			l2cap_chan_del(sk, reason);

		l2cap_chan_del(sk, reason);

		sock_set_flag(sk, SOCK_ZAPPED);

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
	l2cap_sock_clear_timer(sk);
	__l2cap_sock_close(sk, ECONNRESET);

static void l2cap_sock_init(struct sock *sk, struct sock *parent)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

		sk->sk_type = parent->sk_type;
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->link_mode = l2cap_pi(parent)->link_mode;
		pi->imtu = L2CAP_DEFAULT_MTU;

	/* Default config options */
	pi->conf_mtu = L2CAP_DEFAULT_MTU;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;

static struct proto l2cap_proto = {
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)

static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
	sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	l2cap_sock_init_timer(sk);

	bt_sock_link(&l2cap_sk_list, sk);

static int l2cap_sock_create(struct socket *sock, int protocol)
	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);

	l2cap_sock_init(sk, NULL);

static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);

	if (!addr || addr->sa_family != AF_BLUETOOTH)

	if (sk->sk_state != BT_OPEN) {

	if (la->l2_psm > 0 && btohs(la->l2_psm) < 0x1001 &&
			!capable(CAP_NET_BIND_SERVICE)) {

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
		l2cap_pi(sk)->psm = la->l2_psm;
		l2cap_pi(sk)->sport = la->l2_psm;
		sk->sk_state = BT_BOUND;

	write_unlock_bh(&l2cap_sk_list.lock);

static int l2cap_do_connect(struct sock *sk)
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	hcon = hci_connect(hdev, ACL_LINK, dst);

	conn = l2cap_conn_add(hcon, 0);

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;

	hci_dev_unlock_bh(hdev);

static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {

	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {

	switch (sk->sk_state) {
		/* Already connecting */

		/* Already connected */

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;

	if ((err = l2cap_do_connect(sk)))

	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));

static int l2cap_sock_listen(struct socket *sock, int backlog)
	struct sock *sk = sock->sk;

	BT_DBG("sk %p backlog %d", sk, backlog);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(psm, src)) {
				l2cap_pi(sk)->psm = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);

		write_unlock_bh(&l2cap_sk_list.lock);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;
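/* Server-side counterpart of the userspace sketch given earlier
 * (illustrative only; the PSM is arbitrary): bind to a PSM, listen, then
 * accept, which ends up in l2cap_sock_accept() below.
 *
 *	addr.l2_psm = htobs(0x1001);
 *	bind(s, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(s, 5);
 *	client = accept(s, NULL, NULL);
 */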
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);

		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);

	la->l2_psm = l2cap_pi(sk)->psm;

static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent = 0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);
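	/* Worked example of the fragmentation below (the numbers are purely
	 * illustrative): with conn->mtu = 339 and a 672 byte SOCK_SEQPACKET
	 * write, hlen = 4, so the first skb carries the L2CAP header plus
	 * 335 bytes of payload and one continuation skb of 337 bytes is
	 * chained on frag_list; hci_send_acl() then emits the chain as ACL
	 * fragments. */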
	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {

		frag = &(*frag)->next;

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)

static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);

	if (msg->msg_flags & MSG_OOB)

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);

static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;

		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;

		if (get_user(opt, (u32 __user *) optval)) {

		l2cap_pi(sk)->link_mode = opt;

static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;

	if (get_user(len, optlen))

		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))

		if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED) {

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
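/* Userspace view of the L2CAP_OPTIONS request handled above (a sketch,
 * assuming the BlueZ headers; only fields present in struct l2cap_options
 * of this tree are touched):
 *
 *	struct l2cap_options opts;
 *	socklen_t optlen = sizeof(opts);
 *	getsockopt(s, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen);
 *
 * after which opts.imtu and opts.omtu hold the incoming and outgoing MTUs.
 */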
static int l2cap_sock_shutdown(struct socket *sock, int how)
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);

static int l2cap_sock_release(struct socket *sock)
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = l2cap_sock_shutdown(sock, 2);

	l2cap_sock_kill(sk);

static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);

	read_unlock(&l->lock);
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)

	read_unlock(&l->lock);

static void l2cap_chan_ready(struct sock *sk)
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);

		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		parent->sk_data_ready(parent, 0);

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)

		/* Don't send frame to the socket it came from */

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))

		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&l->lock);

/* ---- L2CAP signalling commands ---- */
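/* For reference, the wire layouts handled in this section (field names as
 * in struct l2cap_hdr and struct l2cap_cmd_hdr from
 * <net/bluetooth/l2cap.h>); all multi-byte fields are little-endian:
 *
 *	basic header:    len (2 bytes) | cid (2 bytes)
 *	command header:  code (1 byte) | ident (1 byte) | len (2 bytes)
 *
 * Signalling commands travel on CID 0x0001; they are built by
 * l2cap_build_cmd() and parsed by l2cap_sig_channel() below. */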
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;

static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

		*val = *((u8 *) opt->val);

		*val = __le16_to_cpu(*((u16 *)opt->val));

		*val = __le32_to_cpu(*((u32 *)opt->val));

		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);

static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
	int type, hint, olen;

	BT_DBG("sk %p len %d", sk, len);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);

		case L2CAP_CONF_MTU:
			l2cap_pi(sk)->conf_mtu = val;

		case L2CAP_CONF_FLUSH_TO:
			l2cap_pi(sk)->flush_to = val;

		case L2CAP_CONF_QOS:

			/* FIXME: Reject unknown option */

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

		*((u8 *) opt->val) = val;

		*((u16 *) opt->val) = cpu_to_le16(val);

		*((u32 *) opt->val) = cpu_to_le32(val);

		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;

static int l2cap_build_conf_req(struct sock *sk, void *data)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

static inline int l2cap_conf_output(struct sock *sk, void **ptr)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	/* Configure output options and let the other side know
	 * which ones we don't like. */
	if (pi->conf_mtu < pi->omtu)
		result = L2CAP_CONF_UNACCEPT;
		pi->omtu = pi->conf_mtu;

	l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);

	BT_DBG("sk %p result %d", sk, result);

static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p complete %d", sk, result ? 1 : 0);

		*result = l2cap_conf_output(sk, &ptr);

	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result ? *result : 0);
	rsp->flags = cpu_to_le16(flags);

static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
		result = L2CAP_CR_BAD_PSM;

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);

	sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;

	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		if (!hci_conn_encrypt(conn->hcon))
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))

	sk->sk_state = BT_CONFIG;
	result = status = 0;

	write_unlock_bh(&list->lock);

	bh_unlock_sock(parent);

	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))

		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))

	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);

		l2cap_chan_del(sk, ECONNREFUSED);

static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))

	if (sk->sk_state == BT_DISCONN)

	l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp, NULL), rsp);

	/* Complete config. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			l2cap_build_conf_rsp(sk, rsp, &result), rsp);

	/* Output config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);

static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))

	case L2CAP_CONF_SUCCESS:

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {

			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);

		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
			struct l2cap_disconn_req req;
			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);

	/* Input config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
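/* Overall shape of the configuration handshake driven by the two handlers
 * above (a sketch; in practice the two exchanges may interleave):
 *
 *	local                                remote
 *	  | --- L2CAP_CONF_REQ (our imtu) --> |
 *	  | <-- L2CAP_CONF_RSP -------------- |  l2cap_config_rsp(): INPUT_DONE
 *	  | <-- L2CAP_CONF_REQ -------------- |
 *	  | --- L2CAP_CONF_RSP -------------> |  l2cap_config_req(): OUTPUT_DONE
 *
 * Once both L2CAP_CONF_INPUT_DONE and L2CAP_CONF_OUTPUT_DONE are set the
 * channel moves to BT_CONNECTED and l2cap_chan_ready() runs. */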
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);

	l2cap_sock_kill(sk);

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))

	l2cap_chan_del(sk, 0);

	l2cap_sock_kill(sk);

static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	struct l2cap_info_rsp rsp;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	rsp.type = cpu_to_le16(type);
	rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd.len = __le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);

		if (cmd.len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		case L2CAP_COMMAND_REJ:
			/* FIXME: We should process this */

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, data);

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);

		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);

		case L2CAP_ECHO_RSP:

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);

			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);

			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);

static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		BT_DBG("unknown cid 0x%4.4x", cid);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)

	if (l2cap_pi(sk)->imtu < skb->len)
	/* If the socket recv buffer overflows we drop data here,
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide a flow control mechanism. */
	if (!sock_queue_rcv_skb(sk, skb))

static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)

	if (l2cap_pi(sk)->imtu < skb->len)

	if (!sock_queue_rcv_skb(sk, skb))

	if (sk) bh_unlock_sock(sk);

static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

		l2cap_sig_channel(conn, skb);

		psm = get_unaligned((u16 *) skb->data);
		l2cap_conless_channel(conn, psm, skb);

		l2cap_data_channel(conn, cid, skb);

/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
			lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;

static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)

		conn = l2cap_conn_add(hcon, status);
			l2cap_conn_ready(conn);
		l2cap_conn_del(hcon, bt_err(status));

static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)

	l2cap_conn_del(hcon, bt_err(reason));

static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_state != BT_CONNECT2 ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {

			sk->sk_state = BT_CONFIG;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	read_unlock(&l->lock);

static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_state != BT_CONNECT2) {

			sk->sk_state = BT_CONFIG;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);

	read_unlock(&l->lock);
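/* The receive path below reassembles one L2CAP frame from HCI ACL
 * fragments. Illustrative example: a frame with hdr->len = 600 (604 bytes
 * including the basic header) arriving as an ACL_START fragment of 339
 * bytes followed by an ACL_CONT fragment of 265 bytes is buffered in
 * conn->rx_skb until conn->rx_len reaches zero, then handed to
 * l2cap_recv_frame(). */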
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;

			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len = len - skb->len;

		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;

static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
				pi->imtu, pi->omtu, pi->link_mode);

	read_unlock_bh(&l2cap_sk_list.lock);

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);

static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = bt_sock_recvmsg,
	.poll = bt_sock_poll,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.ioctl = sock_no_ioctl,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt

static struct net_proto_family l2cap_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = l2cap_sock_create,

static struct hci_proto l2cap_hci_proto = {
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.auth_cfm = l2cap_auth_cfm,
	.encrypt_cfm = l2cap_encrypt_cfm,
	.recv_acldata = l2cap_recv_acldata

static int __init l2cap_init(void)
	err = proto_register(&l2cap_proto, 0);

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
		BT_ERR("L2CAP socket registration failed");

	err = hci_register_proto(&l2cap_hci_proto);
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);

	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	proto_unregister(&l2cap_proto);

static void __exit l2cap_exit(void)
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);

void l2cap_load(void)
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
EXPORT_SYMBOL(l2cap_load);
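/* Typical use from a dependent module's init path (a sketch; RFCOMM does
 * this today so that loading a module which only opens L2CAP sockets still
 * pulls in L2CAP via this exported symbol):
 *
 *	static int __init rfcomm_init(void)
 *	{
 *		l2cap_load();
 *		...
 *	}
 */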
module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");