/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core and sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#ifndef CONFIG_BT_L2CAP_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED
};

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
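
/* Each L2CAP socket reuses the generic sk->sk_timer for its connect and
 * disconnect timeouts: l2cap_sock_set_timer() (re)arms it and, on expiry,
 * l2cap_sock_timeout() tears the channel down with ETIMEDOUT. */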
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);
	__l2cap_sock_close(sk, ETIMEDOUT);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}

static void l2cap_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = l2cap_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}
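
/* Channels belonging to one ACL connection are kept on a doubly linked
 * list (l2cap_pinfo->next_c/prev_c) rooted at conn->chan_list and guarded
 * by chan_list.lock; the __ prefixed lookup helpers below expect that
 * lock to be held by the caller. */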
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)
			break;
	}
	return s;
}

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)
			break;
	}
	return s;
}

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)
			break;
	}
	return s;
}

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
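
/* CIDs 0x0001-0x003f are reserved by the L2CAP specification (0x0001 is
 * the signalling channel, 0x0002 the connectionless channel), so
 * dynamically allocated channels start searching at 0x0040. */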
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
	u16 cid = 0x0040;

	for (; cid < 0xffff; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
			return cid;
	}

	return 0;
}

static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}

static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}

/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
/* ---- L2CAP connections ---- */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	return conn;
}

static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	if (conn->rx_skb)
		kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}

static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
{
	struct sock *sk;
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
			goto found;
	sk = NULL;
found:
	return sk;
}

/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s) bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->link_mode = l2cap_pi(parent)->link_mode;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->link_mode = 0;
	}

	/* Default config options */
	pi->conf_mtu = L2CAP_DEFAULT_MTU;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}

static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};

static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	l2cap_sock_init_timer(sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
static int l2cap_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la->l2_psm > 0 && btohs(la->l2_psm) < 0x1001 &&
			!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
		l2cap_pi(sk)->psm = la->l2_psm;
		l2cap_pi(sk)->sport = la->l2_psm;
		sk->sk_state = BT_BOUND;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
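
/* Outgoing connection setup: resolve the route to the remote device,
 * bring up (or reuse) the ACL link via hci_connect(), attach the socket
 * to the resulting l2cap_conn and, once the link is connected, send the
 * L2CAP Connection Request for SOCK_SEQPACKET channels. */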
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	hcon = hci_connect(hdev, ACL_LINK, dst);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		}
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	lock_sock(sk);

	BT_DBG("sk %p", sk);

	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
		err = -EINVAL;
		goto done;
	}

	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;

	if ((err = l2cap_do_connect(sk)))
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Pick the first unused odd PSM in the dynamic range */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(psm, src)) {
				l2cap_pi(sk)->psm = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer)
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
	else
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);

	la->l2_psm = l2cap_pi(sk)->psm;
	return 0;
}
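
/* Outgoing data is built as one B-frame: the first skb carries the L2CAP
 * header (plus the 2-byte PSM for connectionless SOCK_DGRAM traffic) and
 * anything beyond the ACL MTU is chained as continuation fragments on
 * skb_shinfo(skb)->frag_list before the frame goes to hci_send_acl(). */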
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent = 0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int err = 0, len;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->link_mode = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}

static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
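
/* Called when the underlying ACL link becomes ready: channels that were
 * waiting in BT_CONNECT send their Connection Request, while raw and
 * connectionless sockets are moved straight to BT_CONNECTED. */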
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}

/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
			sk->sk_err = err;
	}
	read_unlock(&l->lock);
}
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
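
/* Signalling PDUs travel on CID 0x0001. Each command starts with a
 * code/ident/len header (struct l2cap_cmd_hdr); l2cap_build_cmd() below
 * assembles one command into an skb and splits it into ACL-MTU sized
 * fragments when necessary. */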
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
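
/* Configuration options are encoded as type/length/value triplets. A set
 * hint bit (0x80) in the option type means the option may be silently
 * skipped if it is not understood. */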
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((u16 *)opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((u32 *)opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
{
	int type, hint, olen;
	unsigned long val;
	void *ptr = data;

	BT_DBG("sk %p len %d", sk, len);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);

		hint  = type & 0x80;
		type &= 0x7f;

		switch (type) {
		case L2CAP_CONF_MTU:
			l2cap_pi(sk)->conf_mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			l2cap_pi(sk)->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		default:
			if (hint)
				break;

			/* FIXME: Reject unknown option */
			break;
		}
	}
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((u16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((u32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
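
/* MTU negotiation: the MTU advertised by the peer (conf_mtu) becomes our
 * outgoing MTU; if it is smaller than the omtu currently in use, the
 * configuration is reported back as unacceptable. */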
static inline int l2cap_conf_output(struct sock *sk, void **ptr)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int result = 0;

	/* Configure output options and let the other side know
	 * which ones we don't like. */
	if (pi->conf_mtu < pi->omtu)
		result = L2CAP_CONF_UNACCEPT;
	else
		pi->omtu = pi->conf_mtu;

	l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);

	BT_DBG("sk %p result %d", sk, result);
	return result;
}

static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	u16 flags = 0;

	BT_DBG("sk %p complete %d", sk, result ? 1 : 0);

	if (result)
		*result = l2cap_conf_output(sk, &ptr);
	else
		flags = 0x0001;

	rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result ? *result : 0);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
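
/* Incoming Connection Request handling: look up a listening socket for
 * the requested PSM, allocate a child socket for the new channel and
 * answer with a Connection Response that is either pending (while
 * authentication/encryption is outstanding), successful or an error. */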
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	u16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;

	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		if (!hci_conn_encrypt(conn->hcon))
			goto done;
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
			goto done;
	}

	sk->sk_state = BT_CONFIG;
	result = status = 0;

done:
	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	char req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
			return 0;
	} else {
		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int result;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
		goto unlock;
	}

	/* Complete config. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			l2cap_build_conf_rsp(sk, rsp, &result), rsp);

	if (result)
		goto unlock;

	/* Output config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 req[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
			goto done;
		}

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			struct l2cap_disconn_req req;
			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		}
		goto done;
	}

	if (flags & 0x01)
		goto done;

	/* Input config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	struct l2cap_info_rsp rsp;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	rsp.type   = cpu_to_le16(type);
	rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);

	return 0;
}

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	return 0;
}
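
/* A single signalling C-frame may carry several commands; they are
 * processed in order, and a malformed or unknown command is answered
 * with a Command Reject. */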
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd.len = __le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);

		if (cmd.len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			/* FIXME: We should process this */
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd.len;
		len  -= cmd.len;
	}

	kfree_skb(skb);
}
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If socket recv buffers overflows we drop data here
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}

static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk) bh_unlock_sock(sk);
	return 0;
}

static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, psm, len;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case 0x0001:
		l2cap_sig_channel(conn, skb);
		break;

	case 0x0002:
		psm = get_unaligned((u16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
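
/* Incoming ACL connection requests are matched against listening L2CAP
 * sockets; the returned mask tells the HCI core whether to accept the
 * link and which link-mode bits (authentication, encryption) the
 * listeners require. */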
/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
			lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}

static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)
		return 0;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_err(status));

	return 0;
}

static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return 0;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_state != BT_CONNECT2 ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}

static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_state != BT_CONNECT2) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
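	return 0;
}

/* ACL reassembly: a start fragment carries the L2CAP header, from which
 * the total frame length is taken; continuation fragments are appended
 * to conn->rx_skb until rx_len drops to zero and the complete frame is
 * dispatched to l2cap_recv_frame(). */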
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
				pi->imtu, pi->omtu, pi->link_mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return (str - buf);
}

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= bt_sock_recvmsg,
	.poll		= bt_sock_poll,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.ioctl		= sock_no_ioctl,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

static struct net_proto_family l2cap_sock_family_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.create		= l2cap_sock_create,
};

static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.auth_cfm	= l2cap_auth_cfm,
	.encrypt_cfm	= l2cap_encrypt_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}

static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}

void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
	return;
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");