2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
/* Debug macro gating — NOTE(review): the listing is elided; the matching
 * #define/#endif for this guard are not visible here. */
53 #ifndef CONFIG_BT_L2CAP_DEBUG
/* Socket-operations vtable, defined later in the file. */
60 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its rwlock. */
62 static struct bt_sock_list l2cap_sk_list = {
63 	.lock = RW_LOCK_UNLOCKED
/* Forward declarations for the connection/channel lifecycle helpers. */
66 static int l2cap_conn_del(struct hci_conn *conn, int err);
68 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
69 static void l2cap_chan_del(struct sock *sk, int err);
71 static void __l2cap_sock_close(struct sock *sk, int reason);
72 static void l2cap_sock_close(struct sock *sk);
73 static void l2cap_sock_kill(struct sock *sk);
/* Builder for outgoing L2CAP signalling command skbs. */
75 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
76 u8 code, u8 ident, u16 dlen, void *data);
78 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket timed out waiting for a state change,
 * so force the state machine towards close with ETIMEDOUT.
 * NOTE(review): elided listing — the lock/kill lines that normally
 * surround the close are not visible here. */
79 static void l2cap_sock_timeout(unsigned long arg)
81 	struct sock *sk = (struct sock *) arg;
83 	BT_DBG("sock %p state %d", sk, sk->sk_state);
86 	__l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket's sk_timer to fire `timeout` jiffies from now. */
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 	BT_DBG("sock %p state %d", sk, sk->sk_state);
104 	sk_stop_timer(sk, &sk->sk_timer);
/* One-time timer setup at socket allocation: route sk_timer expiry
 * to l2cap_sock_timeout with the socket itself as the argument. */
107 static void l2cap_sock_init_timer(struct sock *sk)
109 	init_timer(&sk->sk_timer);
110 	sk->sk_timer.function = l2cap_sock_timeout;
111 	sk->sk_timer.data = (unsigned long)sk;
112 /* ---- L2CAP connections ---- */
/* Attach (or find) the per-ACL-link L2CAP connection state for `hcon`.
 * Returns the existing conn if one is already attached; otherwise
 * allocates a zeroed one (GFP_ATOMIC — may be called in softirq
 * context) and wires it to the hci_conn.
 * NOTE(review): elided listing — the return statements are not visible. */
113 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
115 	struct l2cap_conn *conn;
/* Already attached: reuse it. */
117 	if ((conn = hcon->l2cap_data))
/* kmalloc + memset could be kzalloc on newer kernels; kept as-is to
 * match this file's vintage. */
123 	if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
125 	memset(conn, 0, sizeof(struct l2cap_conn));
127 	hcon->l2cap_data = conn;
/* Inherit the outgoing MTU and endpoint addresses from the HCI layer. */
130 	conn->mtu = hcon->hdev->acl_mtu;
131 	conn->src = &hcon->hdev->bdaddr;
132 	conn->dst = &hcon->dst;
134 	spin_lock_init(&conn->lock);
135 	rwlock_init(&conn->chan_list.lock);
137 	BT_DBG("hcon %p conn %p", hcon, conn);
/* Tear down the L2CAP connection attached to `hcon`: free any partial
 * reassembly skb, kill every channel on it with `err`, and detach the
 * conn from the hci_conn.
 * NOTE(review): elided listing — per-channel locking and the final
 * kfree(conn)/return are not visible here. */
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 	struct l2cap_conn *conn;
148 	if (!(conn = hcon->l2cap_data))
151 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled incoming frame. */
154 	kfree_skb(conn->rx_skb);
/* Kill channels one by one; l2cap_chan_del unlinks from the list head. */
157 	while ((sk = conn->chan_list.head)) {
159 	l2cap_chan_del(sk, err);
164 	hcon->l2cap_data = NULL;
/* Locked wrapper: add channel `sk` to `conn`'s channel list under the
 * list write-lock. `parent` is the listening socket for incoming
 * channels, or NULL for outgoing ones. */
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 	struct l2cap_chan_list *l = &conn->chan_list;
172 	write_lock(&l->lock);
173 	__l2cap_chan_add(conn, sk, parent);
174 	write_unlock(&l->lock);
/* Allocate the next signalling-command identifier for this connection,
 * under conn->lock. Identifiers wrap within the kernel-reserved 1-128
 * range (see comment below).
 * NOTE(review): elided listing — the wrap-around assignment and the
 * return of the ident are not visible here. */
177 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
181 	/* Get next available identificator.
182 	 * 1 - 128 are used by kernel.
183 	 * 129 - 199 are reserved.
184 	 * 200 - 254 are used by utilities like l2ping, etc.
187 	spin_lock(&conn->lock);
/* Wrap back into the kernel range once 128 is exceeded. */
189 	if (++conn->tx_ident > 128)
194 	spin_unlock(&conn->lock);
/* Build a signalling command skb and hand it to the HCI ACL transmit
 * path. Returns hci_send_acl()'s result.
 * NOTE(review): elided listing — the NULL-skb check between build and
 * send is not visible here. */
199 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
201 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
203 	BT_DBG("code 0x%2.2x", code);
208 	return hci_send_acl(conn->hcon, skb, 0);
209 /* ---- Socket interface ---- */
/* Find a socket bound to exactly (sport == psm, src). Caller must hold
 * l2cap_sk_list.lock. Used to detect bind conflicts. */
210 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
213 	struct hlist_node *node;
214 	sk_for_each(sk, node, &l2cap_sk_list.head)
215 		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
222 /* Find socket with psm and source bdaddr.
223  * Returns closest match.
/* Two-tier match: an exact source-address match wins; a BDADDR_ANY
 * (wildcard) listener is remembered in sk1 as the fallback. Caller
 * must hold l2cap_sk_list.lock. */
225 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
227 	struct sock *sk = NULL, *sk1 = NULL;
228 	struct hlist_node *node;
230 	sk_for_each(sk, node, &l2cap_sk_list.head) {
/* Optionally filter by socket state (e.g. BT_LISTEN). */
231 		if (state && sk->sk_state != state)
234 		if (l2cap_pi(sk)->psm == psm) {
/* Exact source address match — stop here. */
236 			if (!bacmp(&bt_sk(sk)->src, src))
/* Wildcard listener — keep as closest match. */
240 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke early on an exact match. */
244 	return node ? sk : sk1;
247 /* Find socket with given address (psm, src).
248 * Returns locked socket */
/* Locked lookup wrapper: returns the matching socket with its
 * bottom-half lock held (caller must bh_unlock_sock), or NULL. */
249 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
252 	read_lock(&l2cap_sk_list.lock);
253 	s = __l2cap_get_sock_by_psm(state, psm, src);
/* Lock the socket before dropping the list lock so it cannot vanish. */
254 	if (s) bh_lock_sock(s);
255 	read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any queued skbs when the last socket
 * reference is dropped. */
259 static void l2cap_sock_destruct(struct sock *sk)
263 	skb_queue_purge(&sk->sk_receive_queue);
264 	skb_queue_purge(&sk->sk_write_queue);
/* Close every connection still sitting on a listening socket's accept
 * queue, then mark the listener closed and zapped. */
267 static void l2cap_sock_cleanup_listen(struct sock *parent)
271 	BT_DBG("parent %p", parent);
273 	/* Close not yet accepted channels */
274 	while ((sk = bt_accept_dequeue(parent, NULL)))
275 		l2cap_sock_close(sk);
277 	parent->sk_state = BT_CLOSED;
278 	sock_set_flag(parent, SOCK_ZAPPED);
281 /* Kill socket (only if zapped and orphan)
282 * Must be called on unlocked socket.
/* Final destruction step: only sockets that are both zapped and no
 * longer attached to a struct socket (orphans) are unlinked and freed.
 * NOTE(review): elided listing — the trailing sock_put() is not
 * visible here. */
286 static void l2cap_sock_kill(struct sock *sk)
/* Not zapped, or still owned by a userspace socket — leave it alone. */
288 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
291 	BT_DBG("sk %p state %d", sk, sk->sk_state);
293 	/* Kill poor orphan */
294 	bt_sock_unlink(&l2cap_sk_list, sk);
295 	sock_set_flag(sk, SOCK_DEAD);
/* Core close state machine. For connected SEQPACKET channels it sends
 * an L2CAP disconnect request and moves to BT_DISCONN with a timer;
 * other states just delete the channel. Always ends with the socket
 * zapped. NOTE(review): elided listing — the case labels of the state
 * switch are not visible between these lines. */
299 static void __l2cap_sock_close(struct sock *sk, int reason)
301 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
303 	switch (sk->sk_state) {
/* Listening socket: flush its pending accept queue. */
305 		l2cap_sock_cleanup_listen(sk);
/* Connection-oriented channel: initiate a graceful L2CAP disconnect. */
311 		if (sk->sk_type == SOCK_SEQPACKET) {
312 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
313 			struct l2cap_disconn_req req;
315 			sk->sk_state = BT_DISCONN;
/* Bound the wait for the peer's disconnect response. */
316 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
318 			req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
319 			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
320 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
321 					L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Non-SEQPACKET sockets are torn down immediately. */
323 			l2cap_chan_del(sk, reason);
/* Other states (e.g. connecting): delete the channel directly. */
329 		l2cap_chan_del(sk, reason);
333 	sock_set_flag(sk, SOCK_ZAPPED);
336 /* Must be called on unlocked socket. */
/* Unlocked-socket entry point for close: stop the timer, then run the
 * close state machine with ECONNRESET.
 * NOTE(review): elided listing — the lock_sock/release_sock pair and
 * the l2cap_sock_kill call are not visible here. */
337 static void l2cap_sock_close(struct sock *sk)
339 	l2cap_sock_clear_timer(sk);
341 	__l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP parameters. With a parent (incoming
 * channel) the MTUs and link mode are inherited from the listener;
 * otherwise defaults are used.
 * NOTE(review): elided listing — the if/else structure around the
 * parent branch is not fully visible. */
346 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
348 	struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Inherit type and tunables from the listening socket. */
353 		sk->sk_type = parent->sk_type;
354 		pi->imtu = l2cap_pi(parent)->imtu;
355 		pi->omtu = l2cap_pi(parent)->omtu;
356 		pi->link_mode = l2cap_pi(parent)->link_mode;
/* No parent: start from the spec default incoming MTU. */
358 		pi->imtu = L2CAP_DEFAULT_MTU;
363 	/* Default config options */
364 	pi->conf_mtu = L2CAP_DEFAULT_MTU;
365 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: obj_size tells sk_alloc how much per-socket
 * state (struct l2cap_pinfo) to allocate. */
368 static struct proto l2cap_proto = {
370 	.owner		= THIS_MODULE,
371 	.obj_size	= sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket: generic sock
 * init, destructor, send timeout, timer, and linkage into the global
 * socket list. State starts at BT_OPEN.
 * NOTE(review): elided listing — the NULL check after sk_alloc and the
 * final return are not visible here. */
374 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
378 	sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
382 	sock_init_data(sock, sk);
383 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
385 	sk->sk_destruct = l2cap_sock_destruct;
/* Default timeout used for connect/disconnect state transitions. */
386 	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
388 	sock_reset_flag(sk, SOCK_ZAPPED);
390 	sk->sk_protocol = proto;
391 	sk->sk_state = BT_OPEN;
393 	l2cap_sock_init_timer(sk);
395 	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (SEQPACKET, DGRAM or
 * RAW; RAW needs CAP_NET_RAW), install the ops table, allocate and
 * initialize the sock. */
399 static int l2cap_sock_create(struct socket *sock, int protocol)
403 	BT_DBG("sock %p", sock);
405 	sock->state = SS_UNCONNECTED;
407 	if (sock->type != SOCK_SEQPACKET &&
408 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
409 		return -ESOCKTNOSUPPORT;
/* Raw L2CAP sockets see signalling traffic — privileged. */
411 	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
414 	sock->ops = &l2cap_sock_ops;
416 	sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
/* No parent — use default channel parameters. */
420 	l2cap_sock_init(sk, NULL);
/* bind(2): record the source bdaddr and PSM on a BT_OPEN socket,
 * refusing a PSM already bound to the same source address.
 * NOTE(review): elided listing — lock_sock/release_sock and the
 * EADDRINUSE error path body are not visible here. */
426 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
428 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
429 	struct sock *sk = sock->sk;
432 	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
434 	if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Can only bind a freshly created socket. */
439 	if (sk->sk_state != BT_OPEN) {
444 	write_lock_bh(&l2cap_sk_list.lock);
/* Reject a duplicate (psm, src) binding. */
446 	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
449 		/* Save source address */
450 		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
451 		l2cap_pi(sk)->psm   = la->l2_psm;
452 		l2cap_pi(sk)->sport = la->l2_psm;
453 		sk->sk_state = BT_BOUND;
456 	write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing path for a connecting socket: route to an HCI
 * device, create/reuse the ACL link, attach the channel to the L2CAP
 * connection, and — if the link is already up — either send the L2CAP
 * connect request (SEQPACKET) or go straight to BT_CONNECTED.
 * NOTE(review): elided listing — error checks after hci_connect /
 * l2cap_conn_add and the final return are not visible here. */
463 static int l2cap_do_connect(struct sock *sk)
465 	bdaddr_t *src = &bt_sk(sk)->src;
466 	bdaddr_t *dst = &bt_sk(sk)->dst;
467 	struct l2cap_conn *conn;
468 	struct hci_conn *hcon;
469 	struct hci_dev *hdev;
472 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
/* Pick the local adapter that can reach dst. */
474 	if (!(hdev = hci_get_route(dst, src)))
475 		return -EHOSTUNREACH;
477 	hci_dev_lock_bh(hdev);
481 	hcon = hci_connect(hdev, ACL_LINK, dst);
485 	conn = l2cap_conn_add(hcon, 0);
493 	/* Update source addr of the socket */
494 	bacpy(src, conn->src);
496 	l2cap_chan_add(conn, sk, NULL);
498 	sk->sk_state = BT_CONNECT;
/* Bound the connect attempt with the socket send timeout. */
499 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL link already up: proceed at the L2CAP level immediately. */
501 	if (hcon->state == BT_CONNECTED) {
502 		if (sk->sk_type == SOCK_SEQPACKET) {
503 			struct l2cap_conn_req req;
504 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
505 			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
506 			req.psm  = l2cap_pi(sk)->psm;
507 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
508 					L2CAP_CONN_REQ, sizeof(req), &req);
/* DGRAM/RAW sockets need no channel setup handshake. */
510 			l2cap_sock_clear_timer(sk);
511 			sk->sk_state = BT_CONNECTED;
516 	hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM), record
 * the destination, start the connection, and wait for BT_CONNECTED
 * honoring O_NONBLOCK.
 * NOTE(review): elided listing — the full state switch and the
 * lock_sock/release_sock pair are not visible here. */
521 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
523 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
524 	struct sock *sk = sock->sk;
531 	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
/* Connection-oriented channels must target a PSM. */
536 	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
541 	switch(sk->sk_state) {
545 		/* Already connecting */
549 		/* Already connected */
562 	/* Set destination address and psm */
563 	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
564 	l2cap_pi(sk)->psm = la->l2_psm;
566 	if ((err = l2cap_do_connect(sk)))
/* Block (or poll) until the channel reaches BT_CONNECTED. */
570 	err = bt_sock_wait_state(sk, BT_CONNECTED,
571 			sock_sndtimeo(sk, flags & O_NONBLOCK));
575 static int l2cap_sock_listen(struct socket *sock, int backlog)
577 struct sock *sk = sock->sk;
580 BT_DBG("sk %p backlog %d", sk, backlog);
584 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
589 if (!l2cap_pi(sk)->psm) {
590 bdaddr_t *src = &bt_sk(sk)->src;
595 write_lock_bh(&l2cap_sk_list.lock);
597 for (psm = 0x1001; psm < 0x1100; psm += 2)
598 if (!__l2cap_get_sock_by_addr(psm, src)) {
599 l2cap_pi(sk)->psm = htobs(psm);
600 l2cap_pi(sk)->sport = htobs(psm);
605 write_unlock_bh(&l2cap_sk_list.lock);
611 sk->sk_max_ack_backlog = backlog;
612 sk->sk_ack_backlog = 0;
613 sk->sk_state = BT_LISTEN;
620 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
622 DECLARE_WAITQUEUE(wait, current);
623 struct sock *sk = sock->sk, *nsk;
629 if (sk->sk_state != BT_LISTEN) {
634 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
636 BT_DBG("sk %p timeo %ld", sk, timeo);
638 /* Wait for an incoming connection. (wake-one). */
639 add_wait_queue_exclusive(sk->sk_sleep, &wait);
640 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
641 set_current_state(TASK_INTERRUPTIBLE);
648 timeo = schedule_timeout(timeo);
651 if (sk->sk_state != BT_LISTEN) {
656 if (signal_pending(current)) {
657 err = sock_intr_errno(timeo);
661 set_current_state(TASK_RUNNING);
662 remove_wait_queue(sk->sk_sleep, &wait);
667 newsock->state = SS_CONNECTED;
669 BT_DBG("new socket %p", nsk);
676 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
678 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
679 struct sock *sk = sock->sk;
681 BT_DBG("sock %p, sk %p", sock, sk);
683 addr->sa_family = AF_BLUETOOTH;
684 *len = sizeof(struct sockaddr_l2);
687 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
689 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
691 la->l2_psm = l2cap_pi(sk)->psm;
695 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
697 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
698 struct sk_buff *skb, **frag;
699 int err, hlen, count, sent=0;
700 struct l2cap_hdr *lh;
702 BT_DBG("sk %p len %d", sk, len);
704 /* First fragment (with L2CAP header) */
705 if (sk->sk_type == SOCK_DGRAM)
706 hlen = L2CAP_HDR_SIZE + 2;
708 hlen = L2CAP_HDR_SIZE;
710 count = min_t(unsigned int, (conn->mtu - hlen), len);
712 skb = bt_skb_send_alloc(sk, hlen + count,
713 msg->msg_flags & MSG_DONTWAIT, &err);
717 /* Create L2CAP header */
718 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
719 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
720 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
722 if (sk->sk_type == SOCK_DGRAM)
723 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
725 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
733 /* Continuation fragments (no L2CAP header) */
734 frag = &skb_shinfo(skb)->frag_list;
736 count = min_t(unsigned int, conn->mtu, len);
738 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
742 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
750 frag = &(*frag)->next;
753 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
763 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
765 struct sock *sk = sock->sk;
768 BT_DBG("sock %p, sk %p", sock, sk);
770 err = sock_error(sk);
774 if (msg->msg_flags & MSG_OOB)
777 /* Check outgoing MTU */
778 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
783 if (sk->sk_state == BT_CONNECTED)
784 err = l2cap_do_send(sk, msg, len);
792 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
794 struct sock *sk = sock->sk;
795 struct l2cap_options opts;
805 len = min_t(unsigned int, sizeof(opts), optlen);
806 if (copy_from_user((char *) &opts, optval, len)) {
810 l2cap_pi(sk)->imtu = opts.imtu;
811 l2cap_pi(sk)->omtu = opts.omtu;
815 if (get_user(opt, (u32 __user *) optval)) {
820 l2cap_pi(sk)->link_mode = opt;
832 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
834 struct sock *sk = sock->sk;
835 struct l2cap_options opts;
836 struct l2cap_conninfo cinfo;
841 if (get_user(len, optlen))
848 opts.imtu = l2cap_pi(sk)->imtu;
849 opts.omtu = l2cap_pi(sk)->omtu;
850 opts.flush_to = l2cap_pi(sk)->flush_to;
853 len = min_t(unsigned int, len, sizeof(opts));
854 if (copy_to_user(optval, (char *) &opts, len))
860 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
865 if (sk->sk_state != BT_CONNECTED) {
870 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
871 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
873 len = min_t(unsigned int, len, sizeof(cinfo));
874 if (copy_to_user(optval, (char *) &cinfo, len))
888 static int l2cap_sock_shutdown(struct socket *sock, int how)
890 struct sock *sk = sock->sk;
893 BT_DBG("sock %p, sk %p", sock, sk);
899 if (!sk->sk_shutdown) {
900 sk->sk_shutdown = SHUTDOWN_MASK;
901 l2cap_sock_clear_timer(sk);
902 __l2cap_sock_close(sk, 0);
904 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
905 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
911 static int l2cap_sock_release(struct socket *sock)
913 struct sock *sk = sock->sk;
916 BT_DBG("sock %p, sk %p", sock, sk);
921 err = l2cap_sock_shutdown(sock, 2);
928 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list by destination CID.
 * Callers must hold the list lock. */
929 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
932 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
933 		if (l2cap_pi(s)->dcid == cid)
/* Same scan, keyed by source (local) CID. Callers hold the list lock. */
939 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
942 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
943 		if (l2cap_pi(s)->scid == cid)
949 /* Find channel with given SCID.
950  * Returns locked socket */
/* Locked wrapper: returned socket has its BH lock held (caller must
 * bh_unlock_sock). NOTE(review): the matching read_lock before the
 * lookup is elided from this listing. */
951 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
955 	s = __l2cap_get_chan_by_scid(l, cid);
956 	if (s) bh_lock_sock(s);
957 	read_unlock(&l->lock);
/* Scan by pending signalling-command identifier (set while a request
 * awaits its response). Callers hold the list lock. */
961 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
964 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
965 		if (l2cap_pi(s)->ident == ident)
/* Locked wrapper for the ident lookup; same locking contract as the
 * SCID variant above. */
971 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
975 	s = __l2cap_get_chan_by_ident(l, ident);
976 	if (s) bh_lock_sock(s);
977 	read_unlock(&l->lock);
/* Allocate the first free source CID by probing the list.
 * NOTE(review): the loop's starting value is elided from this listing. */
981 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
985 	for (; cid < 0xffff; cid++) {
986 		if(!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` at the head of the doubly linked channel list.
 * Caller holds the list write-lock. */
993 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
998 		l2cap_pi(l->head)->prev_c = sk;
1000 	l2cap_pi(sk)->next_c = l->head;
1001 	l2cap_pi(sk)->prev_c = NULL;
/* Unlink `sk` from the channel list under the write-lock, fixing up
 * neighbor (and, in elided lines, head) pointers. */
1005 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
1007 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
1009 	write_lock(&l->lock);
1014 		l2cap_pi(next)->prev_c = prev;
1016 		l2cap_pi(prev)->next_c = next;
1017 	write_unlock(&l->lock);
/* Attach channel `sk` to `conn` and assign its CIDs by socket type:
 * SEQPACKET gets a dynamically allocated SCID, DGRAM uses the fixed
 * connectionless CID 0x0002, RAW uses the signalling CID 0x0001.
 * Incoming channels (parent != NULL) are queued on the listener.
 * Caller holds the channel-list write-lock. */
1022 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
1024 	struct l2cap_chan_list *l = &conn->chan_list;
1026 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
1028 	l2cap_pi(sk)->conn = conn;
1030 	if (sk->sk_type == SOCK_SEQPACKET) {
1031 		/* Alloc CID for connection-oriented socket */
1032 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
1033 	} else if (sk->sk_type == SOCK_DGRAM) {
1034 		/* Connectionless socket */
1035 		l2cap_pi(sk)->scid = 0x0002;
1036 		l2cap_pi(sk)->dcid = 0x0002;
1037 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1039 		/* Raw socket can send/recv signalling messages only */
1040 		l2cap_pi(sk)->scid = 0x0001;
1041 		l2cap_pi(sk)->dcid = 0x0001;
1042 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1045 	__l2cap_chan_link(l, sk);
/* Incoming channel: park it on the listener's accept queue. */
1048 		bt_accept_enqueue(parent, sk);
1052 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark the socket closed:
 * stop the timer, unlink from the channel list, drop the hci_conn
 * reference, set err, and wake whoever waits (the accepting parent for
 * unaccepted children, otherwise the socket itself).
 * Must be called on the locked socket. */
1053 static void l2cap_chan_del(struct sock *sk, int err)
1055 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1056 	struct sock *parent = bt_sk(sk)->parent;
1058 	l2cap_sock_clear_timer(sk);
1060 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1063 	/* Unlink from channel list */
1064 	l2cap_chan_unlink(&conn->chan_list, sk);
1065 	l2cap_pi(sk)->conn = NULL;
/* Release the reference taken when the channel was attached. */
1066 	hci_conn_put(conn->hcon);
1069 	sk->sk_state = BT_CLOSED;
1070 	sock_set_flag(sk, SOCK_ZAPPED);
/* Still on a listener's accept queue: remove and notify the parent. */
1076 		bt_accept_unlink(sk);
1077 		parent->sk_data_ready(parent, 0);
/* Otherwise wake the owner blocked on this socket's state. */
1079 		sk->sk_state_change(sk);
/* The underlying ACL link just came up: walk every channel on the
 * connection. Non-SEQPACKET sockets become connected immediately;
 * SEQPACKET channels that were waiting (BT_CONNECT) now send their
 * L2CAP connect request.
 * NOTE(review): elided listing — per-socket bh_lock/bh_unlock inside
 * the loop are not visible here. */
1082 static void l2cap_conn_ready(struct l2cap_conn *conn)
1084 	struct l2cap_chan_list *l = &conn->chan_list;
1087 	BT_DBG("conn %p", conn);
1089 	read_lock(&l->lock);
1091 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* DGRAM/RAW need no channel handshake. */
1094 		if (sk->sk_type != SOCK_SEQPACKET) {
1095 			l2cap_sock_clear_timer(sk);
1096 			sk->sk_state = BT_CONNECTED;
1097 			sk->sk_state_change(sk);
1098 		} else if (sk->sk_state == BT_CONNECT) {
1099 			struct l2cap_conn_req req;
1100 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1101 			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1102 			req.psm  = l2cap_pi(sk)->psm;
1103 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1109 	read_unlock(&l->lock);
1112 /* Notify sockets that we cannot guaranty reliability anymore */
/* Transport reliability was lost (e.g. ACL data corruption): flag the
 * error on every channel that requested L2CAP_LM_RELIABLE mode.
 * NOTE(review): elided listing — the sk_err assignment inside the loop
 * body is not visible here. */
1113 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1115 	struct l2cap_chan_list *l = &conn->chan_list;
1118 	BT_DBG("conn %p", conn);
1120 	read_lock(&l->lock);
1121 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1122 		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1125 	read_unlock(&l->lock);
/* Configuration finished on both sides: clear config state/timer and
 * wake the right waiter — the connecting thread for outgoing channels
 * (no parent) or the accepting listener for incoming ones. */
1128 static void l2cap_chan_ready(struct sock *sk)
1130 	struct sock *parent = bt_sk(sk)->parent;
1132 	BT_DBG("sk %p, parent %p", sk, parent);
1134 	l2cap_pi(sk)->conf_state = 0;
1135 	l2cap_sock_clear_timer(sk);
1138 		/* Outgoing channel.
1139 		 * Wake up socket sleeping on connect.
1141 		sk->sk_state = BT_CONNECTED;
1142 		sk->sk_state_change(sk);
1144 		/* Incoming channel.
1145 		 * Wake up socket sleeping on accept.
1147 		parent->sk_data_ready(parent, 0);
1151 /* Copy frame to all raw sockets on that connection */
/* Mirror a frame to every SOCK_RAW socket on this connection (except,
 * per the elided check, the one it originated from). Clone failures
 * and full receive queues are tolerated silently. */
1152 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1154 	struct l2cap_chan_list *l = &conn->chan_list;
1155 	struct sk_buff *nskb;
1158 	BT_DBG("conn %p", conn);
1160 	read_lock(&l->lock);
1161 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1162 		if (sk->sk_type != SOCK_RAW)
1165 		/* Don't send frame to the socket it came from */
/* Clone so each raw socket gets its own skb. */
1169 		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
/* Queue full: drop the clone (elided kfree_skb on failure). */
1172 		if (sock_queue_rcv_skb(sk, nskb))
1175 	read_unlock(&l->lock);
1178 /* ---- L2CAP signalling commands ---- */
/* Build an skb for an L2CAP signalling command on CID 0x0001:
 * L2CAP header + command header + payload, fragmented into frag_list
 * skbs when the total exceeds the ACL MTU.
 * NOTE(review): elided listing — allocation-failure cleanup, the
 * cmd->code/ident assignments, len/data advancement, and the final
 * return are not visible here. */
1179 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1180 		u8 code, u8 ident, u16 dlen, void *data)
1182 	struct sk_buff *skb, **frag;
1183 	struct l2cap_cmd_hdr *cmd;
1184 	struct l2cap_hdr *lh;
1187 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1189 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1190 	count = min_t(unsigned int, conn->mtu, len);
1192 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1196 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1197 	lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling traffic goes on the fixed signalling channel. */
1198 	lh->cid = __cpu_to_le16(0x0001);
1200 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1203 	cmd->len = __cpu_to_le16(dlen);
/* Payload space left in the first fragment. */
1206 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1207 		memcpy(skb_put(skb, count), data, count);
1213 	/* Continuation fragments (no L2CAP header) */
1214 	frag = &skb_shinfo(skb)->frag_list;
1216 		count = min_t(unsigned int, conn->mtu, len);
1218 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1222 		memcpy(skb_put(*frag, count), data, count);
1227 		frag = &(*frag)->next;
1237 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1239 struct l2cap_conf_opt *opt = *ptr;
1242 len = L2CAP_CONF_OPT_SIZE + opt->len;
1250 *val = *((u8 *) opt->val);
1254 *val = __le16_to_cpu(*((u16 *)opt->val));
1258 *val = __le32_to_cpu(*((u32 *)opt->val));
1262 *val = (unsigned long) opt->val;
1266 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Walk the option list of a peer's configuration request, recording
 * the options we understand (MTU, flush timeout) into the socket's
 * pinfo. Unknown non-hint options are currently not rejected (FIXME
 * below retained from upstream). */
1270 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1272 	int type, hint, olen;
1276 	BT_DBG("sk %p len %d", sk, len);
1278 	while (len >= L2CAP_CONF_OPT_SIZE) {
1279 		len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1285 		case L2CAP_CONF_MTU:
/* Peer's proposed MTU for traffic we send to it. */
1286 			l2cap_pi(sk)->conf_mtu = val;
1289 		case L2CAP_CONF_FLUSH_TO:
1290 			l2cap_pi(sk)->flush_to = val;
1293 		case L2CAP_CONF_QOS:
1300 			/* FIXME: Reject unknown option */
/* Append one TLV option to an outgoing configuration packet at *ptr,
 * encoding the value little-endian by size, and advance *ptr past it.
 * NOTE(review): the opt->type/opt->len assignments and the switch on
 * len are elided from this listing. */
1306 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1308 	struct l2cap_conf_opt *opt = *ptr;
1310 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1317 		*((u8 *) opt->val)  = val;
1321 		*((u16 *) opt->val) = __cpu_to_le16(val);
1325 		*((u32 *) opt->val) = __cpu_to_le32(val);
/* Larger values are passed by pointer in `val`. */
1329 		memcpy(opt->val, (void *) val, len);
1333 	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our configuration request into `data`: advertise a non-default
 * incoming MTU if one was set, fill in the destination CID, and (per
 * elided return) report the total length. */
1336 static int l2cap_build_conf_req(struct sock *sk, void *data)
1338 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1339 	struct l2cap_conf_req *req = data;
1340 	void *ptr = req->data;
1342 	BT_DBG("sk %p", sk);
/* Only send the MTU option when it differs from the spec default. */
1344 	if (pi->imtu != L2CAP_DEFAULT_MTU)
1345 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1347 	/* FIXME: Need actual value of the flush timeout */
1348 	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1349 	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1351 	req->dcid  = __cpu_to_le16(pi->dcid);
1352 	req->flags = __cpu_to_le16(0);
/* Evaluate the peer's proposed output options: if its MTU is smaller
 * than ours we counter-propose our omtu and mark the config
 * unacceptable; otherwise we adopt the peer's MTU. Returns the
 * L2CAP_CONF_* result code. */
1357 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1359 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1362 	/* Configure output options and let the other side know
1363 	 * which ones we don't like. */
1364 	if (pi->conf_mtu < pi->omtu) {
1365 		l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1366 		result = L2CAP_CONF_UNACCEPT;
/* Peer's MTU is acceptable — use it for our outgoing traffic. */
1368 		pi->omtu = pi->conf_mtu;
1371 	BT_DBG("sk %p result %d", sk, result);
1375 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1377 struct l2cap_conf_rsp *rsp = data;
1378 void *ptr = rsp->data;
1381 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1384 *result = l2cap_conf_output(sk, &ptr);
1388 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1389 rsp->result = __cpu_to_le16(result ? *result : 0);
1390 rsp->flags = __cpu_to_le16(flags);
/* Handle an incoming L2CAP connect request: look up a listener for the
 * PSM, allocate a child socket, attach it as a channel, apply any
 * service-level security (auth/encrypt may leave the channel pending),
 * and send the connect response with the outcome.
 * NOTE(review): elided listing — several error-path jumps, the psm
 * extraction, and backlog/accept bookkeeping lines are not visible. */
1395 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1397 	struct l2cap_chan_list *list = &conn->chan_list;
1398 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1399 	struct l2cap_conn_rsp rsp;
1400 	struct sock *sk, *parent;
1401 	int result = 0, status = 0;
1403 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1406 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1408 	/* Check if we have socket listening on psm */
1409 	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
/* No listener: refuse with "bad PSM". */
1411 		result = L2CAP_CR_BAD_PSM;
/* Default failure code for the resource checks below. */
1415 	result = L2CAP_CR_NO_MEM;
1417 	/* Check for backlog size */
1418 	if (sk_acceptq_is_full(parent)) {
1419 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
1423 	sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1427 	write_lock(&list->lock);
1429 	/* Check if we already have channel with that dcid */
1430 	if (__l2cap_get_chan_by_dcid(list, scid)) {
1431 		write_unlock(&list->lock);
1432 		sock_set_flag(sk, SOCK_ZAPPED);
1433 		l2cap_sock_kill(sk);
/* Keep the ACL link alive while this channel exists. */
1437 	hci_conn_hold(conn->hcon);
1439 	l2cap_sock_init(sk, parent);
1440 	bacpy(&bt_sk(sk)->src, conn->src);
1441 	bacpy(&bt_sk(sk)->dst, conn->dst);
1442 	l2cap_pi(sk)->psm  = psm;
1443 	l2cap_pi(sk)->dcid = scid;
1445 	__l2cap_chan_add(conn, sk, parent);
1446 	dcid = l2cap_pi(sk)->scid;
1448 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1450 	/* Service level security */
/* Assume security will be pending until proven otherwise below. */
1451 	result = L2CAP_CR_PEND;
1452 	status = L2CAP_CS_AUTHEN_PEND;
1453 	sk->sk_state = BT_CONNECT2;
1454 	l2cap_pi(sk)->ident = cmd->ident;
1456 	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1457 			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1458 		if (!hci_conn_encrypt(conn->hcon))
1460 	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1461 		if (!hci_conn_auth(conn->hcon))
/* No security required (or already satisfied): accept immediately. */
1465 	sk->sk_state = BT_CONFIG;
1466 	result = status = 0;
1469 	write_unlock(&list->lock);
1472 	bh_unlock_sock(parent);
1475 	rsp.scid   = __cpu_to_le16(scid);
1476 	rsp.dcid   = __cpu_to_le16(dcid);
1477 	rsp.result = __cpu_to_le16(result);
1478 	rsp.status = __cpu_to_le16(status);
1479 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1483 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1485 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1486 u16 scid, dcid, result, status;
1490 scid = __le16_to_cpu(rsp->scid);
1491 dcid = __le16_to_cpu(rsp->dcid);
1492 result = __le16_to_cpu(rsp->result);
1493 status = __le16_to_cpu(rsp->status);
1495 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1498 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1501 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1506 case L2CAP_CR_SUCCESS:
1507 sk->sk_state = BT_CONFIG;
1508 l2cap_pi(sk)->ident = 0;
1509 l2cap_pi(sk)->dcid = dcid;
1510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1512 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1513 l2cap_build_conf_req(sk, req), req);
1520 l2cap_chan_del(sk, ECONNREFUSED);
1528 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1530 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1536 dcid = __le16_to_cpu(req->dcid);
1537 flags = __le16_to_cpu(req->flags);
1539 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1541 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1544 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1546 if (flags & 0x0001) {
1547 /* Incomplete config. Send empty response. */
1548 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1549 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1553 /* Complete config. */
1554 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1555 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1560 /* Output config done */
1561 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1563 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1564 sk->sk_state = BT_CONNECTED;
1565 l2cap_chan_ready(sk);
1566 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1568 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1569 l2cap_build_conf_req(sk, req), req);
1577 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1579 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1580 u16 scid, flags, result;
1583 scid = __le16_to_cpu(rsp->scid);
1584 flags = __le16_to_cpu(rsp->flags);
1585 result = __le16_to_cpu(rsp->result);
1587 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1589 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1593 case L2CAP_CONF_SUCCESS:
1596 case L2CAP_CONF_UNACCEPT:
1597 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1599 /* It does not make sense to adjust L2CAP parameters
1600 * that are currently defined in the spec. We simply
1601 * resend config request that we sent earlier. It is
1602 * stupid, but it helps qualification testing which
1603 * expects at least some response from us. */
1604 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1605 l2cap_build_conf_req(sk, req), req);
1610 sk->sk_state = BT_DISCONN;
1611 sk->sk_err = ECONNRESET;
1612 l2cap_sock_set_timer(sk, HZ * 5);
1614 struct l2cap_disconn_req req;
1615 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1616 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1617 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1618 L2CAP_DISCONN_REQ, sizeof(req), &req);
1626 /* Input config done */
1627 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1629 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1630 sk->sk_state = BT_CONNECTED;
1631 l2cap_chan_ready(sk);
1639 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1641 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1642 struct l2cap_disconn_rsp rsp;
1646 scid = __le16_to_cpu(req->scid);
1647 dcid = __le16_to_cpu(req->dcid);
1649 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1651 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1654 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1655 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1656 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1658 sk->sk_shutdown = SHUTDOWN_MASK;
1660 l2cap_chan_del(sk, ECONNRESET);
1663 l2cap_sock_kill(sk);
1667 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1669 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1673 scid = __le16_to_cpu(rsp->scid);
1674 dcid = __le16_to_cpu(rsp->dcid);
1676 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1678 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1681 l2cap_chan_del(sk, 0);
1684 l2cap_sock_kill(sk);
/* Handle an L2CAP Information Request. This implementation supports no
 * optional features, so every query is answered with "not supported",
 * echoing the requested type back. */
1688 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1690 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1691 struct l2cap_info_rsp rsp;
1694 type = __le16_to_cpu(req->type);
1696 BT_DBG("type 0x%4.4x", type);
/* Echo the type, reject unconditionally with NOTSUPP. */
1698 rsp.type = __cpu_to_le16(type);
1699 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1700 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response. We never act on the answer;
 * the visible code only decodes and logs it. */
1705 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1707 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1710 type = __le16_to_cpu(rsp->type);
1711 result = __le16_to_cpu(rsp->result);
1713 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Process the L2CAP signalling channel: an skb may carry several
 * back-to-back commands, each prefixed by an l2cap_cmd_hdr. Commands
 * are copied out one at a time and dispatched by opcode; on a handler
 * error a Command Reject is sent back with the failing ident. */
1718 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1720 u8 *data = skb->data;
1722 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic
 * before we consume it. */
1725 l2cap_raw_recv(conn, skb);
/* Walk the packet command by command. */
1727 while (len >= L2CAP_CMD_HDR_SIZE) {
/* memcpy, not a cast: 'data' may be unaligned inside the skb. */
1728 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1729 data += L2CAP_CMD_HDR_SIZE;
1730 len -= L2CAP_CMD_HDR_SIZE;
1732 cmd.len = __le16_to_cpu(cmd.len);
1734 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
/* Reject payloads that overrun the skb; ident 0 is invalid per
 * the L2CAP signalling rules. */
1736 if (cmd.len > len || !cmd.ident) {
1737 BT_DBG("corrupted command");
/* Dispatch by command code. */
1742 case L2CAP_COMMAND_REJ:
1743 /* FIXME: We should process this */
1746 case L2CAP_CONN_REQ:
1747 err = l2cap_connect_req(conn, &cmd, data);
1750 case L2CAP_CONN_RSP:
1751 err = l2cap_connect_rsp(conn, &cmd, data);
1754 case L2CAP_CONF_REQ:
1755 err = l2cap_config_req(conn, &cmd, data);
1758 case L2CAP_CONF_RSP:
1759 err = l2cap_config_rsp(conn, &cmd, data);
1762 case L2CAP_DISCONN_REQ:
1763 err = l2cap_disconnect_req(conn, &cmd, data);
1766 case L2CAP_DISCONN_RSP:
1767 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo request: bounce the payload straight back. */
1770 case L2CAP_ECHO_REQ:
1771 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
/* Echo responses are ignored. */
1774 case L2CAP_ECHO_RSP:
1777 case L2CAP_INFO_REQ:
1778 err = l2cap_information_req(conn, &cmd, data);
1781 case L2CAP_INFO_RSP:
1782 err = l2cap_information_rsp(conn, &cmd, data);
1786 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler failure turns into a Command Reject. */
1792 struct l2cap_cmd_rej rej;
1793 BT_DBG("error %d", err);
1795 /* FIXME: Map err to a valid reason */
1796 rej.reason = __cpu_to_le16(0);
1797 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a connection-oriented data frame to the channel identified
 * by 'cid'. Frames for unknown channels, channels not in BT_CONNECTED
 * state, or frames larger than the channel's incoming MTU are dropped
 * (drop paths are in lines elided from this view). */
1807 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1811 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1813 BT_DBG("unknown cid 0x%4.4x", cid);
1817 BT_DBG("sk %p, len %d", sk, skb->len);
/* Only fully established channels accept data. */
1819 if (sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU negotiated for this channel. */
1822 if (l2cap_pi(sk)->imtu < skb->len)
1825 /* If socket recv buffers overflows we drop data here
1826 * which is *bad* because L2CAP has to be reliable.
1827 * But we don't have any other choice. L2CAP doesn't
1828 * provide flow control mechanism. */
/* sock_queue_rcv_skb returns 0 on success (skb now owned by sk). */
1830 if (!sock_queue_rcv_skb(sk, skb))
/* get_chan_by_scid left the socket bh-locked; release it. */
1837 if (sk) bh_unlock_sock(sk);
/* Deliver a connectionless (group) frame to a local socket bound to
 * the given PSM. Same drop rules as the data channel: wrong state or
 * oversized frames are discarded (drop paths elided from this view). */
1841 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1845 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1849 BT_DBG("sk %p, len %d", sk, skb->len);
/* Connectionless data is accepted by bound as well as connected
 * sockets. */
1851 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1854 if (l2cap_pi(sk)->imtu < skb->len)
1857 if (!sock_queue_rcv_skb(sk, skb))
1864 if (sk) bh_unlock_sock(sk);
/* Demultiplex one complete L2CAP frame: strip the basic header and
 * route by CID — signalling channel, connectionless channel (PSM read
 * from the payload), or a connection-oriented data channel. The CID
 * comparison values are on lines elided from this view. */
1868 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1870 struct l2cap_hdr *lh = (void *) skb->data;
/* Advance past the header; lh still points at the (now headerless)
 * skb head, which is safe because skb_pull only moves skb->data. */
1873 skb_pull(skb, L2CAP_HDR_SIZE);
1874 cid = __le16_to_cpu(lh->cid);
1875 len = __le16_to_cpu(lh->len);
1877 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1881 l2cap_sig_channel(conn, skb);
/* Connectionless frames carry the PSM in the first two payload
 * bytes; it may be unaligned inside the skb. */
1885 psm = get_unaligned((u16 *) skb->data);
1887 l2cap_conless_channel(conn, psm, skb);
1891 l2cap_data_channel(conn, cid, skb);
1896 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up. Scan the
 * listening L2CAP sockets and compute the link-mode mask to return to
 * the HCI core. Sockets bound to this exact adapter address take
 * precedence (lm1/exact) over wildcard BDADDR_ANY listeners (lm2). */
1898 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1900 int exact = 0, lm1 = 0, lm2 = 0;
1901 register struct sock *sk;
1902 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
1904 if (type != ACL_LINK)
1907 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1909 /* Find listening sockets and check their link_mode */
1910 read_lock(&l2cap_sk_list.lock);
1911 sk_for_each(sk, node, &l2cap_sk_list.head) {
1912 if (sk->sk_state != BT_LISTEN)
/* Exact source-address match beats a wildcard bind. */
1915 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1916 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1918 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1919 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1921 read_unlock(&l2cap_sk_list.lock);
1923 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection attempt finished.
 * On success, attach (or create) the L2CAP connection state and kick
 * any channels waiting on it; on failure, tear everything down with
 * the HCI status mapped to an errno. */
1926 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1928 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1930 if (hcon->type != ACL_LINK)
1934 struct l2cap_conn *conn;
1936 conn = l2cap_conn_add(hcon, status);
1938 l2cap_conn_ready(conn);
/* Failure path: propagate the translated HCI error to users. */
1940 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link dropped. Destroy the whole
 * L2CAP connection, reporting the translated HCI reason to all
 * channels riding on it. */
1945 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1947 BT_DBG("hcon %p reason %d", hcon, reason);
1949 if (hcon->type != ACL_LINK)
1952 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication on the link completed with 'status'.
 * Walk all channels on this connection and complete any that were
 * parked in BT_CONNECT2 waiting for authentication — unless the
 * channel additionally requires encryption or a secure link, in which
 * case it keeps waiting (those are finished in l2cap_encrypt_cfm).
 * The status check selecting success vs SEC_BLOCK is on lines elided
 * from this view. */
1956 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1958 struct l2cap_chan_list *l;
1959 struct l2cap_conn *conn;
1960 struct l2cap_conn_rsp rsp;
1964 if (!(conn = hcon->l2cap_data))
1966 l = &conn->chan_list;
1968 BT_DBG("conn %p", conn);
1970 read_lock(&l->lock);
1972 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels not waiting on auth, or still needing
 * encryption/secure-link before they can proceed. */
1975 if (sk->sk_state != BT_CONNECT2 ||
1976 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1977 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
/* Success: move on to configuration. */
1983 sk->sk_state = BT_CONFIG;
/* Failure: schedule teardown shortly and tell the peer the
 * connection was blocked for security reasons. */
1986 sk->sk_state = BT_DISCONN;
1987 l2cap_sock_set_timer(sk, HZ/10);
1988 result = L2CAP_CR_SEC_BLOCK;
/* Send the deferred Connect Response now (ident was saved
 * when the request arrived). */
1991 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1992 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1993 rsp.result = __cpu_to_le16(result);
1994 rsp.status = __cpu_to_le16(0);
1995 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
1996 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2001 read_unlock(&l->lock);
/* HCI callback: link encryption change completed with 'status'.
 * Complete every channel parked in BT_CONNECT2, sending the deferred
 * Connect Response; channels demanding a secure link also request a
 * link-key change. The status check selecting success vs SEC_BLOCK is
 * on lines elided from this view. */
2005 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2007 struct l2cap_chan_list *l;
2008 struct l2cap_conn *conn;
2009 struct l2cap_conn_rsp rsp;
2013 if (!(conn = hcon->l2cap_data))
2015 l = &conn->chan_list;
2017 BT_DBG("conn %p", conn);
2019 read_lock(&l->lock);
2021 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only channels waiting on security are affected. */
2024 if (sk->sk_state != BT_CONNECT2) {
/* Success: proceed to configuration. */
2030 sk->sk_state = BT_CONFIG;
/* Failure: tear down soon and report a security block. */
2033 sk->sk_state = BT_DISCONN;
2034 l2cap_sock_set_timer(sk, HZ/10);
2035 result = L2CAP_CR_SEC_BLOCK;
/* Deferred Connect Response, matched via the saved ident. */
2038 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
2039 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
2040 rsp.result = __cpu_to_le16(result);
2041 rsp.status = __cpu_to_le16(0);
2042 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2043 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Secure-link channels additionally refresh the link key. */
2045 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2046 hci_conn_change_link_key(hcon);
2051 read_unlock(&l->lock);
/* HCI callback: one ACL fragment arrived. Reassembles L2CAP frames
 * that span multiple ACL packets using conn->rx_skb / conn->rx_len,
 * and hands complete frames to l2cap_recv_frame(). Any framing
 * inconsistency discards reassembly state and marks the connection
 * unreliable with ECOMM. */
2055 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2057 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create connection state if this is the first data. */
2059 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2062 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2064 if (flags & ACL_START) {
2065 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated: drop the partial frame. */
2069 BT_ERR("Unexpected start frame (len %d)", skb->len);
2070 kfree_skb(conn->rx_skb);
2071 conn->rx_skb = NULL;
2073 l2cap_conn_unreliable(conn, ECOMM);
/* A start fragment must at least hold the L2CAP header. */
2077 BT_ERR("Frame is too short (len %d)", skb->len);
2078 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length = payload length from header + header. */
2082 hdr = (struct l2cap_hdr *) skb->data;
2083 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2085 if (len == skb->len) {
2086 /* Complete frame received */
2087 l2cap_recv_frame(conn, skb);
2091 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* Fragment claims more data than the header announced. */
2093 if (skb->len > len) {
2094 BT_ERR("Frame is too long (len %d, expected len %d)",
2096 l2cap_conn_unreliable(conn, ECOMM);
2100 /* Allocate skb for the complete frame (with header) */
2101 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2104 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
/* rx_len tracks how many bytes are still outstanding. */
2105 conn->rx_len = len - skb->len;
2107 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without an in-progress frame: protocol error. */
2109 if (!conn->rx_len) {
2110 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2111 l2cap_conn_unreliable(conn, ECOMM);
/* Continuation would overflow the reassembly buffer. */
2115 if (skb->len > conn->rx_len) {
2116 BT_ERR("Fragment is too long (len %d, expected %d)",
2117 skb->len, conn->rx_len);
2118 kfree_skb(conn->rx_skb);
2119 conn->rx_skb = NULL;
2121 l2cap_conn_unreliable(conn, ECOMM);
/* Append the fragment and deliver once the frame is whole. */
2125 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2126 conn->rx_len -= skb->len;
2128 if (!conn->rx_len) {
2129 /* Complete frame received */
2130 l2cap_recv_frame(conn, conn->rx_skb);
2131 conn->rx_skb = NULL;
/* sysfs attribute: dump one line per L2CAP socket — addresses, state,
 * PSM, CIDs, MTUs and link mode.
 * NOTE(review): sprintf writes into the fixed sysfs page buffer with
 * no bound; with enough sockets this could overrun PAGE_SIZE — worth
 * confirming and converting to a bounded/seq-style interface. */
2140 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2143 struct hlist_node *node;
/* _bh: the socket list lock is also taken from softirq context. */
2146 read_lock_bh(&l2cap_sk_list.lock);
2148 sk_for_each(sk, node, &l2cap_sk_list.head) {
2149 struct l2cap_pinfo *pi = l2cap_pi(sk);
2151 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2152 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2153 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2154 pi->omtu, pi->link_mode);
2157 read_unlock_bh(&l2cap_sk_list.lock);
2162 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. Generic
 * Bluetooth helpers (bt_sock_*) serve recvmsg/poll; operations with no
 * L2CAP meaning use the sock_no_* stubs. */
2164 static const struct proto_ops l2cap_sock_ops = {
2165 .family = PF_BLUETOOTH,
2166 .owner = THIS_MODULE,
2167 .release = l2cap_sock_release,
2168 .bind = l2cap_sock_bind,
2169 .connect = l2cap_sock_connect,
2170 .listen = l2cap_sock_listen,
2171 .accept = l2cap_sock_accept,
2172 .getname = l2cap_sock_getname,
2173 .sendmsg = l2cap_sock_sendmsg,
2174 .recvmsg = bt_sock_recvmsg,
2175 .poll = bt_sock_poll,
2176 .mmap = sock_no_mmap,
2177 .socketpair = sock_no_socketpair,
2178 .ioctl = sock_no_ioctl,
2179 .shutdown = l2cap_sock_shutdown,
2180 .setsockopt = l2cap_sock_setsockopt,
2181 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) routes to l2cap_sock_create(). */
2184 static struct net_proto_family l2cap_sock_family_ops = {
2185 .family = PF_BLUETOOTH,
2186 .owner = THIS_MODULE,
2187 .create = l2cap_sock_create,
/* Callbacks the HCI core invokes for connection life-cycle events,
 * security confirmations and inbound ACL data on L2CAP's behalf. */
2190 static struct hci_proto l2cap_hci_proto = {
2192 .id = HCI_PROTO_L2CAP,
2193 .connect_ind = l2cap_connect_ind,
2194 .connect_cfm = l2cap_connect_cfm,
2195 .disconn_ind = l2cap_disconn_ind,
2196 .auth_cfm = l2cap_auth_cfm,
2197 .encrypt_cfm = l2cap_encrypt_cfm,
2198 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family and
 * the HCI protocol handler, unwinding in reverse order on failure
 * (error-path branches are on lines elided from this view).
 * NOTE(review): class_create_file()'s return value is ignored —
 * sysfs attribute creation failure would go unnoticed; confirm and
 * check it. */
2201 static int __init l2cap_init(void)
2205 err = proto_register(&l2cap_proto, 0);
2209 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2211 BT_ERR("L2CAP socket registration failed");
2215 err = hci_register_proto(&l2cap_hci_proto);
2217 BT_ERR("L2CAP protocol registration failed");
/* Undo the socket registration before bailing out. */
2218 bt_sock_unregister(BTPROTO_L2CAP);
2222 class_create_file(&bt_class, &class_attr_l2cap);
2224 BT_INFO("L2CAP ver %s", VERSION);
2225 BT_INFO("L2CAP socket layer initialized");
/* Error label: undo proto_register. */
2230 proto_unregister(&l2cap_proto);
/* Module exit: tear down everything l2cap_init() set up, in reverse
 * order — sysfs attribute, socket family, HCI protocol, proto. */
2234 static void __exit l2cap_exit(void)
2236 class_remove_file(&bt_class, &class_attr_l2cap);
2238 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2239 BT_ERR("L2CAP socket unregistration failed");
2241 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2242 BT_ERR("L2CAP protocol unregistration failed");
2244 proto_unregister(&l2cap_proto);
/* Intentionally empty, exported hook: calling it from another module
 * creates a symbol dependency that makes the kernel auto-load this
 * L2CAP module. */
2247 void l2cap_load(void)
2249 /* Dummy function to trigger automatic L2CAP module loading by
2250 * other modules that use L2CAP sockets but don't use any other
2251 * symbols from it. */
2254 EXPORT_SYMBOL(l2cap_load);
2256 module_init(l2cap_init);
2257 module_exit(l2cap_exit);
2259 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2260 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2261 MODULE_VERSION(VERSION);
2262 MODULE_LICENSE("GPL");
2263 MODULE_ALIAS("bt-proto-0");