/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <net/sock.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
/* ---- L2CAP channels ---- */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 list_for_each_entry(c, &conn->chan_l, list) {
99 /* Find channel with given SCID.
100 * Returns locked socket */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
149 write_lock(&chan_list_lock);
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
174 write_unlock(&chan_list_lock);
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
184 write_unlock(&chan_list_lock);
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
207 chan->ops->state_change(chan->data, state);
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
215 __l2cap_state_change(chan, state);
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
231 __l2cap_chan_set_err(chan, err);
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
246 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
248 size_t alloc_size, i;
250 /* Allocated size is a power of 2 to map sequence numbers
251 * (which may be up to 14 bits) in to a smaller array that is
252 * sized for the negotiated ERTM transmit windows.
254 alloc_size = roundup_pow_of_two(size);
256 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
260 seq_list->mask = alloc_size - 1;
261 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
262 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
263 for (i = 0; i < alloc_size; i++)
264 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
269 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
271 kfree(seq_list->list);
274 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
277 /* Constant-time check for list membership */
278 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
281 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
283 u16 mask = seq_list->mask;
285 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
286 /* In case someone tries to pop the head of an empty list */
287 return L2CAP_SEQ_LIST_CLEAR;
288 } else if (seq_list->head == seq) {
289 /* Head can be removed in constant time */
290 seq_list->head = seq_list->list[seq & mask];
291 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
293 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
294 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
295 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
298 /* Walk the list to find the sequence number */
299 u16 prev = seq_list->head;
300 while (seq_list->list[prev & mask] != seq) {
301 prev = seq_list->list[prev & mask];
302 if (prev == L2CAP_SEQ_LIST_TAIL)
303 return L2CAP_SEQ_LIST_CLEAR;
306 /* Unlink the number from the list and clear it */
307 seq_list->list[prev & mask] = seq_list->list[seq & mask];
308 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
309 if (seq_list->tail == seq)
310 seq_list->tail = prev;
315 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
317 /* Remove the head in constant time */
318 return l2cap_seq_list_remove(seq_list, seq_list->head);
321 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
323 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
325 for (i = 0; i <= seq_list->mask; i++)
326 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
333 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
335 u16 mask = seq_list->mask;
337 /* All appends happen in constant time */
339 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
340 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
341 seq_list->head = seq;
343 seq_list->list[seq_list->tail & mask] = seq;
345 seq_list->tail = seq;
346 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with an
 * error chosen from its current state, then drops the timer's reference. */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
380 struct l2cap_chan *l2cap_chan_create(void)
382 struct l2cap_chan *chan;
384 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
388 mutex_init(&chan->lock);
390 write_lock(&chan_list_lock);
391 list_add(&chan->global_l, &chan_list);
392 write_unlock(&chan_list_lock);
394 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
396 chan->state = BT_OPEN;
398 atomic_set(&chan->refcnt, 1);
400 BT_DBG("chan %p", chan);
405 void l2cap_chan_destroy(struct l2cap_chan *chan)
407 write_lock(&chan_list_lock);
408 list_del(&chan->global_l);
409 write_unlock(&chan_list_lock);
411 l2cap_chan_put(chan);
414 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
416 chan->fcs = L2CAP_FCS_CRC16;
417 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
418 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
419 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
420 chan->sec_level = BT_SECURITY_LOW;
422 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
425 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
427 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
428 __le16_to_cpu(chan->psm), chan->dcid);
430 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
434 switch (chan->chan_type) {
435 case L2CAP_CHAN_CONN_ORIENTED:
436 if (conn->hcon->type == LE_LINK) {
438 chan->omtu = L2CAP_LE_DEFAULT_MTU;
439 chan->scid = L2CAP_CID_LE_DATA;
440 chan->dcid = L2CAP_CID_LE_DATA;
442 /* Alloc CID for connection-oriented socket */
443 chan->scid = l2cap_alloc_cid(conn);
444 chan->omtu = L2CAP_DEFAULT_MTU;
448 case L2CAP_CHAN_CONN_LESS:
449 /* Connectionless socket */
450 chan->scid = L2CAP_CID_CONN_LESS;
451 chan->dcid = L2CAP_CID_CONN_LESS;
452 chan->omtu = L2CAP_DEFAULT_MTU;
456 /* Raw socket can send/recv signalling messages only */
457 chan->scid = L2CAP_CID_SIGNALING;
458 chan->dcid = L2CAP_CID_SIGNALING;
459 chan->omtu = L2CAP_DEFAULT_MTU;
462 chan->local_id = L2CAP_BESTEFFORT_ID;
463 chan->local_stype = L2CAP_SERV_BESTEFFORT;
464 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
465 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
466 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
467 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
469 l2cap_chan_hold(chan);
471 list_add(&chan->list, &conn->chan_l);
474 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
476 mutex_lock(&conn->chan_lock);
477 __l2cap_chan_add(conn, chan);
478 mutex_unlock(&conn->chan_lock);
481 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
483 struct sock *sk = chan->sk;
484 struct l2cap_conn *conn = chan->conn;
485 struct sock *parent = bt_sk(sk)->parent;
487 __clear_chan_timer(chan);
489 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
492 /* Delete from channel list */
493 list_del(&chan->list);
495 l2cap_chan_put(chan);
498 hci_conn_put(conn->hcon);
503 __l2cap_state_change(chan, BT_CLOSED);
504 sock_set_flag(sk, SOCK_ZAPPED);
507 __l2cap_chan_set_err(chan, err);
510 bt_accept_unlink(sk);
511 parent->sk_data_ready(parent, 0);
513 sk->sk_state_change(sk);
517 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
518 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
521 skb_queue_purge(&chan->tx_q);
523 if (chan->mode == L2CAP_MODE_ERTM) {
524 struct srej_list *l, *tmp;
526 __clear_retrans_timer(chan);
527 __clear_monitor_timer(chan);
528 __clear_ack_timer(chan);
530 skb_queue_purge(&chan->srej_q);
532 l2cap_seq_list_free(&chan->srej_list);
533 l2cap_seq_list_free(&chan->retrans_list);
534 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
541 static void l2cap_chan_cleanup_listen(struct sock *parent)
545 BT_DBG("parent %p", parent);
547 /* Close not yet accepted channels */
548 while ((sk = bt_accept_dequeue(parent, NULL))) {
549 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
551 l2cap_chan_lock(chan);
552 __clear_chan_timer(chan);
553 l2cap_chan_close(chan, ECONNRESET);
554 l2cap_chan_unlock(chan);
556 chan->ops->close(chan->data);
560 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
562 struct l2cap_conn *conn = chan->conn;
563 struct sock *sk = chan->sk;
565 BT_DBG("chan %p state %s sk %p", chan,
566 state_to_string(chan->state), sk);
568 switch (chan->state) {
571 l2cap_chan_cleanup_listen(sk);
573 __l2cap_state_change(chan, BT_CLOSED);
574 sock_set_flag(sk, SOCK_ZAPPED);
580 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
581 conn->hcon->type == ACL_LINK) {
582 __set_chan_timer(chan, sk->sk_sndtimeo);
583 l2cap_send_disconn_req(conn, chan, reason);
585 l2cap_chan_del(chan, reason);
589 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
590 conn->hcon->type == ACL_LINK) {
591 struct l2cap_conn_rsp rsp;
594 if (bt_sk(sk)->defer_setup)
595 result = L2CAP_CR_SEC_BLOCK;
597 result = L2CAP_CR_BAD_PSM;
598 l2cap_state_change(chan, BT_DISCONN);
600 rsp.scid = cpu_to_le16(chan->dcid);
601 rsp.dcid = cpu_to_le16(chan->scid);
602 rsp.result = cpu_to_le16(result);
603 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
604 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
608 l2cap_chan_del(chan, reason);
613 l2cap_chan_del(chan, reason);
618 sock_set_flag(sk, SOCK_ZAPPED);
624 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
626 if (chan->chan_type == L2CAP_CHAN_RAW) {
627 switch (chan->sec_level) {
628 case BT_SECURITY_HIGH:
629 return HCI_AT_DEDICATED_BONDING_MITM;
630 case BT_SECURITY_MEDIUM:
631 return HCI_AT_DEDICATED_BONDING;
633 return HCI_AT_NO_BONDING;
635 } else if (chan->psm == cpu_to_le16(0x0001)) {
636 if (chan->sec_level == BT_SECURITY_LOW)
637 chan->sec_level = BT_SECURITY_SDP;
639 if (chan->sec_level == BT_SECURITY_HIGH)
640 return HCI_AT_NO_BONDING_MITM;
642 return HCI_AT_NO_BONDING;
644 switch (chan->sec_level) {
645 case BT_SECURITY_HIGH:
646 return HCI_AT_GENERAL_BONDING_MITM;
647 case BT_SECURITY_MEDIUM:
648 return HCI_AT_GENERAL_BONDING;
650 return HCI_AT_NO_BONDING;
655 /* Service level security */
656 int l2cap_chan_check_security(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
661 auth_type = l2cap_get_auth_type(chan);
663 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
666 static u8 l2cap_get_ident(struct l2cap_conn *conn)
670 /* Get next available identificator.
671 * 1 - 128 are used by kernel.
672 * 129 - 199 are reserved.
673 * 200 - 254 are used by utilities like l2ping, etc.
676 spin_lock(&conn->lock);
678 if (++conn->tx_ident > 128)
683 spin_unlock(&conn->lock);
688 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
690 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
693 BT_DBG("code 0x%2.2x", code);
698 if (lmp_no_flush_capable(conn->hcon->hdev))
699 flags = ACL_START_NO_FLUSH;
703 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
704 skb->priority = HCI_PRIO_MAX;
706 hci_send_acl(conn->hchan, skb, flags);
709 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
711 struct hci_conn *hcon = chan->conn->hcon;
714 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
717 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
718 lmp_no_flush_capable(hcon->hdev))
719 flags = ACL_START_NO_FLUSH;
723 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
724 hci_send_acl(chan->conn->hchan, skb, flags);
727 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
730 struct l2cap_hdr *lh;
731 struct l2cap_conn *conn = chan->conn;
734 if (chan->state != BT_CONNECTED)
737 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
738 hlen = L2CAP_EXT_HDR_SIZE;
740 hlen = L2CAP_ENH_HDR_SIZE;
742 if (chan->fcs == L2CAP_FCS_CRC16)
743 hlen += L2CAP_FCS_SIZE;
745 BT_DBG("chan %p, control 0x%8.8x", chan, control);
747 count = min_t(unsigned int, conn->mtu, hlen);
749 control |= __set_sframe(chan);
751 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
752 control |= __set_ctrl_final(chan);
754 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
755 control |= __set_ctrl_poll(chan);
757 skb = bt_skb_alloc(count, GFP_ATOMIC);
761 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
762 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
763 lh->cid = cpu_to_le16(chan->dcid);
765 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
767 if (chan->fcs == L2CAP_FCS_CRC16) {
768 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
769 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
772 skb->priority = HCI_PRIO_MAX;
773 l2cap_do_send(chan, skb);
776 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
778 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
779 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
780 set_bit(CONN_RNR_SENT, &chan->conn_state);
782 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
784 control |= __set_reqseq(chan, chan->buffer_seq);
786 l2cap_send_sframe(chan, control);
789 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
791 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
794 static void l2cap_send_conn_req(struct l2cap_chan *chan)
796 struct l2cap_conn *conn = chan->conn;
797 struct l2cap_conn_req req;
799 req.scid = cpu_to_le16(chan->scid);
802 chan->ident = l2cap_get_ident(conn);
804 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
806 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
809 static void l2cap_do_start(struct l2cap_chan *chan)
811 struct l2cap_conn *conn = chan->conn;
813 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
814 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
817 if (l2cap_chan_check_security(chan) &&
818 __l2cap_no_conn_pending(chan))
819 l2cap_send_conn_req(chan);
821 struct l2cap_info_req req;
822 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
824 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
825 conn->info_ident = l2cap_get_ident(conn);
827 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
829 l2cap_send_cmd(conn, conn->info_ident,
830 L2CAP_INFO_REQ, sizeof(req), &req);
834 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
836 u32 local_feat_mask = l2cap_feat_mask;
838 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
841 case L2CAP_MODE_ERTM:
842 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
843 case L2CAP_MODE_STREAMING:
844 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
850 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
852 struct sock *sk = chan->sk;
853 struct l2cap_disconn_req req;
858 if (chan->mode == L2CAP_MODE_ERTM) {
859 __clear_retrans_timer(chan);
860 __clear_monitor_timer(chan);
861 __clear_ack_timer(chan);
864 req.dcid = cpu_to_le16(chan->dcid);
865 req.scid = cpu_to_le16(chan->scid);
866 l2cap_send_cmd(conn, l2cap_get_ident(conn),
867 L2CAP_DISCONN_REQ, sizeof(req), &req);
870 __l2cap_state_change(chan, BT_DISCONN);
871 __l2cap_chan_set_err(chan, err);
875 /* ---- L2CAP connections ---- */
876 static void l2cap_conn_start(struct l2cap_conn *conn)
878 struct l2cap_chan *chan, *tmp;
880 BT_DBG("conn %p", conn);
882 mutex_lock(&conn->chan_lock);
884 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
885 struct sock *sk = chan->sk;
887 l2cap_chan_lock(chan);
889 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
890 l2cap_chan_unlock(chan);
894 if (chan->state == BT_CONNECT) {
895 if (!l2cap_chan_check_security(chan) ||
896 !__l2cap_no_conn_pending(chan)) {
897 l2cap_chan_unlock(chan);
901 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
902 && test_bit(CONF_STATE2_DEVICE,
903 &chan->conf_state)) {
904 l2cap_chan_close(chan, ECONNRESET);
905 l2cap_chan_unlock(chan);
909 l2cap_send_conn_req(chan);
911 } else if (chan->state == BT_CONNECT2) {
912 struct l2cap_conn_rsp rsp;
914 rsp.scid = cpu_to_le16(chan->dcid);
915 rsp.dcid = cpu_to_le16(chan->scid);
917 if (l2cap_chan_check_security(chan)) {
919 if (bt_sk(sk)->defer_setup) {
920 struct sock *parent = bt_sk(sk)->parent;
921 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
922 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
924 parent->sk_data_ready(parent, 0);
927 __l2cap_state_change(chan, BT_CONFIG);
928 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
929 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
933 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
934 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
937 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
940 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
941 rsp.result != L2CAP_CR_SUCCESS) {
942 l2cap_chan_unlock(chan);
946 set_bit(CONF_REQ_SENT, &chan->conf_state);
947 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
948 l2cap_build_conf_req(chan, buf), buf);
949 chan->num_conf_req++;
952 l2cap_chan_unlock(chan);
955 mutex_unlock(&conn->chan_lock);
958 /* Find socket with cid and source bdaddr.
959 * Returns closest match, locked.
961 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
964 struct l2cap_chan *c, *c1 = NULL;
966 read_lock(&chan_list_lock);
968 list_for_each_entry(c, &chan_list, global_l) {
969 struct sock *sk = c->sk;
971 if (state && c->state != state)
974 if (c->scid == cid) {
976 if (!bacmp(&bt_sk(sk)->src, src)) {
977 read_unlock(&chan_list_lock);
982 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
987 read_unlock(&chan_list_lock);
992 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
994 struct sock *parent, *sk;
995 struct l2cap_chan *chan, *pchan;
999 /* Check if we have socket listening on cid */
1000 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1009 /* Check for backlog size */
1010 if (sk_acceptq_is_full(parent)) {
1011 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1015 chan = pchan->ops->new_connection(pchan->data);
1021 hci_conn_hold(conn->hcon);
1023 bacpy(&bt_sk(sk)->src, conn->src);
1024 bacpy(&bt_sk(sk)->dst, conn->dst);
1026 bt_accept_enqueue(parent, sk);
1028 l2cap_chan_add(conn, chan);
1030 __set_chan_timer(chan, sk->sk_sndtimeo);
1032 __l2cap_state_change(chan, BT_CONNECTED);
1033 parent->sk_data_ready(parent, 0);
1036 release_sock(parent);
1039 static void l2cap_chan_ready(struct l2cap_chan *chan)
1041 struct sock *sk = chan->sk;
1042 struct sock *parent;
1046 parent = bt_sk(sk)->parent;
1048 BT_DBG("sk %p, parent %p", sk, parent);
1050 chan->conf_state = 0;
1051 __clear_chan_timer(chan);
1053 __l2cap_state_change(chan, BT_CONNECTED);
1054 sk->sk_state_change(sk);
1057 parent->sk_data_ready(parent, 0);
1062 static void l2cap_conn_ready(struct l2cap_conn *conn)
1064 struct l2cap_chan *chan;
1066 BT_DBG("conn %p", conn);
1068 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1069 l2cap_le_conn_ready(conn);
1071 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1072 smp_conn_security(conn, conn->hcon->pending_sec_level);
1074 mutex_lock(&conn->chan_lock);
1076 list_for_each_entry(chan, &conn->chan_l, list) {
1078 l2cap_chan_lock(chan);
1080 if (conn->hcon->type == LE_LINK) {
1081 if (smp_conn_security(conn, chan->sec_level))
1082 l2cap_chan_ready(chan);
1084 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1085 struct sock *sk = chan->sk;
1086 __clear_chan_timer(chan);
1088 __l2cap_state_change(chan, BT_CONNECTED);
1089 sk->sk_state_change(sk);
1092 } else if (chan->state == BT_CONNECT)
1093 l2cap_do_start(chan);
1095 l2cap_chan_unlock(chan);
1098 mutex_unlock(&conn->chan_lock);
1101 /* Notify sockets that we cannot guaranty reliability anymore */
1102 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1104 struct l2cap_chan *chan;
1106 BT_DBG("conn %p", conn);
1108 mutex_lock(&conn->chan_lock);
1110 list_for_each_entry(chan, &conn->chan_l, list) {
1111 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1112 __l2cap_chan_set_err(chan, err);
1115 mutex_unlock(&conn->chan_lock);
1118 static void l2cap_info_timeout(struct work_struct *work)
1120 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1123 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1124 conn->info_ident = 0;
1126 l2cap_conn_start(conn);
1129 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1131 struct l2cap_conn *conn = hcon->l2cap_data;
1132 struct l2cap_chan *chan, *l;
1137 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1139 kfree_skb(conn->rx_skb);
1141 mutex_lock(&conn->chan_lock);
1144 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1145 l2cap_chan_lock(chan);
1147 l2cap_chan_del(chan, err);
1149 l2cap_chan_unlock(chan);
1151 chan->ops->close(chan->data);
1154 mutex_unlock(&conn->chan_lock);
1156 hci_chan_del(conn->hchan);
1158 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1159 cancel_delayed_work_sync(&conn->info_timer);
1161 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1162 cancel_delayed_work_sync(&conn->security_timer);
1163 smp_chan_destroy(conn);
1166 hcon->l2cap_data = NULL;
1170 static void security_timeout(struct work_struct *work)
1172 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1173 security_timer.work);
1175 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1178 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1180 struct l2cap_conn *conn = hcon->l2cap_data;
1181 struct hci_chan *hchan;
1186 hchan = hci_chan_create(hcon);
1190 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1192 hci_chan_del(hchan);
1196 hcon->l2cap_data = conn;
1198 conn->hchan = hchan;
1200 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1202 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1203 conn->mtu = hcon->hdev->le_mtu;
1205 conn->mtu = hcon->hdev->acl_mtu;
1207 conn->src = &hcon->hdev->bdaddr;
1208 conn->dst = &hcon->dst;
1210 conn->feat_mask = 0;
1212 spin_lock_init(&conn->lock);
1213 mutex_init(&conn->chan_lock);
1215 INIT_LIST_HEAD(&conn->chan_l);
1217 if (hcon->type == LE_LINK)
1218 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1220 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1222 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1227 /* ---- Socket interface ---- */
1229 /* Find socket with psm and source bdaddr.
1230 * Returns closest match.
1232 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1234 struct l2cap_chan *c, *c1 = NULL;
1236 read_lock(&chan_list_lock);
1238 list_for_each_entry(c, &chan_list, global_l) {
1239 struct sock *sk = c->sk;
1241 if (state && c->state != state)
1244 if (c->psm == psm) {
1246 if (!bacmp(&bt_sk(sk)->src, src)) {
1247 read_unlock(&chan_list_lock);
1252 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1257 read_unlock(&chan_list_lock);
1262 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1264 struct sock *sk = chan->sk;
1265 bdaddr_t *src = &bt_sk(sk)->src;
1266 struct l2cap_conn *conn;
1267 struct hci_conn *hcon;
1268 struct hci_dev *hdev;
1272 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1273 __le16_to_cpu(chan->psm));
1275 hdev = hci_get_route(dst, src);
1277 return -EHOSTUNREACH;
1281 l2cap_chan_lock(chan);
1283 /* PSM must be odd and lsb of upper byte must be 0 */
1284 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1285 chan->chan_type != L2CAP_CHAN_RAW) {
1290 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1295 switch (chan->mode) {
1296 case L2CAP_MODE_BASIC:
1298 case L2CAP_MODE_ERTM:
1299 case L2CAP_MODE_STREAMING:
1310 switch (sk->sk_state) {
1314 /* Already connecting */
1320 /* Already connected */
1336 /* Set destination address and psm */
1337 bacpy(&bt_sk(sk)->dst, dst);
1344 auth_type = l2cap_get_auth_type(chan);
1346 if (chan->dcid == L2CAP_CID_LE_DATA)
1347 hcon = hci_connect(hdev, LE_LINK, dst,
1348 chan->sec_level, auth_type);
1350 hcon = hci_connect(hdev, ACL_LINK, dst,
1351 chan->sec_level, auth_type);
1354 err = PTR_ERR(hcon);
1358 conn = l2cap_conn_add(hcon, 0);
1365 /* Update source addr of the socket */
1366 bacpy(src, conn->src);
1368 l2cap_chan_unlock(chan);
1369 l2cap_chan_add(conn, chan);
1370 l2cap_chan_lock(chan);
1372 l2cap_state_change(chan, BT_CONNECT);
1373 __set_chan_timer(chan, sk->sk_sndtimeo);
1375 if (hcon->state == BT_CONNECTED) {
1376 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1377 __clear_chan_timer(chan);
1378 if (l2cap_chan_check_security(chan))
1379 l2cap_state_change(chan, BT_CONNECTED);
1381 l2cap_do_start(chan);
1387 l2cap_chan_unlock(chan);
1388 hci_dev_unlock(hdev);
1393 int __l2cap_wait_ack(struct sock *sk)
1395 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1396 DECLARE_WAITQUEUE(wait, current);
1400 add_wait_queue(sk_sleep(sk), &wait);
1401 set_current_state(TASK_INTERRUPTIBLE);
1402 while (chan->unacked_frames > 0 && chan->conn) {
1406 if (signal_pending(current)) {
1407 err = sock_intr_errno(timeo);
1412 timeo = schedule_timeout(timeo);
1414 set_current_state(TASK_INTERRUPTIBLE);
1416 err = sock_error(sk);
1420 set_current_state(TASK_RUNNING);
1421 remove_wait_queue(sk_sleep(sk), &wait);
1425 static void l2cap_monitor_timeout(struct work_struct *work)
1427 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1428 monitor_timer.work);
1430 BT_DBG("chan %p", chan);
1432 l2cap_chan_lock(chan);
1434 if (chan->retry_count >= chan->remote_max_tx) {
1435 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1436 l2cap_chan_unlock(chan);
1437 l2cap_chan_put(chan);
1441 chan->retry_count++;
1442 __set_monitor_timer(chan);
1444 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1445 l2cap_chan_unlock(chan);
1446 l2cap_chan_put(chan);
1449 static void l2cap_retrans_timeout(struct work_struct *work)
1451 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1452 retrans_timer.work);
1454 BT_DBG("chan %p", chan);
1456 l2cap_chan_lock(chan);
1458 chan->retry_count = 1;
1459 __set_monitor_timer(chan);
1461 set_bit(CONN_WAIT_F, &chan->conn_state);
1463 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1465 l2cap_chan_unlock(chan);
1466 l2cap_chan_put(chan);
1469 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1471 struct sk_buff *skb;
1473 while ((skb = skb_peek(&chan->tx_q)) &&
1474 chan->unacked_frames) {
1475 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1478 skb = skb_dequeue(&chan->tx_q);
1481 chan->unacked_frames--;
1484 if (!chan->unacked_frames)
1485 __clear_retrans_timer(chan);
1488 static void l2cap_streaming_send(struct l2cap_chan *chan)
1490 struct sk_buff *skb;
1494 while ((skb = skb_dequeue(&chan->tx_q))) {
1495 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1496 control |= __set_txseq(chan, chan->next_tx_seq);
1497 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1499 if (chan->fcs == L2CAP_FCS_CRC16) {
1500 fcs = crc16(0, (u8 *)skb->data,
1501 skb->len - L2CAP_FCS_SIZE);
1502 put_unaligned_le16(fcs,
1503 skb->data + skb->len - L2CAP_FCS_SIZE);
1506 l2cap_do_send(chan, skb);
1508 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number tx_seq (SREJ
 * recovery).
 *
 * Locates the frame on tx_q; if it has already been (re)sent
 * remote_max_tx times the channel is torn down with ECONNABORTED.
 * Otherwise the frame is cloned (the original stays queued), its
 * control field rebuilt — SAR bits preserved, F-bit set if one is
 * owed, current reqseq and the requested tx_seq inserted — and the
 * FCS recomputed before resending.
 */
1512 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1514 struct sk_buff *skb, *tx_skb;
1518 skb = skb_peek(&chan->tx_q);
1522 while (bt_cb(skb)->tx_seq != tx_seq) {
1523 if (skb_queue_is_last(&chan->tx_q, skb))
1526 skb = skb_queue_next(&chan->tx_q, skb);
1529 if (chan->remote_max_tx &&
1530 bt_cb(skb)->retries == chan->remote_max_tx) {
1531 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1535 tx_skb = skb_clone(skb, GFP_ATOMIC);
1536 bt_cb(skb)->retries++;
1538 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1539 control &= __get_sar_mask(chan);
1541 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1542 control |= __set_ctrl_final(chan);
1544 control |= __set_reqseq(chan, chan->buffer_seq);
1545 control |= __set_txseq(chan, tx_seq);
1547 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1549 if (chan->fcs == L2CAP_FCS_CRC16) {
1550 fcs = crc16(0, (u8 *)tx_skb->data,
1551 tx_skb->len - L2CAP_FCS_SIZE);
1552 put_unaligned_le16(fcs,
1553 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1556 l2cap_do_send(chan, tx_skb);
1559 static int l2cap_ertm_send(struct l2cap_chan *chan)
1561 struct sk_buff *skb, *tx_skb;
1566 if (chan->state != BT_CONNECTED)
1569 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1571 if (chan->remote_max_tx &&
1572 bt_cb(skb)->retries == chan->remote_max_tx) {
1573 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1577 tx_skb = skb_clone(skb, GFP_ATOMIC);
1579 bt_cb(skb)->retries++;
1581 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1582 control &= __get_sar_mask(chan);
1584 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1585 control |= __set_ctrl_final(chan);
1587 control |= __set_reqseq(chan, chan->buffer_seq);
1588 control |= __set_txseq(chan, chan->next_tx_seq);
1590 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1592 if (chan->fcs == L2CAP_FCS_CRC16) {
1593 fcs = crc16(0, (u8 *)skb->data,
1594 tx_skb->len - L2CAP_FCS_SIZE);
1595 put_unaligned_le16(fcs, skb->data +
1596 tx_skb->len - L2CAP_FCS_SIZE);
1599 l2cap_do_send(chan, tx_skb);
1601 __set_retrans_timer(chan);
1603 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1605 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1607 if (bt_cb(skb)->retries == 1) {
1608 chan->unacked_frames++;
1611 __clear_ack_timer(chan);
1614 chan->frames_sent++;
1616 if (skb_queue_is_last(&chan->tx_q, skb))
1617 chan->tx_send_head = NULL;
1619 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Resend the entire unacknowledged window (REJ recovery): rewind
 * tx_send_head to the head of tx_q, roll next_tx_seq back to
 * expected_ack_seq and let l2cap_ertm_send() retransmit from there.
 * Returns what l2cap_ertm_send() reported.
 */
1625 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1629 if (!skb_queue_empty(&chan->tx_q))
1630 chan->tx_send_head = chan->tx_q.next;
1632 chan->next_tx_seq = chan->expected_ack_seq;
1633 ret = l2cap_ertm_send(chan);
/* Acknowledge up to buffer_seq.
 *
 * If we are locally busy an RNR is sent and CONN_RNR_SENT recorded.
 * Otherwise prefer piggy-backing the ack on pending I-frames: when
 * l2cap_ertm_send() transmitted anything nothing more is needed; only
 * if no data went out is an explicit RR S-frame sent.
 */
1637 static void __l2cap_send_ack(struct l2cap_chan *chan)
1641 control |= __set_reqseq(chan, chan->buffer_seq);
1643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1645 set_bit(CONN_RNR_SENT, &chan->conn_state);
1646 l2cap_send_sframe(chan, control);
1650 if (l2cap_ertm_send(chan) > 0)
1653 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1654 l2cap_send_sframe(chan, control);
/* Immediate-ack path used outside the delayed-ack timer: cancel the
 * pending ack timer, then acknowledge right away.
 */
1657 static void l2cap_send_ack(struct l2cap_chan *chan)
1659 __clear_ack_timer(chan);
1660 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number stored in the last (tail) entry of the srej_l list — the
 * most recent frame we noticed missing.
 */
1663 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1665 struct srej_list *tail;
1668 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1669 control |= __set_ctrl_final(chan);
1671 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1672 control |= __set_reqseq(chan, tail->tx_seq);
1674 l2cap_send_sframe(chan, control);
/* Copy a user-space message (msg->msg_iov) into skb, spilling whatever
 * does not fit into a chain of continuation fragments hung off skb's
 * frag_list.  Each fragment is capped at conn->mtu and carries no
 * L2CAP header.  Allocation failure propagates the ERR_PTR from
 * ops->alloc_skb; a failed copy presumably returns -EFAULT (the return
 * lines are not visible in this listing — confirm).
 */
1677 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1678 struct msghdr *msg, int len,
1679 int count, struct sk_buff *skb)
1681 struct l2cap_conn *conn = chan->conn;
1682 struct sk_buff **frag;
1685 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1691 /* Continuation fragments (no L2CAP header) */
1692 frag = &skb_shinfo(skb)->frag_list;
1694 count = min_t(unsigned int, conn->mtu, len);
1696 *frag = chan->ops->alloc_skb(chan, count,
1697 msg->msg_flags & MSG_DONTWAIT);
1700 return PTR_ERR(*frag);
1701 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1704 (*frag)->priority = skb->priority;
1709 frag = &(*frag)->next;
/* Build a single connectionless PDU: L2CAP header plus the 2-byte PSM,
 * followed by the user payload copied in via l2cap_skbuff_fromiovec().
 * The header length field covers payload + PSM.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
1715 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1716 struct msghdr *msg, size_t len,
1719 struct l2cap_conn *conn = chan->conn;
1720 struct sk_buff *skb;
1721 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1722 struct l2cap_hdr *lh;
1724 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1726 count = min_t(unsigned int, (conn->mtu - hlen), len);
1728 skb = chan->ops->alloc_skb(chan, count + hlen,
1729 msg->msg_flags & MSG_DONTWAIT);
1733 skb->priority = priority;
1735 /* Create L2CAP header */
1736 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1737 lh->cid = cpu_to_le16(chan->dcid);
1738 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1739 put_unaligned(chan->psm, skb_put(skb, 2));
1741 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1742 if (unlikely(err < 0)) {
1744 return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header followed by the user
 * payload copied in via l2cap_skbuff_fromiovec().  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 */
1749 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1750 struct msghdr *msg, size_t len,
1753 struct l2cap_conn *conn = chan->conn;
1754 struct sk_buff *skb;
1755 int err, count, hlen = L2CAP_HDR_SIZE;
1756 struct l2cap_hdr *lh;
1758 BT_DBG("chan %p len %d", chan, (int)len);
1760 count = min_t(unsigned int, (conn->mtu - hlen), len);
1762 skb = chan->ops->alloc_skb(chan, count + hlen,
1763 msg->msg_flags & MSG_DONTWAIT);
1767 skb->priority = priority;
1769 /* Create L2CAP header */
1770 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1771 lh->cid = cpu_to_le16(chan->dcid);
1772 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1774 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1775 if (unlikely(err < 0)) {
1777 return ERR_PTR(err);
/* Build one ERTM/streaming I-frame PDU.
 *
 * Header size depends on the negotiated options: enhanced vs extended
 * control field (FLAG_EXT_CTRL), an optional SDU-length field (sdulen,
 * carried only by the first PDU of a segmented SDU) and optional CRC16
 * FCS.  Control field and SDU length are written before the payload;
 * the FCS slot is appended zeroed here and filled in at transmit time.
 * retries is reset so the TX path can count (re)transmissions.
 */
1782 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1783 struct msghdr *msg, size_t len,
1784 u32 control, u16 sdulen)
1786 struct l2cap_conn *conn = chan->conn;
1787 struct sk_buff *skb;
1788 int err, count, hlen;
1789 struct l2cap_hdr *lh;
1791 BT_DBG("chan %p len %d", chan, (int)len);
1794 return ERR_PTR(-ENOTCONN);
1796 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1797 hlen = L2CAP_EXT_HDR_SIZE;
1799 hlen = L2CAP_ENH_HDR_SIZE;
1802 hlen += L2CAP_SDULEN_SIZE;
1804 if (chan->fcs == L2CAP_FCS_CRC16)
1805 hlen += L2CAP_FCS_SIZE;
1807 count = min_t(unsigned int, (conn->mtu - hlen), len);
1809 skb = chan->ops->alloc_skb(chan, count + hlen,
1810 msg->msg_flags & MSG_DONTWAIT);
1814 /* Create L2CAP header */
1815 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1816 lh->cid = cpu_to_le16(chan->dcid);
1817 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1819 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1822 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1824 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1825 if (unlikely(err < 0)) {
1827 return ERR_PTR(err);
1830 if (chan->fcs == L2CAP_FCS_CRC16)
1831 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1833 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START / CONTINUE / END
 * sequence of I-frame PDUs on a temporary queue, then splice the queue
 * onto tx_q.  Only the START PDU carries the total SDU length.  On any
 * PDU allocation error the partial queue is purged and the errno
 * returned; tx_send_head is primed if transmission had fully drained.
 */
1837 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1839 struct sk_buff *skb;
1840 struct sk_buff_head sar_queue;
1844 skb_queue_head_init(&sar_queue);
1845 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1846 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1848 return PTR_ERR(skb);
1850 __skb_queue_tail(&sar_queue, skb);
1851 len -= chan->remote_mps;
1852 size += chan->remote_mps;
1857 if (len > chan->remote_mps) {
1858 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1859 buflen = chan->remote_mps;
1861 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1865 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1867 skb_queue_purge(&sar_queue);
1868 return PTR_ERR(skb);
1871 __skb_queue_tail(&sar_queue, skb);
1875 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1876 if (chan->tx_send_head == NULL)
1877 chan->tx_send_head = sar_queue.next;
/* Top-level transmit entry point for a channel.
 *
 * Connectionless channels send one PDU immediately.  Basic mode checks
 * the outgoing MTU and sends a single B-frame.  ERTM/streaming either
 * queue one unsegmented I-frame (SDU <= remote_mps) or segment the SDU
 * first; streaming mode then flushes the queue, while ERTM defers if
 * the remote is busy or we are waiting for an F-bit, and otherwise
 * starts transmission via l2cap_ertm_send().
 * NOTE(review): the success-return lines are not visible in this
 * listing — presumably len is returned on success; confirm.
 */
1882 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1885 struct sk_buff *skb;
1889 /* Connectionless channel */
1890 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1891 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1893 return PTR_ERR(skb);
1895 l2cap_do_send(chan, skb);
1899 switch (chan->mode) {
1900 case L2CAP_MODE_BASIC:
1901 /* Check outgoing MTU */
1902 if (len > chan->omtu)
1905 /* Create a basic PDU */
1906 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1908 return PTR_ERR(skb);
1910 l2cap_do_send(chan, skb);
1914 case L2CAP_MODE_ERTM:
1915 case L2CAP_MODE_STREAMING:
1916 /* Entire SDU fits into one PDU */
1917 if (len <= chan->remote_mps) {
1918 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1919 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1922 return PTR_ERR(skb);
1924 __skb_queue_tail(&chan->tx_q, skb);
1926 if (chan->tx_send_head == NULL)
1927 chan->tx_send_head = skb;
1930 /* Segment SDU into multiples PDUs */
1931 err = l2cap_sar_segment_sdu(chan, msg, len);
1936 if (chan->mode == L2CAP_MODE_STREAMING) {
1937 l2cap_streaming_send(chan);
1942 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1943 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1948 err = l2cap_ertm_send(chan);
1955 BT_DBG("bad state %1.1x", chan->mode);
1962 /* Copy frame to all raw sockets on that connection */
/* Walks the connection's channel list under chan_lock and hands a
 * clone of the skb to every raw (L2CAP_CHAN_RAW) channel's recv op.
 * The originating-socket check and clone-failure handling sit on lines
 * not visible in this listing.
 */
1963 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1965 struct sk_buff *nskb;
1966 struct l2cap_chan *chan;
1968 BT_DBG("conn %p", conn);
1970 mutex_lock(&conn->chan_lock);
1972 list_for_each_entry(chan, &conn->chan_l, list) {
1973 struct sock *sk = chan->sk;
1974 if (chan->chan_type != L2CAP_CHAN_RAW)
1977 /* Don't send frame to the socket it came from */
1980 nskb = skb_clone(skb, GFP_ATOMIC);
1984 if (chan->ops->recv(chan->data, nskb))
1988 mutex_unlock(&conn->chan_lock);
1991 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command skb: L2CAP header (signalling
 * CID chosen by LE vs BR/EDR link type), command header
 * (code/ident/len) and dlen bytes of payload.  Payload exceeding a
 * single conn->mtu-sized buffer is chained into frag_list fragments.
 * Returns the skb, or NULL on allocation failure (failure-path lines
 * are not visible in this listing).
 */
1992 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1993 u8 code, u8 ident, u16 dlen, void *data)
1995 struct sk_buff *skb, **frag;
1996 struct l2cap_cmd_hdr *cmd;
1997 struct l2cap_hdr *lh;
2000 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2001 conn, code, ident, dlen);
2003 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2004 count = min_t(unsigned int, conn->mtu, len);
2006 skb = bt_skb_alloc(count, GFP_ATOMIC);
2010 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2011 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2013 if (conn->hcon->type == LE_LINK)
2014 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2016 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2018 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2021 cmd->len = cpu_to_le16(dlen);
2024 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2025 memcpy(skb_put(skb, count), data, count);
2031 /* Continuation fragments (no L2CAP header) */
2032 frag = &skb_shinfo(skb)->frag_list;
2034 count = min_t(unsigned int, conn->mtu, len);
2036 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2040 memcpy(skb_put(*frag, count), data, count);
2045 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its total encoded
 * length so the caller can advance through the buffer.  Delivers
 * type/olen, and the value: 1/2/4-byte options by value, anything
 * larger as a pointer into the option buffer (so *val then aliases the
 * request data — callers must copy before the buffer is reused).
 */
2055 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2057 struct l2cap_conf_opt *opt = *ptr;
2060 len = L2CAP_CONF_OPT_SIZE + opt->len;
2068 *val = *((u8 *) opt->val);
2072 *val = get_unaligned_le16(opt->val);
2076 *val = get_unaligned_le32(opt->val);
2080 *val = (unsigned long) opt->val;
2084 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr and
 * advance the pointer past it.  1/2/4-byte values are stored inline;
 * larger values are memcpy'd from the pointer smuggled through val.
 */
2088 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2090 struct l2cap_conf_opt *opt = *ptr;
2092 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2099 *((u8 *) opt->val) = val;
2103 put_unaligned_le16(val, opt->val);
2107 put_unaligned_le32(val, opt->val);
2111 memcpy(opt->val, (void *) val, len);
2115 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an EFS (extended flow specification) option built from the
 * channel's local QoS parameters.  ERTM advertises the configured
 * local service type with default access latency and flush timeout;
 * streaming advertises best-effort.  (The streaming defaults for the
 * remaining fields and the default case sit on lines not visible in
 * this listing.)
 */
2118 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2120 struct l2cap_conf_efs efs;
2122 switch (chan->mode) {
2123 case L2CAP_MODE_ERTM:
2124 efs.id = chan->local_id;
2125 efs.stype = chan->local_stype;
2126 efs.msdu = cpu_to_le16(chan->local_msdu);
2127 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2128 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2129 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2132 case L2CAP_MODE_STREAMING:
2134 efs.stype = L2CAP_SERV_BESTEFFORT;
2135 efs.msdu = cpu_to_le16(chan->local_msdu);
2136 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2145 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2146 (unsigned long) &efs);
/* Delayed-ack timer handler: acknowledge received I-frames that have
 * not been acked yet, then drop the channel reference held by the
 * timer.
 */
2149 static void l2cap_ack_timeout(struct work_struct *work)
2151 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2154 BT_DBG("chan %p", chan);
2156 l2cap_chan_lock(chan);
2158 __l2cap_send_ack(chan);
2160 l2cap_chan_unlock(chan);
2162 l2cap_chan_put(chan);
/* Reset ERTM state for a freshly configured channel: zero the sequence
 * counters, (re)initialise the retransmission, monitor and ack work
 * timers, and set up the SREJ machinery — srej_q, srej_l and the two
 * sequence lists sized by the local and remote TX windows.  Returns 0
 * or the seq-list allocation error (the error-check line for
 * srej_list is not visible in this listing).
 */
2165 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2169 chan->expected_ack_seq = 0;
2170 chan->unacked_frames = 0;
2171 chan->buffer_seq = 0;
2172 chan->num_acked = 0;
2173 chan->frames_sent = 0;
2175 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2176 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2177 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2179 skb_queue_head_init(&chan->srej_q);
2181 INIT_LIST_HEAD(&chan->srej_l);
2182 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2186 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Pick the channel mode actually usable with this peer: keep
 * ERTM/streaming only when the remote's feature mask advertises
 * support, otherwise fall back to basic mode.
 */
2189 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2192 case L2CAP_MODE_STREAMING:
2193 case L2CAP_MODE_ERTM:
2194 if (l2cap_mode_supported(mode, remote_feat_mask))
2198 return L2CAP_MODE_BASIC;
2202 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2204 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2207 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2209 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2212 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2214 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2215 __l2cap_ews_supported(chan)) {
2216 /* use extended control field */
2217 set_bit(FLAG_EXT_CTRL, &chan->flags);
2218 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2220 chan->tx_win = min_t(u16, chan->tx_win,
2221 L2CAP_DEFAULT_TX_WINDOW);
2222 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build a Configure Request for the channel into data.
 *
 * Advertises a non-default MTU, then mode-specific options: basic mode
 * sends an explicit RFC option only when the peer supports
 * ERTM/streaming (to state the downgrade); ERTM/streaming clamp the
 * PDU size to the HCI MTU, add RFC, optional EFS, an FCS opt-out and
 * (ERTM with extended control) the extended window size.  On the first
 * request the mode may be downgraded via l2cap_select_mode().
 * Returns the request length (the return line is not visible in this
 * listing).
 */
2226 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2228 struct l2cap_conf_req *req = data;
2229 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2230 void *ptr = req->data;
2233 BT_DBG("chan %p", chan);
2235 if (chan->num_conf_req || chan->num_conf_rsp)
2238 switch (chan->mode) {
2239 case L2CAP_MODE_STREAMING:
2240 case L2CAP_MODE_ERTM:
2241 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2244 if (__l2cap_efs_supported(chan))
2245 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2249 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2254 if (chan->imtu != L2CAP_DEFAULT_MTU)
2255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2257 switch (chan->mode) {
2258 case L2CAP_MODE_BASIC:
2259 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2260 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2263 rfc.mode = L2CAP_MODE_BASIC;
2265 rfc.max_transmit = 0;
2266 rfc.retrans_timeout = 0;
2267 rfc.monitor_timeout = 0;
2268 rfc.max_pdu_size = 0;
2270 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2271 (unsigned long) &rfc);
2274 case L2CAP_MODE_ERTM:
2275 rfc.mode = L2CAP_MODE_ERTM;
2276 rfc.max_transmit = chan->max_tx;
2277 rfc.retrans_timeout = 0;
2278 rfc.monitor_timeout = 0;
2280 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2281 L2CAP_EXT_HDR_SIZE -
2284 rfc.max_pdu_size = cpu_to_le16(size);
2286 l2cap_txwin_setup(chan);
2288 rfc.txwin_size = min_t(u16, chan->tx_win,
2289 L2CAP_DEFAULT_TX_WINDOW);
2291 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2292 (unsigned long) &rfc);
2294 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2295 l2cap_add_opt_efs(&ptr, chan);
2297 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2300 if (chan->fcs == L2CAP_FCS_NONE ||
2301 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2302 chan->fcs = L2CAP_FCS_NONE;
2303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2306 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2311 case L2CAP_MODE_STREAMING:
2312 rfc.mode = L2CAP_MODE_STREAMING;
2314 rfc.max_transmit = 0;
2315 rfc.retrans_timeout = 0;
2316 rfc.monitor_timeout = 0;
2318 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2319 L2CAP_EXT_HDR_SIZE -
2322 rfc.max_pdu_size = cpu_to_le16(size);
2324 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2325 (unsigned long) &rfc);
2327 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2328 l2cap_add_opt_efs(&ptr, chan);
2330 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2333 if (chan->fcs == L2CAP_FCS_NONE ||
2334 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2335 chan->fcs = L2CAP_FCS_NONE;
2336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2341 req->dcid = cpu_to_le16(chan->dcid);
2342 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req/conf_len)
 * and build our Configure Response into data.
 *
 * First pass walks the options, recording MTU, flush timeout, RFC,
 * FCS, EFS and EWS values and flagging unknown non-hint options with
 * L2CAP_CONF_UNKNOWN.  The channel mode is then validated (renegotiated
 * at most once for non-state-2 devices) and per-mode output options
 * are built: remote window / max_tx / clamped MPS plus our retrans and
 * monitor timeouts for ERTM, MPS for streaming.  An acceptable EFS
 * switches the response to PENDING.  Non-negotiable conflicts return
 * -ECONNREFUSED.  Returns the response length (the return line is not
 * visible in this listing).
 */
2347 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2349 struct l2cap_conf_rsp *rsp = data;
2350 void *ptr = rsp->data;
2351 void *req = chan->conf_req;
2352 int len = chan->conf_len;
2353 int type, hint, olen;
2355 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2356 struct l2cap_conf_efs efs;
2358 u16 mtu = L2CAP_DEFAULT_MTU;
2359 u16 result = L2CAP_CONF_SUCCESS;
2362 BT_DBG("chan %p", chan);
2364 while (len >= L2CAP_CONF_OPT_SIZE) {
2365 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2367 hint = type & L2CAP_CONF_HINT;
2368 type &= L2CAP_CONF_MASK;
2371 case L2CAP_CONF_MTU:
2375 case L2CAP_CONF_FLUSH_TO:
2376 chan->flush_to = val;
2379 case L2CAP_CONF_QOS:
2382 case L2CAP_CONF_RFC:
2383 if (olen == sizeof(rfc))
2384 memcpy(&rfc, (void *) val, olen);
2387 case L2CAP_CONF_FCS:
2388 if (val == L2CAP_FCS_NONE)
2389 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2392 case L2CAP_CONF_EFS:
2394 if (olen == sizeof(efs))
2395 memcpy(&efs, (void *) val, olen);
2398 case L2CAP_CONF_EWS:
2400 return -ECONNREFUSED;
2402 set_bit(FLAG_EXT_CTRL, &chan->flags);
2403 set_bit(CONF_EWS_RECV, &chan->conf_state);
2404 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2405 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back in the UNKNOWN rsp. */
2412 result = L2CAP_CONF_UNKNOWN;
2413 *((u8 *) ptr++) = type;
2418 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2421 switch (chan->mode) {
2422 case L2CAP_MODE_STREAMING:
2423 case L2CAP_MODE_ERTM:
2424 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2425 chan->mode = l2cap_select_mode(rfc.mode,
2426 chan->conn->feat_mask);
2431 if (__l2cap_efs_supported(chan))
2432 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2434 return -ECONNREFUSED;
2437 if (chan->mode != rfc.mode)
2438 return -ECONNREFUSED;
2444 if (chan->mode != rfc.mode) {
2445 result = L2CAP_CONF_UNACCEPT;
2446 rfc.mode = chan->mode;
/* Only one renegotiation round is allowed. */
2448 if (chan->num_conf_rsp == 1)
2449 return -ECONNREFUSED;
2451 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2452 sizeof(rfc), (unsigned long) &rfc);
2455 if (result == L2CAP_CONF_SUCCESS) {
2456 /* Configure output options and let the other side know
2457 * which ones we don't like. */
2459 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2460 result = L2CAP_CONF_UNACCEPT;
2463 set_bit(CONF_MTU_DONE, &chan->conf_state);
2465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2468 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2469 efs.stype != L2CAP_SERV_NOTRAFIC &&
2470 efs.stype != chan->local_stype) {
2472 result = L2CAP_CONF_UNACCEPT;
2474 if (chan->num_conf_req >= 1)
2475 return -ECONNREFUSED;
2477 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2479 (unsigned long) &efs);
2481 /* Send PENDING Conf Rsp */
2482 result = L2CAP_CONF_PENDING;
2483 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2488 case L2CAP_MODE_BASIC:
2489 chan->fcs = L2CAP_FCS_NONE;
2490 set_bit(CONF_MODE_DONE, &chan->conf_state);
2493 case L2CAP_MODE_ERTM:
2494 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2495 chan->remote_tx_win = rfc.txwin_size;
2497 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2499 chan->remote_max_tx = rfc.max_transmit;
2501 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2503 L2CAP_EXT_HDR_SIZE -
2506 rfc.max_pdu_size = cpu_to_le16(size);
2507 chan->remote_mps = size;
2509 rfc.retrans_timeout =
2510 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2511 rfc.monitor_timeout =
2512 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2514 set_bit(CONF_MODE_DONE, &chan->conf_state);
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2517 sizeof(rfc), (unsigned long) &rfc);
2519 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2520 chan->remote_id = efs.id;
2521 chan->remote_stype = efs.stype;
2522 chan->remote_msdu = le16_to_cpu(efs.msdu);
2523 chan->remote_flush_to =
2524 le32_to_cpu(efs.flush_to);
2525 chan->remote_acc_lat =
2526 le32_to_cpu(efs.acc_lat);
2527 chan->remote_sdu_itime =
2528 le32_to_cpu(efs.sdu_itime);
2529 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2530 sizeof(efs), (unsigned long) &efs);
2534 case L2CAP_MODE_STREAMING:
2535 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2537 L2CAP_EXT_HDR_SIZE -
2540 rfc.max_pdu_size = cpu_to_le16(size);
2541 chan->remote_mps = size;
2543 set_bit(CONF_MODE_DONE, &chan->conf_state);
2545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2546 sizeof(rfc), (unsigned long) &rfc);
2551 result = L2CAP_CONF_UNACCEPT;
2553 memset(&rfc, 0, sizeof(rfc));
2554 rfc.mode = chan->mode;
2557 if (result == L2CAP_CONF_SUCCESS)
2558 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2560 rsp->scid = cpu_to_le16(chan->dcid);
2561 rsp->result = cpu_to_le16(result);
2562 rsp->flags = cpu_to_le16(0x0000);
/* Walk a Configure Response and build the follow-up Configure Request
 * into data, adopting the values the remote proposed (MTU, flush
 * timeout, RFC, EWS, EFS).  A too-small MTU flips *result to UNACCEPT;
 * a mode change rejected by the state-2 policy or an unacceptable EFS
 * service type refuses the connection.  On SUCCESS/PENDING the
 * negotiated ERTM timeouts and MPS are committed to the channel.
 * Returns the request length (the return line is not visible in this
 * listing).
 */
2567 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2569 struct l2cap_conf_req *req = data;
2570 void *ptr = req->data;
2573 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2574 struct l2cap_conf_efs efs;
2576 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2578 while (len >= L2CAP_CONF_OPT_SIZE) {
2579 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2582 case L2CAP_CONF_MTU:
2583 if (val < L2CAP_DEFAULT_MIN_MTU) {
2584 *result = L2CAP_CONF_UNACCEPT;
2585 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2588 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2591 case L2CAP_CONF_FLUSH_TO:
2592 chan->flush_to = val;
2593 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2597 case L2CAP_CONF_RFC:
2598 if (olen == sizeof(rfc))
2599 memcpy(&rfc, (void *)val, olen);
2601 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2602 rfc.mode != chan->mode)
2603 return -ECONNREFUSED;
2607 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2608 sizeof(rfc), (unsigned long) &rfc);
2611 case L2CAP_CONF_EWS:
2612 chan->tx_win = min_t(u16, val,
2613 L2CAP_DEFAULT_EXT_WINDOW);
2614 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2618 case L2CAP_CONF_EFS:
2619 if (olen == sizeof(efs))
2620 memcpy(&efs, (void *)val, olen);
2622 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2623 efs.stype != L2CAP_SERV_NOTRAFIC &&
2624 efs.stype != chan->local_stype)
2625 return -ECONNREFUSED;
2627 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2628 sizeof(efs), (unsigned long) &efs);
2633 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2634 return -ECONNREFUSED;
2636 chan->mode = rfc.mode;
2638 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2640 case L2CAP_MODE_ERTM:
2641 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2642 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2643 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2645 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2646 chan->local_msdu = le16_to_cpu(efs.msdu);
2647 chan->local_sdu_itime =
2648 le32_to_cpu(efs.sdu_itime);
2649 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2650 chan->local_flush_to =
2651 le32_to_cpu(efs.flush_to);
2655 case L2CAP_MODE_STREAMING:
2656 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2660 req->dcid = cpu_to_le16(chan->dcid);
2661 req->flags = cpu_to_le16(0x0000);
/* Fill a bare Configure Response header (scid, result, flags) into
 * data; returns the response size (the return line is not visible in
 * this listing).
 */
2666 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2668 struct l2cap_conf_rsp *rsp = data;
2669 void *ptr = rsp->data;
2671 BT_DBG("chan %p", chan);
2673 rsp->scid = cpu_to_le16(chan->dcid);
2674 rsp->result = cpu_to_le16(result);
2675 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred incoming connection: send the pending success
 * Connect Response and, unless one was already sent (CONF_REQ_SENT),
 * fire off our first Configure Request.
 */
2680 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2682 struct l2cap_conn_rsp rsp;
2683 struct l2cap_conn *conn = chan->conn;
2686 rsp.scid = cpu_to_le16(chan->dcid);
2687 rsp.dcid = cpu_to_le16(chan->scid);
2688 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2689 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2690 l2cap_send_cmd(conn, chan->ident,
2691 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2693 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2696 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2697 l2cap_build_conf_req(chan, buf), buf);
2698 chan->num_conf_req++;
/* Extract the RFC option from a Configure Response we are about to
 * confirm; applies only in ERTM/streaming mode.  If a misbehaving
 * remote did not echo an RFC option, sane defaults are substituted
 * (current mode, default timeouts, imtu as PDU size) and an error is
 * logged.  Commits timeouts and MPS for ERTM, MPS for streaming.
 */
2701 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2705 struct l2cap_conf_rfc rfc;
2707 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2709 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2712 while (len >= L2CAP_CONF_OPT_SIZE) {
2713 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2716 case L2CAP_CONF_RFC:
2717 if (olen == sizeof(rfc))
2718 memcpy(&rfc, (void *)val, olen);
2723 /* Use sane default values in case a misbehaving remote device
2724 * did not send an RFC option.
2726 rfc.mode = chan->mode;
2727 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2728 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2729 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2731 BT_ERR("Expected RFC option was not found, using defaults");
2735 case L2CAP_MODE_ERTM:
2736 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2737 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2738 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2740 case L2CAP_MODE_STREAMING:
2741 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an inbound Command Reject.  A NOT_UNDERSTOOD reject matching
 * our outstanding Information Request means the peer predates that
 * command: cancel the info timer, mark feature discovery done and
 * start any channels that were waiting on it.
 */
2745 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2747 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2749 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2752 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2753 cmd->ident == conn->info_ident) {
2754 cancel_delayed_work(&conn->info_timer);
2756 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2757 conn->info_ident = 0;
2759 l2cap_conn_start(conn);
/* Handle an inbound Connection Request.
 *
 * Finds a listening channel for the PSM, enforces link security (SDP,
 * PSM 0x0001, is exempt) and the accept backlog, rejects duplicate
 * remote CIDs, creates the child channel and answers with success,
 * pending (authentication/authorization outstanding or deferred
 * setup), or an error result.  A PEND/NO_INFO result also kicks off
 * feature-mask discovery, and a success may immediately send our first
 * Configure Request.
 */
2765 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2767 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2768 struct l2cap_conn_rsp rsp;
2769 struct l2cap_chan *chan = NULL, *pchan;
2770 struct sock *parent, *sk = NULL;
2771 int result, status = L2CAP_CS_NO_INFO;
2773 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2774 __le16 psm = req->psm;
2776 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2778 /* Check if we have socket listening on psm */
2779 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2781 result = L2CAP_CR_BAD_PSM;
2787 mutex_lock(&conn->chan_lock);
2790 /* Check if the ACL is secure enough (if not SDP) */
2791 if (psm != cpu_to_le16(0x0001) &&
2792 !hci_conn_check_link_mode(conn->hcon)) {
2793 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2794 result = L2CAP_CR_SEC_BLOCK;
2798 result = L2CAP_CR_NO_MEM;
2800 /* Check for backlog size */
2801 if (sk_acceptq_is_full(parent)) {
2802 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2806 chan = pchan->ops->new_connection(pchan->data);
2812 /* Check if we already have channel with that dcid */
2813 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2814 sock_set_flag(sk, SOCK_ZAPPED);
2815 chan->ops->close(chan->data);
2819 hci_conn_hold(conn->hcon);
2821 bacpy(&bt_sk(sk)->src, conn->src);
2822 bacpy(&bt_sk(sk)->dst, conn->dst);
2826 bt_accept_enqueue(parent, sk);
2828 __l2cap_chan_add(conn, chan);
2832 __set_chan_timer(chan, sk->sk_sndtimeo);
2834 chan->ident = cmd->ident;
2836 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2837 if (l2cap_chan_check_security(chan)) {
2838 if (bt_sk(sk)->defer_setup) {
2839 __l2cap_state_change(chan, BT_CONNECT2);
2840 result = L2CAP_CR_PEND;
2841 status = L2CAP_CS_AUTHOR_PEND;
2842 parent->sk_data_ready(parent, 0);
2844 __l2cap_state_change(chan, BT_CONFIG);
2845 result = L2CAP_CR_SUCCESS;
2846 status = L2CAP_CS_NO_INFO;
2849 __l2cap_state_change(chan, BT_CONNECT2);
2850 result = L2CAP_CR_PEND;
2851 status = L2CAP_CS_AUTHEN_PEND;
2854 __l2cap_state_change(chan, BT_CONNECT2);
2855 result = L2CAP_CR_PEND;
2856 status = L2CAP_CS_NO_INFO;
2860 release_sock(parent);
2861 mutex_unlock(&conn->chan_lock);
2864 rsp.scid = cpu_to_le16(scid);
2865 rsp.dcid = cpu_to_le16(dcid);
2866 rsp.result = cpu_to_le16(result);
2867 rsp.status = cpu_to_le16(status);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2870 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2871 struct l2cap_info_req info;
2872 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2874 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2875 conn->info_ident = l2cap_get_ident(conn);
2877 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2879 l2cap_send_cmd(conn, conn->info_ident,
2880 L2CAP_INFO_REQ, sizeof(info), &info);
2883 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2884 result == L2CAP_CR_SUCCESS) {
2886 set_bit(CONF_REQ_SENT, &chan->conf_state);
2887 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2888 l2cap_build_conf_req(chan, buf), buf);
2889 chan->num_conf_req++;
/* Handle a Connection Response: locate the channel by scid (or by the
 * command ident when no channel matches), then either move to
 * BT_CONFIG and send the first Configure Request (success), remember
 * the pending state, or tear the channel down with ECONNREFUSED.
 */
2895 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2897 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2898 u16 scid, dcid, result, status;
2899 struct l2cap_chan *chan;
2903 scid = __le16_to_cpu(rsp->scid);
2904 dcid = __le16_to_cpu(rsp->dcid);
2905 result = __le16_to_cpu(rsp->result);
2906 status = __le16_to_cpu(rsp->status);
2908 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2909 dcid, scid, result, status);
2911 mutex_lock(&conn->chan_lock);
2914 chan = __l2cap_get_chan_by_scid(conn, scid);
2920 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2929 l2cap_chan_lock(chan);
2932 case L2CAP_CR_SUCCESS:
2933 l2cap_state_change(chan, BT_CONFIG);
2936 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2938 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2941 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2942 l2cap_build_conf_req(chan, req), req);
2943 chan->num_conf_req++;
2947 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2951 l2cap_chan_del(chan, ECONNREFUSED);
2955 l2cap_chan_unlock(chan);
2958 mutex_unlock(&conn->chan_lock);
2963 static inline void set_default_fcs(struct l2cap_chan *chan)
2965 /* FCS is enabled only in ERTM or streaming mode, if one or both
2968 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2969 chan->fcs = L2CAP_FCS_NONE;
2970 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2971 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an inbound Configure Request.
 *
 * Rejects requests for channels not in BT_CONFIG/BT_CONNECT2 with an
 * INVALID_CID command reject, and requests whose accumulated options
 * would overflow conf_req with CONF_REJECT.  Partial requests
 * (continuation flag set) are buffered and answered with an empty
 * success.  A complete request is parsed via l2cap_parse_conf_req()
 * and answered; once both directions are configured the channel
 * initialises ERTM state if needed and moves to BT_CONNECTED.  Also
 * completes the crossed PENDING/PENDING negotiation case.
 */
2974 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2976 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2979 struct l2cap_chan *chan;
2982 dcid = __le16_to_cpu(req->dcid);
2983 flags = __le16_to_cpu(req->flags);
2985 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2987 chan = l2cap_get_chan_by_scid(conn, dcid);
2991 l2cap_chan_lock(chan);
2993 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2994 struct l2cap_cmd_rej_cid rej;
2996 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2997 rej.scid = cpu_to_le16(chan->scid);
2998 rej.dcid = cpu_to_le16(chan->dcid);
3000 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3005 /* Reject if config buffer is too small. */
3006 len = cmd_len - sizeof(*req);
3007 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3008 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3009 l2cap_build_conf_rsp(chan, rsp,
3010 L2CAP_CONF_REJECT, flags), rsp);
3015 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3016 chan->conf_len += len;
3018 if (flags & 0x0001) {
3019 /* Incomplete config. Send empty response. */
3020 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3021 l2cap_build_conf_rsp(chan, rsp,
3022 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3026 /* Complete config. */
3027 len = l2cap_parse_conf_req(chan, rsp)
3029 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3033 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3034 chan->num_conf_rsp++;
3036 /* Reset config buffer. */
3039 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3042 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3043 set_default_fcs(chan);
3045 l2cap_state_change(chan, BT_CONNECTED);
3047 chan->next_tx_seq = 0;
3048 chan->expected_tx_seq = 0;
3049 skb_queue_head_init(&chan->tx_q);
3050 if (chan->mode == L2CAP_MODE_ERTM)
3051 err = l2cap_ertm_init(chan);
3054 l2cap_send_disconn_req(chan->conn, chan, -err);
3056 l2cap_chan_ready(chan);
3061 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3063 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3064 l2cap_build_conf_req(chan, buf), buf);
3065 chan->num_conf_req++;
3068 /* Got Conf Rsp PENDING from remote side and asume we sent
3069 Conf Rsp PENDING in the code above */
3070 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3071 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3073 /* check compatibility */
3075 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3076 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3078 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3079 l2cap_build_conf_rsp(chan, rsp,
3080 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3084 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Configuration Response for one of our
 * Configuration Requests.
 *
 * Dispatches on the response result: SUCCESS records the negotiated RFC
 * options; PENDING re-checks pending local config; UNACCEPT re-parses
 * the peer's counter-proposal and retries (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); any other result tears the channel down.
 * Sets CONF_INPUT_DONE and, when output is also done, moves the channel
 * to BT_CONNECTED.
 */
3088 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3090 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3091 u16 scid, flags, result;
3092 struct l2cap_chan *chan;
3093 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3096 scid = __le16_to_cpu(rsp->scid);
3097 flags = __le16_to_cpu(rsp->flags);
3098 result = __le16_to_cpu(rsp->result);
3100 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3103 chan = l2cap_get_chan_by_scid(conn, scid);
3107 l2cap_chan_lock(chan);
3110 case L2CAP_CONF_SUCCESS:
3111 l2cap_conf_rfc_get(chan, rsp->data, len);
3112 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3115 case L2CAP_CONF_PENDING:
3116 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3118 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3121 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3124 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3128 /* check compatibility */
3130 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3131 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3133 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3134 l2cap_build_conf_rsp(chan, buf,
3135 L2CAP_CONF_SUCCESS, 0x0000), buf);
3139 case L2CAP_CONF_UNACCEPT:
/* Peer rejected our options: rebuild a request from its
 * counter-proposal and resend, unless we already retried too
 * often or the proposal does not fit our request buffer. */
3140 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3143 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3144 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3148 /* throw out any old stored conf requests */
3149 result = L2CAP_CONF_SUCCESS;
3150 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3153 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3157 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3158 L2CAP_CONF_REQ, len, req);
3159 chan->num_conf_req++;
3160 if (result != L2CAP_CONF_SUCCESS)
/* Unknown/fatal result: flag the error and start disconnect. */
3166 l2cap_chan_set_err(chan, ECONNRESET);
3168 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3169 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3176 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3178 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3179 set_default_fcs(chan);
3181 l2cap_state_change(chan, BT_CONNECTED);
3182 chan->next_tx_seq = 0;
3183 chan->expected_tx_seq = 0;
3184 skb_queue_head_init(&chan->tx_q);
3185 if (chan->mode == L2CAP_MODE_ERTM)
3186 err = l2cap_ertm_init(chan);
3189 l2cap_send_disconn_req(chan->conn, chan, -err);
3191 l2cap_chan_ready(chan);
3195 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response (dcid/scid mirrored back from our channel),
 * tear the channel down with ECONNRESET and notify the socket layer via
 * chan->ops->close(). Note the request's dcid names OUR source CID. */
3199 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3201 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3202 struct l2cap_disconn_rsp rsp;
3204 struct l2cap_chan *chan;
3207 scid = __le16_to_cpu(req->scid);
3208 dcid = __le16_to_cpu(req->dcid);
3210 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3212 mutex_lock(&conn->chan_lock);
3214 chan = __l2cap_get_chan_by_scid(conn, dcid);
3216 mutex_unlock(&conn->chan_lock);
3220 l2cap_chan_lock(chan);
/* Response carries our scid as its dcid and vice versa. */
3224 rsp.dcid = cpu_to_le16(chan->scid);
3225 rsp.scid = cpu_to_le16(chan->dcid);
3226 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3229 sk->sk_shutdown = SHUTDOWN_MASK;
3232 l2cap_chan_del(chan, ECONNRESET);
3234 l2cap_chan_unlock(chan);
3236 chan->ops->close(chan->data);
3238 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response to a disconnect we
 * initiated: remove the channel (error 0 = clean close) and notify the
 * upper layer via chan->ops->close(). */
3243 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3245 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3247 struct l2cap_chan *chan;
3249 scid = __le16_to_cpu(rsp->scid);
3250 dcid = __le16_to_cpu(rsp->dcid);
3252 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3254 mutex_lock(&conn->chan_lock);
3256 chan = __l2cap_get_chan_by_scid(conn, scid);
3258 mutex_unlock(&conn->chan_lock);
3262 l2cap_chan_lock(chan);
3264 l2cap_chan_del(chan, 0);
3266 l2cap_chan_unlock(chan);
3268 chan->ops->close(chan->data);
3270 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Answers feature-mask
 * and fixed-channel queries with our capabilities; any other type gets
 * an L2CAP_IR_NOTSUPP response. */
3275 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3277 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3280 type = __le16_to_cpu(req->type);
3282 BT_DBG("type 0x%4.4x", type);
3284 if (type == L2CAP_IT_FEAT_MASK) {
3286 u32 feat_mask = l2cap_feat_mask;
3287 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3288 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3289 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and extended flow/window, presumably
 * behind a config/conditional not visible here — confirm). */
3291 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3294 feat_mask |= L2CAP_FEAT_EXT_FLOW
3295 | L2CAP_FEAT_EXT_WINDOW;
3297 put_unaligned_le32(feat_mask, rsp->data);
3298 l2cap_send_cmd(conn, cmd->ident,
3299 L2CAP_INFO_RSP, sizeof(buf), buf);
3300 } else if (type == L2CAP_IT_FIXED_CHAN) {
3302 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed-channel bit toggled based on AMP support. */
3305 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3307 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3309 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3310 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3311 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3312 l2cap_send_cmd(conn, cmd->ident,
3313 L2CAP_INFO_RSP, sizeof(buf), buf);
3315 struct l2cap_info_rsp rsp;
3316 rsp.type = cpu_to_le16(type);
3317 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3318 l2cap_send_cmd(conn, cmd->ident,
3319 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response to our own query.
 * Validates the ident against conn->info_ident (info req/rsp are not
 * bound to a channel), caches the peer's feature mask and fixed-channel
 * mask, chains a FIXED_CHAN query after FEAT_MASK when the peer supports
 * it, and kicks pending connections via l2cap_conn_start(). */
3325 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3327 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3330 type = __le16_to_cpu(rsp->type);
3331 result = __le16_to_cpu(rsp->result);
3333 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3335 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3336 if (cmd->ident != conn->info_ident ||
3337 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3340 cancel_delayed_work(&conn->info_timer);
3342 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused: mark discovery done and proceed without it. */
3343 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3344 conn->info_ident = 0;
3346 l2cap_conn_start(conn);
3352 case L2CAP_IT_FEAT_MASK:
3353 conn->feat_mask = get_unaligned_le32(rsp->data);
3355 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3356 struct l2cap_info_req req;
3357 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3359 conn->info_ident = l2cap_get_ident(conn);
3361 l2cap_send_cmd(conn, conn->info_ident,
3362 L2CAP_INFO_REQ, sizeof(req), &req);
3364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3365 conn->info_ident = 0;
3367 l2cap_conn_start(conn);
3371 case L2CAP_IT_FIXED_CHAN:
3372 conn->fixed_chan_mask = rsp->data[0];
3373 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3374 conn->info_ident = 0;
3376 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a stub: validates the
 * command length, then always answers L2CAP_CR_NO_MEM (reject) until AMP
 * channel creation is actually implemented. */
3383 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3384 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3387 struct l2cap_create_chan_req *req = data;
3388 struct l2cap_create_chan_rsp rsp;
3391 if (cmd_len != sizeof(*req))
3397 psm = le16_to_cpu(req->psm);
3398 scid = le16_to_cpu(req->scid);
3400 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3402 /* Placeholder: Always reject */
3404 rsp.scid = cpu_to_le16(scid);
3405 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3406 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3408 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response. The response layout matches a
 * plain Connect Response, so delegate to l2cap_connect_rsp(). */
3414 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3415 struct l2cap_cmd_hdr *cmd, void *data)
3417 BT_DBG("conn %p", conn);
3419 return l2cap_connect_rsp(conn, cmd, data);
/* Send an AMP Move Channel Response with the given initiator CID and
 * result, reusing the ident of the request being answered. */
3422 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3423 u16 icid, u16 result)
3425 struct l2cap_move_chan_rsp rsp;
3427 BT_DBG("icid %d, result %d", icid, result);
3429 rsp.icid = cpu_to_le16(icid);
3430 rsp.result = cpu_to_le16(result);
3432 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send an AMP Move Channel Confirm, allocating a fresh signalling ident
 * and recording it in chan->ident so the matching Confirm Response can
 * be correlated later. chan may be NULL at some call sites (see
 * l2cap_move_channel_rsp) — the deref of chan->ident here presumably
 * assumes non-NULL; TODO confirm against callers. */
3435 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3436 struct l2cap_chan *chan, u16 icid, u16 result)
3438 struct l2cap_move_chan_cfm cfm;
3441 BT_DBG("icid %d, result %d", icid, result);
3443 ident = l2cap_get_ident(conn);
3445 chan->ident = ident;
3447 cfm.icid = cpu_to_le16(icid);
3448 cfm.result = cpu_to_le16(result);
3450 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send an AMP Move Channel Confirm Response echoing the initiator CID,
 * using the ident of the Confirm being acknowledged. */
3453 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3456 struct l2cap_move_chan_cfm_rsp rsp;
3458 BT_DBG("icid %d", icid);
3460 rsp.icid = cpu_to_le16(icid);
3461 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle an AMP Move Channel Request. Stub implementation: validates
 * length then always refuses with L2CAP_MR_NOT_ALLOWED. */
3464 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3465 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3467 struct l2cap_move_chan_req *req = data;
3469 u16 result = L2CAP_MR_NOT_ALLOWED;
3471 if (cmd_len != sizeof(*req))
3474 icid = le16_to_cpu(req->icid);
3476 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3481 /* Placeholder: Always refuse */
3482 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle an AMP Move Channel Response. Stub: validates length then
 * always sends Move Channel Confirm with L2CAP_MC_UNCONFIRMED.
 * NOTE(review): passes NULL as the chan argument while
 * l2cap_send_move_chan_cfm writes chan->ident — verify this path. */
3487 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3488 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3490 struct l2cap_move_chan_rsp *rsp = data;
3493 if (cmd_len != sizeof(*rsp))
3496 icid = le16_to_cpu(rsp->icid);
3497 result = le16_to_cpu(rsp->result);
3499 BT_DBG("icid %d, result %d", icid, result);
3501 /* Placeholder: Always unconfirmed */
3502 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle an AMP Move Channel Confirm: validate length and acknowledge
 * with a Confirm Response. No channel state is updated yet (stub). */
3507 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3508 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3510 struct l2cap_move_chan_cfm *cfm = data;
3513 if (cmd_len != sizeof(*cfm))
3516 icid = le16_to_cpu(cfm->icid);
3517 result = le16_to_cpu(cfm->result);
3519 BT_DBG("icid %d, result %d", icid, result);
3521 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle an AMP Move Channel Confirm Response: only validates the
 * command length and logs the icid (stub — no state change). */
3526 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3527 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3529 struct l2cap_move_chan_cfm_rsp *rsp = data;
3532 if (cmd_len != sizeof(*rsp))
3535 icid = le16_to_cpu(rsp->icid);
3537 BT_DBG("icid %d", icid);
/* Validate LE connection parameter update values per the constraints
 * used by the LE Connection Parameter Update procedure:
 *  - interval: min <= max, min >= 6 (7.5ms), max <= 3200 (4s)
 *  - supervision timeout multiplier within [10, 3200]
 *  - timeout must exceed the effective max interval (to_multiplier*8 > max)
 *  - latency <= 499 and small enough that supervision can't expire
 * Units are the controller's raw units (interval 1.25ms, timeout 10ms). */
3542 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3547 if (min > max || min < 6 || max > 3200)
3550 if (to_multiplier < 10 || to_multiplier > 3200)
3553 if (max >= to_multiplier * 8)
3556 max_latency = (to_multiplier * 8 / max) - 1;
3557 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only the master may act on it (HCI_LM_MASTER check). Validates the
 * proposed parameters, always answers with accept/reject, and on accept
 * asks the controller to apply them via hci_le_conn_update(). */
3563 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3564 struct l2cap_cmd_hdr *cmd, u8 *data)
3566 struct hci_conn *hcon = conn->hcon;
3567 struct l2cap_conn_param_update_req *req;
3568 struct l2cap_conn_param_update_rsp rsp;
3570 u16 min, max, latency, to_multiplier, cmd_len;
3572 if (!(hcon->link_mode & HCI_LM_MASTER))
3575 cmd_len = __le16_to_cpu(cmd->len);
3576 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3579 req = (struct l2cap_conn_param_update_req *) data;
3580 min = __le16_to_cpu(req->min);
3581 max = __le16_to_cpu(req->max);
3582 latency = __le16_to_cpu(req->latency);
3583 to_multiplier = __le16_to_cpu(req->to_multiplier);
3585 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3586 min, max, latency, to_multiplier);
3588 memset(&rsp, 0, sizeof(rsp));
3590 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3592 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3594 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3596 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only push the new parameters to the controller on success. */
3600 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command (received on the L2CAP
 * signalling channel) to its handler. Unknown opcodes are logged; echo
 * requests are answered inline by reflecting the payload. Returns the
 * handler's error code (0 on success). */
3605 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3606 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3610 switch (cmd->code) {
3611 case L2CAP_COMMAND_REJ:
3612 l2cap_command_rej(conn, cmd, data);
3615 case L2CAP_CONN_REQ:
3616 err = l2cap_connect_req(conn, cmd, data);
3619 case L2CAP_CONN_RSP:
3620 err = l2cap_connect_rsp(conn, cmd, data);
3623 case L2CAP_CONF_REQ:
3624 err = l2cap_config_req(conn, cmd, cmd_len, data);
3627 case L2CAP_CONF_RSP:
3628 err = l2cap_config_rsp(conn, cmd, data);
3631 case L2CAP_DISCONN_REQ:
3632 err = l2cap_disconnect_req(conn, cmd, data);
3635 case L2CAP_DISCONN_RSP:
3636 err = l2cap_disconnect_rsp(conn, cmd, data);
3639 case L2CAP_ECHO_REQ:
/* Echo: bounce the request payload straight back. */
3640 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3643 case L2CAP_ECHO_RSP:
3646 case L2CAP_INFO_REQ:
3647 err = l2cap_information_req(conn, cmd, data);
3650 case L2CAP_INFO_RSP:
3651 err = l2cap_information_rsp(conn, cmd, data);
3654 case L2CAP_CREATE_CHAN_REQ:
3655 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3658 case L2CAP_CREATE_CHAN_RSP:
3659 err = l2cap_create_channel_rsp(conn, cmd, data);
3662 case L2CAP_MOVE_CHAN_REQ:
3663 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3666 case L2CAP_MOVE_CHAN_RSP:
3667 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3670 case L2CAP_MOVE_CHAN_CFM:
3671 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3674 case L2CAP_MOVE_CHAN_CFM_RSP:
3675 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3679 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command. Only the connection
 * parameter update procedure is handled; Command Reject and Update
 * Response are accepted silently, anything else is logged as unknown. */
3687 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3688 struct l2cap_cmd_hdr *cmd, u8 *data)
3690 switch (cmd->code) {
3691 case L2CAP_COMMAND_REJ:
3694 case L2CAP_CONN_PARAM_UPDATE_REQ:
3695 return l2cap_conn_param_update_req(conn, cmd, data);
3697 case L2CAP_CONN_PARAM_UPDATE_RSP:
3701 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signalling channel: iterate over the
 * concatenated command headers it contains, validate each header
 * (length within remaining data, non-zero ident), and dispatch to the
 * LE or BR/EDR command handler depending on link type. On a handler
 * error, answer with a generic Command Reject. */
3706 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3707 struct sk_buff *skb)
3709 u8 *data = skb->data;
3711 struct l2cap_cmd_hdr cmd;
3714 l2cap_raw_recv(conn, skb);
3716 while (len >= L2CAP_CMD_HDR_SIZE) {
3718 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3719 data += L2CAP_CMD_HDR_SIZE;
3720 len -= L2CAP_CMD_HDR_SIZE;
3722 cmd_len = le16_to_cpu(cmd.len);
3724 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or ident 0
 * (reserved), means the packet is corrupt. */
3726 if (cmd_len > len || !cmd.ident) {
3727 BT_DBG("corrupted command");
3731 if (conn->hcon->type == LE_LINK)
3732 err = l2cap_le_sig_cmd(conn, &cmd, data)
3734 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3737 struct l2cap_cmd_rej_unk rej;
3739 BT_ERR("Wrong link type (%d)", err);
3741 /* FIXME: Map err to a valid reason */
3742 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3743 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 * Trims the 2-byte FCS off the skb, then recomputes CRC16 over the
 * L2CAP header (located just before skb->data, hence the negative
 * offset) plus payload and compares it with the received value.
 * Returns 0 on match / when FCS is disabled, -EBADMSG-style nonzero
 * otherwise (exact return in a sampled-out line). */
3753 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3755 u16 our_fcs, rcv_fcs;
3758 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3759 hdr_size = L2CAP_EXT_HDR_SIZE;
3761 hdr_size = L2CAP_ENH_HDR_SIZE;
3763 if (chan->fcs == L2CAP_FCS_CRC16) {
3764 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the stored FCS. */
3765 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3766 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3768 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if we are locally busy,
 * otherwise retransmit/send pending I-frames, and fall back to a plain
 * RR if nothing was actually transmitted so the peer still gets its
 * F-bit acknowledgement. */
3774 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3778 chan->frames_sent = 0;
3780 control |= __set_reqseq(chan, chan->buffer_seq);
3782 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3783 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3784 l2cap_send_sframe(chan, control);
3785 set_bit(CONN_RNR_SENT, &chan->conn_state);
3788 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3789 l2cap_retransmit_frames(chan);
3791 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: ack with an RR instead. */
3793 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3794 chan->frames_sent == 0) {
3795 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3796 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ hold queue, keeping
 * the queue ordered by tx_seq offset relative to buffer_seq. Duplicate
 * tx_seq values are detected (the equal-seq check) so the caller can
 * drop the frame; otherwise the skb is queued before the first entry
 * with a larger offset, or appended at the tail. */
3800 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3802 struct sk_buff *next_skb;
3803 int tx_seq_offset, next_tx_seq_offset;
3805 bt_cb(skb)->tx_seq = tx_seq;
3806 bt_cb(skb)->sar = sar;
3808 next_skb = skb_peek(&chan->srej_q);
3810 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3813 if (bt_cb(next_skb)->tx_seq == tx_seq)
3816 next_tx_seq_offset = __seq_offset(chan,
3817 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3819 if (next_tx_seq_offset > tx_seq_offset) {
3820 __skb_queue_before(&chan->srej_q, next_skb, skb);
3824 if (skb_queue_is_last(&chan->srej_q, next_skb))
3827 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3830 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's fragment list and keep the aggregate
 * accounting (len/data_len/truesize) consistent. *last_frag caches the
 * current tail of the frag list so appending stays O(1). */
3835 static void append_skb_frag(struct sk_buff *skb,
3836 struct sk_buff *new_frag, struct sk_buff **last_frag)
3838 /* skb->len reflects data in skb as well as all fragments
3839 * skb->data_len reflects only data in fragments
 */
3841 if (!skb_has_frag_list(skb))
3842 skb_shinfo(skb)->frag_list = new_frag;
3844 new_frag->next = NULL;
3846 (*last_frag)->next = new_frag;
3847 *last_frag = new_frag;
3849 skb->len += new_frag->len;
3850 skb->data_len += new_frag->len;
3851 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames, driven by the SAR bits in
 * the control field:
 *  - UNSEGMENTED: deliver the skb to the upper layer directly;
 *  - START: read the 2-byte SDU length header, sanity-check it against
 *    the MTU and begin accumulating fragments;
 *  - CONTINUE / END: append to the partial SDU via append_skb_frag()
 *    and deliver once the accumulated length matches sdu_len.
 * On size violations the partial SDU is freed and an error returned so
 * the caller can reset the link. */
3854 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3858 switch (__get_ctrl_sar(chan, control)) {
3859 case L2CAP_SAR_UNSEGMENTED:
3863 err = chan->ops->recv(chan->data, skb);
3866 case L2CAP_SAR_START:
3870 chan->sdu_len = get_unaligned_le16(skb->data);
3871 skb_pull(skb, L2CAP_SDULEN_SIZE);
3873 if (chan->sdu_len > chan->imtu) {
3878 if (skb->len >= chan->sdu_len)
3882 chan->sdu_last_frag = skb;
3888 case L2CAP_SAR_CONTINUE:
3892 append_skb_frag(chan->sdu, skb,
3893 &chan->sdu_last_frag);
3896 if (chan->sdu->len >= chan->sdu_len)
/* SAR_END path: append the final fragment and deliver. */
3906 append_skb_frag(chan->sdu, skb,
3907 &chan->sdu_last_frag);
3910 if (chan->sdu->len != chan->sdu_len)
3913 err = chan->ops->recv(chan->data, chan->sdu);
3916 /* Reassembly complete */
3918 chan->sdu_last_frag = NULL;
/* Error path: discard any partially assembled SDU. */
3926 kfree_skb(chan->sdu);
3928 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: mark the connection busy, clear any
 * pending SREJ bookkeeping, and arm the ack timer so an RNR gets sent. */
3935 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3937 BT_DBG("chan %p, Enter local busy", chan);
3939 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3940 l2cap_seq_list_clear(&chan->srej_list);
3942 __set_ack_timer(chan);
/* Leave the ERTM local-busy state. If an RNR had been sent, send an RR
 * with the P-bit set to poll the peer, start the monitor timer, and wait
 * for the F-bit (CONN_WAIT_F) before resuming; then clear the busy/RNR
 * flags. */
3945 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3949 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3952 control = __set_reqseq(chan, chan->buffer_seq);
3953 control |= __set_ctrl_poll(chan);
3954 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3955 l2cap_send_sframe(chan, control);
3956 chan->retry_count = 1;
3958 __clear_retrans_timer(chan);
3959 __set_monitor_timer(chan);
3961 set_bit(CONN_WAIT_F, &chan->conn_state);
3964 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3965 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3967 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point for the upper layer to toggle local-busy flow
 * control; only meaningful for ERTM channels. */
3970 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3972 if (chan->mode == L2CAP_MODE_ERTM) {
3974 l2cap_ertm_enter_local_busy(chan);
3976 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ hold queue: starting from tx_seq, pop consecutive
 * queued frames and push them through SDU reassembly, advancing
 * buffer_seq_srej. Stops at the first gap or when local-busy is set.
 * Reassembly failure escalates to a disconnect. */
3980 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3982 struct sk_buff *skb;
3985 while ((skb = skb_peek(&chan->srej_q)) &&
3986 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3989 if (bt_cb(skb)->tx_seq != tx_seq)
3992 skb = skb_dequeue(&chan->srej_q);
3993 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3994 err = l2cap_reassemble_sdu(chan, skb, control);
3997 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4001 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4002 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every entry in the srej_l list up to (and
 * including) tx_seq; matched entries are consumed, the rest are re-sent
 * and re-queued at the tail to preserve order. */
4006 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4008 struct srej_list *l, *tmp;
4011 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4012 if (l->tx_seq == tx_seq) {
4017 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4018 control |= __set_reqseq(chan, l->tx_seq);
4019 l2cap_send_sframe(chan, control);
4021 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * the received tx_seq, recording each requested seq both in the
 * srej_list seq-list and as a srej_list entry on srej_l. Allocation is
 * GFP_ATOMIC since this runs in the receive path; failure is returned
 * to the caller (which disconnects). Finally skips expected_tx_seq past
 * the received frame. */
4025 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4027 struct srej_list *new;
4030 while (tx_seq != chan->expected_tx_seq) {
4031 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4032 control |= __set_reqseq(chan, chan->expected_tx_seq);
4033 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4034 l2cap_send_sframe(chan, control);
4036 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4040 new->tx_seq = chan->expected_tx_seq;
4042 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4044 list_add_tail(&new->list, &chan->srej_l);
4047 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM receive path for I-frames.
 *
 * Handles, in order: F-bit processing while waiting for a poll answer
 * (CONN_WAIT_F), piggybacked acknowledgements (req_seq), tx_seq window
 * validation (disconnect if outside tx_win), local-busy drop, then the
 * three arrival cases:
 *  - in-sequence frame: reassemble, advance buffer_seq, and ack every
 *    num_to_ack frames (tx_win/6 + 1) or arm the ack timer;
 *  - expected frame while in SREJ recovery: queue it and drain the
 *    SREJ hold queue via l2cap_check_srej_gap();
 *  - out-of-sequence frame: enter (or extend) SREJ recovery, queueing
 *    the frame and emitting SREJ requests for the gap.
 * Any allocation/reassembly failure escalates to a disconnect. */
4052 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4054 u16 tx_seq = __get_txseq(chan, rx_control);
4055 u16 req_seq = __get_reqseq(chan, rx_control);
4056 u8 sar = __get_ctrl_sar(chan, rx_control);
4057 int tx_seq_offset, expected_tx_seq_offset;
4058 int num_to_ack = (chan->tx_win/6) + 1;
4061 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4062 tx_seq, rx_control);
4064 if (__is_ctrl_final(chan, rx_control) &&
4065 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4066 __clear_monitor_timer(chan);
4067 if (chan->unacked_frames > 0)
4068 __set_retrans_timer(chan);
4069 clear_bit(CONN_WAIT_F, &chan->conn_state);
4072 chan->expected_ack_seq = req_seq;
4073 l2cap_drop_acked_frames(chan);
4075 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4077 /* invalid tx_seq */
4078 if (tx_seq_offset >= chan->tx_win) {
4079 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4083 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4084 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4085 l2cap_send_ack(chan);
4089 if (tx_seq == chan->expected_tx_seq)
4092 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4093 struct srej_list *first;
4095 first = list_first_entry(&chan->srej_l,
4096 struct srej_list, list);
4097 if (tx_seq == first->tx_seq) {
4098 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4099 l2cap_check_srej_gap(chan, tx_seq);
4101 list_del(&first->list);
/* All SREJ'd frames recovered: leave SREJ state. */
4104 if (list_empty(&chan->srej_l)) {
4105 chan->buffer_seq = chan->buffer_seq_srej;
4106 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4107 l2cap_send_ack(chan);
4108 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4111 struct srej_list *l;
4113 /* duplicated tx_seq */
4114 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4117 list_for_each_entry(l, &chan->srej_l, list) {
4118 if (l->tx_seq == tx_seq) {
4119 l2cap_resend_srejframe(chan, tx_seq);
4124 err = l2cap_send_srejframe(chan, tx_seq);
4126 l2cap_send_disconn_req(chan->conn, chan, -err);
4131 expected_tx_seq_offset = __seq_offset(chan,
4132 chan->expected_tx_seq, chan->buffer_seq);
4134 /* duplicated tx_seq */
4135 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
4138 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4140 BT_DBG("chan %p, Enter SREJ", chan);
4142 INIT_LIST_HEAD(&chan->srej_l);
4143 chan->buffer_seq_srej = chan->buffer_seq;
4145 __skb_queue_head_init(&chan->srej_q);
4146 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4148 /* Set P-bit only if there are some I-frames to ack. */
4149 if (__clear_ack_timer(chan))
4150 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4152 err = l2cap_send_srejframe(chan, tx_seq);
4154 l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-sequence frame: normal delivery path. */
4161 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4163 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4164 bt_cb(skb)->tx_seq = tx_seq;
4165 bt_cb(skb)->sar = sar;
4166 __skb_queue_tail(&chan->srej_q, skb);
4170 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4171 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4174 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4178 if (__is_ctrl_final(chan, rx_control)) {
4179 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4180 l2cap_retransmit_frames(chan);
4184 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4185 if (chan->num_acked == num_to_ack - 1)
4186 l2cap_send_ack(chan);
4188 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: process the piggy-
 * backed ack, then answer a P-bit poll (with SREJ tail or I/RR/RNR as
 * appropriate), handle an F-bit final (possibly retransmitting after a
 * REJ exchange), or simply resume transmission. */
4197 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4199 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4200 __get_reqseq(chan, rx_control), rx_control);
4202 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4203 l2cap_drop_acked_frames(chan);
4205 if (__is_ctrl_poll(chan, rx_control)) {
4206 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4207 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4208 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4209 (chan->unacked_frames > 0))
4210 __set_retrans_timer(chan);
4212 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4213 l2cap_send_srejtail(chan);
4215 l2cap_send_i_or_rr_or_rnr(chan);
4218 } else if (__is_ctrl_final(chan, rx_control)) {
4219 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4221 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4222 l2cap_retransmit_frames(chan);
/* Plain RR: peer is ready again, resume sending. */
4225 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4226 (chan->unacked_frames > 0))
4227 __set_retrans_timer(chan);
4229 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4230 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4231 l2cap_send_ack(chan);
4233 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: ack up to req_seq and retransmit from
 * there. With an F-bit, retransmission is skipped if a REJ action was
 * already pending (CONN_REJ_ACT); without it, retransmit and, while
 * waiting for a poll answer, remember the REJ action. */
4237 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4239 u16 tx_seq = __get_reqseq(chan, rx_control);
4241 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4243 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4245 chan->expected_ack_seq = tx_seq;
4246 l2cap_drop_acked_frames(chan);
4248 if (__is_ctrl_final(chan, rx_control)) {
4249 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4250 l2cap_retransmit_frames(chan);
4252 l2cap_retransmit_frames(chan);
4254 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4255 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: selectively retransmit the single
 * frame req_seq names. A P-bit also acks outstanding frames, sets the
 * F-bit for the reply and resumes sending; an F-bit clears a matching
 * saved SREJ action (CONN_SREJ_ACT/srej_save_reqseq) or retransmits;
 * otherwise retransmit and remember the SREJ while awaiting a poll
 * answer. */
4258 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4260 u16 tx_seq = __get_reqseq(chan, rx_control);
4262 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4264 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4266 if (__is_ctrl_poll(chan, rx_control)) {
4267 chan->expected_ack_seq = tx_seq;
4268 l2cap_drop_acked_frames(chan);
4270 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4271 l2cap_retransmit_one_frame(chan, tx_seq);
4273 l2cap_ertm_send(chan);
4275 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4276 chan->srej_save_reqseq = tx_seq;
4277 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4279 } else if (__is_ctrl_final(chan, rx_control)) {
4280 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4281 chan->srej_save_reqseq == tx_seq)
4282 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4284 l2cap_retransmit_one_frame(chan, tx_seq);
4286 l2cap_retransmit_one_frame(chan, tx_seq);
4287 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4288 chan->srej_save_reqseq = tx_seq;
4289 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, process the piggybacked ack, and answer a P-bit poll with the
 * F-bit (via RR/RNR or the SREJ tail depending on recovery state).
 * The retransmission timer is stopped since the peer cannot accept
 * new frames. */
4294 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4296 u16 tx_seq = __get_reqseq(chan, rx_control);
4298 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4300 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4301 chan->expected_ack_seq = tx_seq;
4302 l2cap_drop_acked_frames(chan);
4304 if (__is_ctrl_poll(chan, rx_control))
4305 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4307 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4308 __clear_retrans_timer(chan);
4309 if (__is_ctrl_poll(chan, rx_control))
4310 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: answer a poll with the SREJ tail or an RR. */
4314 if (__is_ctrl_poll(chan, rx_control)) {
4315 l2cap_send_srejtail(chan);
4317 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4318 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received ERTM S-frame (supervisory frame) to the RR / REJ
 * / SREJ / RNR handler after common F-bit processing while waiting for
 * a poll answer. The skb carries no payload for S-frames and is freed
 * by the caller path. */
4322 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4324 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4326 if (__is_ctrl_final(chan, rx_control) &&
4327 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4328 __clear_monitor_timer(chan);
4329 if (chan->unacked_frames > 0)
4330 __set_retrans_timer(chan);
4331 clear_bit(CONN_WAIT_F, &chan->conn_state);
4334 switch (__get_ctrl_super(chan, rx_control)) {
4335 case L2CAP_SUPER_RR:
4336 l2cap_data_channel_rrframe(chan, rx_control);
4339 case L2CAP_SUPER_REJ:
4340 l2cap_data_channel_rejframe(chan, rx_control);
4343 case L2CAP_SUPER_SREJ:
4344 l2cap_data_channel_srejframe(chan, rx_control);
4347 case L2CAP_SUPER_RNR:
4348 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a raw ERTM PDU: strip and decode the control field,
 * verify the FCS (silently dropping corrupt frames — the peer's
 * recovery will ask for retransmission), validate the payload length
 * against MPS and the req_seq against the unacked window, then hand off
 * to the I-frame or S-frame handler. Protocol violations (payload on an
 * S-frame, invalid req_seq, oversize) trigger a disconnect. */
4356 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4360 int len, next_tx_seq_offset, req_seq_offset;
4362 control = __get_control(chan, skb->data);
4363 skb_pull(skb, __ctrl_size(chan));
/* Check FCS first:
4367 * We can just drop the corrupted I-frame here.
4368 * Receiver will miss it and start proper recovery
4369 * procedures and ask retransmission.
 */
4371 if (l2cap_check_fcs(chan, skb))
/* Compute the true payload length, excluding SDU-length header
 * (on SAR start frames) and the trailing FCS. */
4374 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4375 len -= L2CAP_SDULEN_SIZE;
4377 if (chan->fcs == L2CAP_FCS_CRC16)
4378 len -= L2CAP_FCS_SIZE;
4380 if (len > chan->mps) {
4381 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4385 req_seq = __get_reqseq(chan, control);
4387 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4389 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4390 chan->expected_ack_seq);
4392 /* check for invalid req-seq */
4393 if (req_seq_offset > next_tx_seq_offset) {
4394 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4398 if (!__is_sframe(chan, control)) {
/* I-frame must carry data. */
4400 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4404 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame must not carry data. */
4408 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4412 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the connection-oriented channel identified by
 * cid, dispatching on the channel mode:
 *  - BASIC: MTU check, then straight to the upper layer;
 *  - ERTM: full reliable-mode receive via l2cap_ertm_data_rcv();
 *  - STREAMING: control/FCS/length checks, then in-order reassembly —
 *    missing frames discard any partial SDU rather than recovering.
 * Unknown cid or non-connected channel state drops the packet. */
4422 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4424 struct l2cap_chan *chan;
4429 chan = l2cap_get_chan_by_scid(conn, cid);
4431 BT_DBG("unknown cid 0x%4.4x", cid);
4432 /* Drop packet and return */
4437 l2cap_chan_lock(chan);
4439 BT_DBG("chan %p, len %d", chan, skb->len);
4441 if (chan->state != BT_CONNECTED)
4444 switch (chan->mode) {
4445 case L2CAP_MODE_BASIC:
4446 /* If socket recv buffers overflows we drop data here
4447 * which is *bad* because L2CAP has to be reliable.
4448 * But we don't have any other choice. L2CAP doesn't
4449 * provide flow control mechanism. */
4451 if (chan->imtu < skb->len)
4454 if (!chan->ops->recv(chan->data, skb))
4458 case L2CAP_MODE_ERTM:
4459 l2cap_ertm_data_rcv(chan, skb);
4463 case L2CAP_MODE_STREAMING:
4464 control = __get_control(chan, skb->data);
4465 skb_pull(skb, __ctrl_size(chan));
4468 if (l2cap_check_fcs(chan, skb))
4471 if (__is_sar_start(chan, control))
4472 len -= L2CAP_SDULEN_SIZE;
4474 if (chan->fcs == L2CAP_FCS_CRC16)
4475 len -= L2CAP_FCS_SIZE;
/* Streaming mode never carries S-frames. */
4477 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4480 tx_seq = __get_txseq(chan, control);
4482 if (chan->expected_tx_seq != tx_seq) {
4483 /* Frame(s) missing - must discard partial SDU */
4484 kfree_skb(chan->sdu);
4486 chan->sdu_last_frag = NULL;
4489 /* TODO: Notify userland of missing data */
4492 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4494 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4495 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4500 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4508 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel bound to
 * @psm on this adapter's source address.
 * NOTE(review): error/cleanup lines are elided in this chunk.
 */
4513 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4515 struct l2cap_chan *chan;
4517 chan = l2cap_global_chan_by_psm(0, psm, conn->src)
4521 BT_DBG("chan %p, len %d", chan, skb->len);
/* Channel must be at least bound to receive connectionless data */
4523 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
/* Respect the incoming MTU */
4526 if (chan->imtu < skb->len)
4529 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE attribute-protocol (fixed-CID) packet to the global channel
 * registered for @cid.  Mirrors l2cap_conless_channel() but looks up by CID.
 * NOTE(review): error/cleanup lines are elided in this chunk.
 */
4538 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4539 struct sk_buff *skb)
4541 struct l2cap_chan *chan;
4543 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4547 BT_DBG("chan %p, len %d", chan, skb->len);
/* Channel must be at least bound to accept data */
4549 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
/* Respect the incoming MTU */
4552 if (chan->imtu < skb->len)
4555 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header, validate the length, and dispatch by destination CID to the
 * signalling, connectionless, ATT, SMP, or data-channel handlers.
 * NOTE(review): some lines (e.g. the length-mismatch error path) are elided.
 */
4564 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4566 struct l2cap_hdr *lh = (void *) skb->data;
4570 skb_pull(skb, L2CAP_HDR_SIZE);
4571 cid = __le16_to_cpu(lh->cid);
4572 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
4574 if (len != skb->len) {
4579 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4582 case L2CAP_CID_LE_SIGNALING:
4583 case L2CAP_CID_SIGNALING:
4584 l2cap_sig_channel(conn, skb);
4587 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM first in the payload */
4588 psm = get_unaligned((__le16 *) skb->data);
4590 l2cap_conless_channel(conn, psm, skb);
4593 case L2CAP_CID_LE_DATA:
4594 l2cap_att_channel(conn, cid, skb);
/* Security-manager traffic; a failure tears down the connection */
4598 if (smp_sig_channel(conn, skb))
4599 l2cap_conn_del(conn->hcon, EACCES);
/* Everything else is a dynamically allocated data channel */
4603 l2cap_data_channel(conn, cid, skb);
4608 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: a remote device asks to connect.  Scan listening channels
 * and build the accepted link-mode mask; an exact local-address match (lm1)
 * takes precedence over wildcard BDADDR_ANY listeners (lm2).
 * NOTE(review): the line setting `exact` appears to be elided here.
 */
4610 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4612 int exact = 0, lm1 = 0, lm2 = 0;
4613 struct l2cap_chan *c;
4615 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4617 /* Find listening sockets and check their link_mode */
4618 read_lock(&chan_list_lock);
4619 list_for_each_entry(c, &chan_list, global_l) {
4620 struct sock *sk = c->sk;
4622 if (c->state != BT_LISTEN)
/* Exact match on our local adapter address */
4625 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4626 lm1 |= HCI_LM_ACCEPT;
4627 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4628 lm1 |= HCI_LM_MASTER;
/* Wildcard listener bound to any address */
4630 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4631 lm2 |= HCI_LM_ACCEPT;
4632 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4633 lm2 |= HCI_LM_MASTER;
4636 read_unlock(&chan_list_lock);
4638 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success, create the
 * L2CAP connection object and mark it ready; on failure, tear it down with
 * the HCI status translated to an errno.
 * NOTE(review): the success/failure branch structure is partly elided.
 */
4641 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4643 struct l2cap_conn *conn;
4645 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4648 conn = l2cap_conn_add(hcon, status);
4650 l2cap_conn_ready(conn);
4652 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the disconnect reason L2CAP wants for this link.
 * Falls back to "remote user terminated" when no connection state exists
 * (the guard condition itself is on an elided line — TODO confirm).
 */
4657 int l2cap_disconn_ind(struct hci_conn *hcon)
4659 struct l2cap_conn *conn = hcon->l2cap_data;
4661 BT_DBG("hcon %p", hcon);
4664 return HCI_ERROR_REMOTE_USER_TERM;
4665 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the L2CAP connection,
 * translating the HCI reason code to an errno for the upper layers.
 */
4668 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4670 BT_DBG("hcon %p reason %d", hcon, reason);
4672 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a change of link encryption on a connection-oriented channel:
 * losing encryption arms a grace timer (medium security) or closes the
 * channel outright (high security); regaining it clears the timer.
 */
4676 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
/* Only connection-oriented channels have per-channel security state */
4678 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4681 if (encrypt == 0x00) {
4682 if (chan->sec_level == BT_SECURITY_MEDIUM) {
/* Give the link a chance to re-encrypt before giving up */
4683 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4684 } else if (chan->sec_level == BT_SECURITY_HIGH)
4685 l2cap_chan_close(chan, ECONNREFUSED);
/* Encryption (re)established — cancel the pending grace timer */
4687 if (chan->sec_level == BT_SECURITY_MEDIUM)
4688 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure finished with @status
 * and new @encrypt state.  Walk every channel on the connection and advance
 * its state machine accordingly (LE key distribution, pending connects,
 * deferred accepts, encryption-loss handling).
 * NOTE(review): several lines (early returns, some branch bodies) are
 * elided in this chunk; comments describe only the visible statements.
 */
4692 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4694 struct l2cap_conn *conn = hcon->l2cap_data;
4695 struct l2cap_chan *chan;
4700 BT_DBG("conn %p", conn);
/* LE links: kick off SMP key distribution and stop the security timer */
4702 if (hcon->type == LE_LINK) {
4703 smp_distribute_keys(conn, 0);
4704 cancel_delayed_work(&conn->security_timer);
4707 mutex_lock(&conn->chan_lock);
4709 list_for_each_entry(chan, &conn->chan_l, list) {
4710 l2cap_chan_lock(chan);
4712 BT_DBG("chan->scid %d", chan->scid);
/* ATT fixed channel becomes ready as soon as the link is encrypted */
4714 if (chan->scid == L2CAP_CID_LE_DATA) {
4715 if (!status && encrypt) {
4716 chan->sec_level = hcon->sec_level;
4717 l2cap_chan_ready(chan);
4720 l2cap_chan_unlock(chan);
/* Channel still waiting on connect completion — nothing to do yet */
4724 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4725 l2cap_chan_unlock(chan);
/* Established channels just react to the encryption change */
4729 if (!status && (chan->state == BT_CONNECTED ||
4730 chan->state == BT_CONFIG)) {
4731 l2cap_check_encryption(chan, encrypt);
4732 l2cap_chan_unlock(chan);
/* Outgoing connect was gated on security: send the request now (success)
 * or arm the disconnect timer (failure — branch condition elided) */
4736 if (chan->state == BT_CONNECT) {
4738 l2cap_send_conn_req(chan);
4740 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect awaiting security: answer the peer's CONN_REQ */
4742 } else if (chan->state == BT_CONNECT2) {
4743 struct sock *sk = chan->sk;
4744 struct l2cap_conn_rsp rsp;
/* Deferred setup: keep the peer pending and wake the listener */
4750 if (bt_sk(sk)->defer_setup) {
4751 struct sock *parent = bt_sk(sk)->parent;
4752 res = L2CAP_CR_PEND;
4753 stat = L2CAP_CS_AUTHOR_PEND;
4755 parent->sk_data_ready(parent, 0);
/* Security passed: accept and move to configuration */
4757 __l2cap_state_change(chan, BT_CONFIG);
4758 res = L2CAP_CR_SUCCESS;
4759 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and start teardown */
4762 __l2cap_state_change(chan, BT_DISCONN);
4763 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4764 res = L2CAP_CR_SEC_BLOCK;
4765 stat = L2CAP_CS_NO_INFO;
4770 rsp.scid = cpu_to_le16(chan->dcid);
4771 rsp.dcid = cpu_to_le16(chan->scid);
4772 rsp.result = cpu_to_le16(res);
4773 rsp.status = cpu_to_le16(stat);
4774 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4778 l2cap_chan_unlock(chan);
4781 mutex_unlock(&conn->chan_lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that span
 * multiple ACL packets (start fragment carries the basic header and total
 * length; continuation fragments append into conn->rx_skb) and hand each
 * complete frame to l2cap_recv_frame().
 * NOTE(review): many lines (error returns, frees, the ACL_CONT else-branch
 * marker) are elided in this chunk; comments describe visible statements.
 */
4786 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4788 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the connection object on first data (guard line elided) */
4791 conn = l2cap_conn_add(hcon, 0);
4796 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start fragment of a (possibly fragmented) L2CAP frame */
4798 if (!(flags & ACL_CONT)) {
4799 struct l2cap_hdr *hdr;
4800 struct l2cap_chan *chan;
/* A start while a previous reassembly is pending means we lost data */
4805 BT_ERR("Unexpected start frame (len %d)", skb->len);
4806 kfree_skb(conn->rx_skb);
4807 conn->rx_skb = NULL;
4809 l2cap_conn_unreliable(conn, ECOMM);
4812 /* Start fragment always begin with Basic L2CAP header */
4813 if (skb->len < L2CAP_HDR_SIZE) {
4814 BT_ERR("Frame is too short (len %d)", skb->len);
4815 l2cap_conn_unreliable(conn, ECOMM);
4819 hdr = (struct l2cap_hdr *) skb->data;
4820 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4821 cid = __le16_to_cpu(hdr->cid);
4823 if (len == skb->len) {
4824 /* Complete frame received */
4825 l2cap_recv_frame(conn, skb);
4829 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* Claimed length shorter than the data we already have — drop link data */
4831 if (skb->len > len) {
4832 BT_ERR("Frame is too long (len %d, expected len %d)",
4834 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU before buffering the frame */
4838 chan = l2cap_get_chan_by_scid(conn, cid);
4840 if (chan && chan->sk) {
4841 struct sock *sk = chan->sk;
4844 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4845 BT_ERR("Frame exceeding recv MTU (len %d, "
4849 l2cap_conn_unreliable(conn, ECOMM);
4855 /* Allocate skb for the complete frame (with header) */
4856 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4860 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes of the frame are still outstanding */
4862 conn->rx_len = len - skb->len;
4864 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress */
4866 if (!conn->rx_len) {
4867 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4868 l2cap_conn_unreliable(conn, ECOMM);
/* Continuation overruns the advertised frame length — abort reassembly */
4872 if (skb->len > conn->rx_len) {
4873 BT_ERR("Fragment is too long (len %d, expected %d)",
4874 skb->len, conn->rx_len);
4875 kfree_skb(conn->rx_skb);
4876 conn->rx_skb = NULL;
4878 l2cap_conn_unreliable(conn, ECOMM);
4882 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4884 conn->rx_len -= skb->len;
4886 if (!conn->rx_len) {
4887 /* Complete frame received */
4888 l2cap_recv_frame(conn, conn->rx_skb);
4889 conn->rx_skb = NULL;
/* seq_file show callback: dump one line per registered L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) for debugfs.
 */
4898 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4900 struct l2cap_chan *c;
/* chan_list is walked under the global read lock */
4902 read_lock(&chan_list_lock);
4904 list_for_each_entry(c, &chan_list, global_l) {
4905 struct sock *sk = c->sk;
4907 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4908 batostr(&bt_sk(sk)->src),
4909 batostr(&bt_sk(sk)->dst),
4910 c->state, __le16_to_cpu(c->psm),
4911 c->scid, c->dcid, c->imtu, c->omtu,
4912 c->sec_level, c->mode);
4915 read_unlock(&chan_list_lock);
/* debugfs open: wire the file to the single-record seq_file show above */
4920 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4922 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry
 * (the .read line appears to be elided here — TODO confirm seq_read) */
4925 static const struct file_operations l2cap_debugfs_fops = {
4926 .open = l2cap_debugfs_open,
4928 .llseek = seq_lseek,
4929 .release = single_release,
/* Dentry kept so l2cap_exit() can remove the debugfs file */
4932 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then best-effort create
 * the debugfs file (failure is only logged, not fatal).
 */
4934 int __init l2cap_init(void)
4938 err = l2cap_init_sockets();
4943 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4944 bt_debugfs, NULL, &l2cap_debugfs_fops);
4946 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: tear down in reverse order of l2cap_init() */
4952 void l2cap_exit(void)
4954 debugfs_remove(l2cap_debugfs);
4955 l2cap_cleanup_sockets();
/* Runtime knob: setting disable_ertm forces basic mode instead of enhanced
 * retransmission mode (variable itself is defined earlier in the file) */
4958 module_param(disable_ertm, bool, 0644);
4959 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");